summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIlya Alekseyev <ialekseev@griddynamics.com>2010-12-29 17:17:06 +0300
committerIlya Alekseyev <ialekseev@griddynamics.com>2010-12-29 17:17:06 +0300
commit0dd84453db5d8a3293421049b92385b00a602fc3 (patch)
tree7e30ff22eb268aebb9578dbfbf160b67c3319fb5
parentf4da70ef42e5b8908f9979f2f80c60798a517c60 (diff)
parent90acbd3dd834fdef6f1000b69dfba4d4c1d12fd5 (diff)
downloadnova-0dd84453db5d8a3293421049b92385b00a602fc3.tar.gz
nova-0dd84453db5d8a3293421049b92385b00a602fc3.tar.xz
nova-0dd84453db5d8a3293421049b92385b00a602fc3.zip
merge
-rw-r--r--.mailmap2
-rw-r--r--Authors4
-rw-r--r--nova/api/openstack/__init__.py4
-rw-r--r--nova/api/openstack/backup_schedules.py15
-rw-r--r--nova/api/openstack/images.py85
-rw-r--r--nova/api/openstack/ratelimiting/__init__.py4
-rw-r--r--nova/api/openstack/servers.py62
-rw-r--r--nova/api/openstack/sharedipgroups.py39
-rw-r--r--nova/compute/api.py29
-rw-r--r--nova/compute/manager.py33
-rw-r--r--nova/compute/power_state.py4
-rw-r--r--nova/tests/api/openstack/test_images.py18
-rw-r--r--nova/tests/api/openstack/test_servers.py32
-rw-r--r--nova/tests/test_compute.py12
-rw-r--r--nova/tests/test_virt.py2
-rw-r--r--nova/tests/test_xenapi.py1
-rw-r--r--nova/tests/xenapi/stubs.py13
-rw-r--r--nova/utils.py3
-rw-r--r--nova/virt/fake.py12
-rw-r--r--nova/virt/libvirt_conn.py37
-rw-r--r--nova/virt/xenapi/fake.py1
-rw-r--r--nova/virt/xenapi/vm_utils.py88
-rw-r--r--nova/virt/xenapi/vmops.py51
-rw-r--r--nova/virt/xenapi/volume_utils.py6
-rw-r--r--nova/virt/xenapi_conn.py8
-rw-r--r--nova/wsgi.py2
-rw-r--r--plugins/xenserver/doc/networking.rst144
-rwxr-xr-xplugins/xenserver/networking/etc/init.d/host-rules106
-rw-r--r--plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch22
-rwxr-xr-xplugins/xenserver/networking/etc/xensource/scripts/vif_rules.py119
-rw-r--r--plugins/xenserver/xenapi/README (renamed from plugins/xenapi/README)0
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore (renamed from plugins/xenapi/etc/xapi.d/plugins/objectstore)41
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py (renamed from plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py)0
-rwxr-xr-xrun_tests.sh12
34 files changed, 899 insertions, 112 deletions
diff --git a/.mailmap b/.mailmap
index 9ab7db743..9e7fb1ec0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,3 +27,5 @@
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
+<corywright@gmail.com> <cory.wright@rackspace.com>
+<ant@openstack.org> <amesserl@rackspace.com>
diff --git a/Authors b/Authors
index 9076f3977..0fbdfde01 100644
--- a/Authors
+++ b/Authors
@@ -1,9 +1,11 @@
Andy Smith <code@term.ie>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
+Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
+Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
@@ -28,6 +30,7 @@ Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
@@ -36,3 +39,4 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
+
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index c49399f28..66aceee2d 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -93,12 +93,14 @@ class APIRouter(wsgi.Router):
logging.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
+ server_members['suspend'] = 'POST'
+ server_members['resume'] = 'POST'
mapper.resource("server", "servers", controller=servers.Controller(),
collection={'detail': 'GET'},
member=server_members)
- mapper.resource("backup_schedule", "backup_schedules",
+ mapper.resource("backup_schedule", "backup_schedule",
controller=backup_schedules.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index fc70b5c6c..fcc07bdd3 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -23,13 +23,25 @@ from nova.api.openstack import faults
import nova.image.service
+def _translate_keys(inst):
+ """ Coerces the backup schedule into proper dictionary format """
+ return dict(backupSchedule=inst)
+
+
class Controller(wsgi.Controller):
+ """ The backup schedule API controller for the Openstack API """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'backupSchedule': []}}}
def __init__(self):
pass
def index(self, req, server_id):
- return faults.Fault(exc.HTTPNotFound())
+ """ Returns the list of backup schedules for a given instance """
+ return _translate_keys({})
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
@@ -37,4 +49,5 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
def delete(self, req, server_id, id):
+ """ Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotFound())
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index d3312aba8..ba35fbc78 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -30,6 +30,65 @@ from nova.api.openstack import faults
FLAGS = flags.FLAGS
+def _translate_keys(item):
+ """
+ Maps key names to Rackspace-like attributes for return
+ also pares down attributes to those we want
+ item is a dict
+
+ Note: should be removed when the set of keys expected by the api
+ and the set of keys returned by the image service are equivalent
+
+ """
+ # TODO(tr3buchet): this map is specific to s3 object store,
+ # replace with a list of keys for _filter_keys later
+ mapped_keys = {'status': 'imageState',
+ 'id': 'imageId',
+ 'name': 'imageLocation'}
+
+ mapped_item = {}
+ # TODO(tr3buchet):
+ # this chunk of code works with s3 and the local image service/glance
+ # when we switch to glance/local image service it can be replaced with
+ # a call to _filter_keys, and mapped_keys can be changed to a list
+ try:
+ for k, v in mapped_keys.iteritems():
+ # map s3 fields
+ mapped_item[k] = item[v]
+ except KeyError:
+ # return only the fields api expects
+ mapped_item = _filter_keys(item, mapped_keys.keys())
+
+ return mapped_item
+
+
+def _translate_status(item):
+ """
+ Translates status of image to match current Rackspace api bindings
+ item is a dict
+
+ Note: should be removed when the set of statuses expected by the api
+ and the set of statuses returned by the image service are equivalent
+
+ """
+ status_mapping = {
+ 'pending': 'queued',
+ 'decrypting': 'preparing',
+ 'untarring': 'saving',
+ 'available': 'active'}
+ item['status'] = status_mapping[item['status']]
+ return item
+
+
+def _filter_keys(item, keys):
+ """
+ Filters all model attributes except for keys
+ item is a dict
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
class Controller(wsgi.Controller):
_serialization_metadata = {
@@ -42,25 +101,25 @@ class Controller(wsgi.Controller):
self._service = utils.import_object(FLAGS.image_service)
def index(self, req):
- """Return all public images in brief."""
- return dict(images=[dict(id=img['id'], name=img['name'])
- for img in self.detail(req)['images']])
+ """Return all public images in brief"""
+ items = self._service.index(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_filter_keys(item, ('id', 'name')) for item in items]
+ return dict(images=items)
def detail(self, req):
- """Return all public images in detail."""
+ """Return all public images in detail"""
try:
- images = self._service.detail(req.environ['nova.context'])
- images = common.limited(images, req)
+ items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
- # Emulate detail() using repeated calls to show()
- ctxt = req.environ['nova.context']
- images = self._service.index(ctxt)
- images = common.limited(images, req)
- images = [self._service.show(ctxt, i['id']) for i in images]
- return dict(images=images)
+ items = self._service.index(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_translate_keys(item) for item in items]
+ items = [_translate_status(item) for item in items]
+ return dict(images=items)
def show(self, req, id):
- """Return data about the given image id."""
+ """Return data about the given image id"""
return dict(image=self._service.show(req.environ['nova.context'], id))
def delete(self, req, id):
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index 91a8b2e55..cbb4b897e 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -64,9 +64,9 @@ class RateLimitingMiddleware(wsgi.Middleware):
If the request should be rate limited, return a 413 status with a
Retry-After header giving the time when the request would succeed.
"""
- return self.limited_request(req, self.application)
+ return self.rate_limited_request(req, self.application)
- def limited_request(self, req, application):
+ def rate_limited_request(self, req, application):
"""Rate limit the request.
If the request should be rate limited, return a 413 status with a
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 8d60e2cab..845183258 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -35,18 +35,16 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
-def _entity_list(entities):
- """ Coerces a list of servers into proper dictionary format """
- return dict(servers=entities)
-
-
-def _entity_detail(inst):
- """ Maps everything to Rackspace-like attributes for return"""
+def _translate_detail_keys(inst):
+ """ Coerces into dictionary format, mapping everything to Rackspace-like
+ attributes for return"""
power_mapping = {
+ None: 'build',
power_state.NOSTATE: 'build',
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
- power_state.PAUSED: 'suspended',
+ power_state.SUSPENDED: 'suspended',
+ power_state.PAUSED: 'error',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
@@ -66,8 +64,9 @@ def _entity_detail(inst):
return dict(server=inst_dict)
-def _entity_inst(inst):
- """ Filters all model attributes save for id and name """
+def _translate_keys(inst):
+ """ Coerces into dictionary format, excluding all model attributes
+ save for id and name """
return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
@@ -86,29 +85,29 @@ class Controller(wsgi.Controller):
def index(self, req):
""" Returns a list of server names and ids for a given user """
- return self._items(req, entity_maker=_entity_inst)
+ return self._items(req, entity_maker=_translate_keys)
def detail(self, req):
""" Returns a list of server details for a given user """
- return self._items(req, entity_maker=_entity_detail)
+ return self._items(req, entity_maker=_translate_detail_keys)
def _items(self, req, entity_maker):
"""Returns a list of servers for a given user.
- entity_maker - either _entity_detail or _entity_inst
+ entity_maker - either _translate_detail_keys or _translate_keys
"""
instance_list = self.compute_api.get_instances(
req.environ['nova.context'])
limited_list = common.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
- return _entity_list(res)
+ return dict(servers=res)
def show(self, req, id):
""" Returns server details by server id """
try:
instance = self.compute_api.get_instance(
req.environ['nova.context'], int(id))
- return _entity_detail(instance)
+ return _translate_detail_keys(instance)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -137,7 +136,7 @@ class Controller(wsgi.Controller):
description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'])
- return _entity_inst(instances[0])
+ return _translate_keys(instances[0])
def update(self, req, id):
""" Updates the server name or password """
@@ -152,8 +151,9 @@ class Controller(wsgi.Controller):
update_dict['display_name'] = inst_dict['server']['name']
try:
- self.compute_api.update_instance(req.environ['nova.context'],
- instance['id'],
+ ctxt = req.environ['nova.context']
+ self.compute_api.update_instance(ctxt,
+ id,
**update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -182,7 +182,7 @@ class Controller(wsgi.Controller):
self.compute_api.pause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error("Compute.api::pause %s", readable)
+ logging.error(_("Compute.api::pause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -193,6 +193,28 @@ class Controller(wsgi.Controller):
self.compute_api.unpause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error("Compute.api::unpause %s", readable)
+ logging.error(_("Compute.api::unpause %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def suspend(self, req, id):
+ """permit admins to suspend the server"""
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.suspend(context, id)
+ except:
+ readable = traceback.format_exc()
+ logging.error(_("compute.api::suspend %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def resume(self, req, id):
+ """permit admins to resume the server from suspend"""
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.resume(context, id)
+ except:
+ readable = traceback.format_exc()
+ logging.error(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py
index 75d02905c..845f5bead 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/sharedipgroups.py
@@ -15,26 +15,51 @@
# License for the specific language governing permissions and limitations
# under the License.
+from webob import exc
+
from nova import wsgi
+from nova.api.openstack import faults
+
+
+def _translate_keys(inst):
+ """ Coerces a shared IP group instance into proper dictionary format """
+ return dict(sharedIpGroup=inst)
+
+
+def _translate_detail_keys(inst):
+ """ Coerces a shared IP group instance into proper dictionary format with
+ correctly mapped attributes """
+ return dict(sharedIpGroup=inst)
class Controller(wsgi.Controller):
""" The Shared IP Groups Controller for the Openstack API """
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'sharedIpGroup': []}}}
+
def index(self, req):
- raise NotImplementedError
+ """ Returns a list of Shared IP Groups for the user """
+ return dict(sharedIpGroups=[])
def show(self, req, id):
- raise NotImplementedError
+ """ Shows in-depth information on a specific Shared IP Group """
+ return _translate_keys({})
def update(self, req, id):
- raise NotImplementedError
+ """ You can't update a Shared IP Group """
+ raise faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, id):
- raise NotImplementedError
+ """ Deletes a Shared IP Group """
+ raise faults.Fault(exc.HTTPNotFound())
- def detail(self, req):
- raise NotImplementedError
+ def detail(self, req, id):
+ """ Returns a complete list of Shared IP Groups """
+ return _translate_detail_keys({})
def create(self, req):
- raise NotImplementedError
+ """ Creates a new Shared IP group """
+ raise faults.Fault(exc.HTTPNotFound())
diff --git a/nova/compute/api.py b/nova/compute/api.py
index cc377a1e4..789514967 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,15 +92,16 @@ class ComputeAPI(base.Base):
is_vpn = image_id == FLAGS.vpn_image_id
if not is_vpn:
image = self.image_service.show(context, image_id)
-
- # If kernel_id/ramdisk_id isn't explicitly set in API call
- # we take the defaults from the image's metadata
if kernel_id is None:
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', None)
-
- # Make sure we have access to kernel and ramdisk
+ #No kernel and ramdisk for raw images
+ if kernel_id == str(FLAGS.null_kernel):
+ kernel_id = None
+ ramdisk_id = None
+ logging.debug("Creating a raw instance")
+ # Make sure we have access to kernel and ramdisk (if not raw)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@@ -285,6 +286,24 @@ class ComputeAPI(base.Base):
{"method": "unpause_instance",
"args": {"instance_id": instance['id']}})
+ def suspend(self, context, instance_id):
+ """suspend the instance with instance_id"""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "suspend_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def resume(self, context, instance_id):
+ """resume the instance with instance_id"""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "resume_instance",
+ "args": {"instance_id": instance['id']}})
+
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ff8202cca..70b175e7c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -297,6 +297,39 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ def suspend_instance(self, context, instance_id):
+ """suspend the instance with instance_id"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug(_('instance %s: suspending'),
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context, instance_id,
+ power_state.NOSTATE,
+ 'suspending')
+ self.driver.suspend(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
+ @exception.wrap_exception
+ def resume_instance(self, context, instance_id):
+ """resume the suspended instance with instance_id"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
+ self.db.instance_set_state(context, instance_id,
+ power_state.NOSTATE,
+ 'resuming')
+ self.driver.resume(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
+ @exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index cefdf2d9e..37039d2ec 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -26,6 +26,7 @@ PAUSED = 0x03
SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
+SUSPENDED = 0x07
def name(code):
@@ -36,5 +37,6 @@ def name(code):
PAUSED: 'paused',
SHUTDOWN: 'shutdown',
SHUTOFF: 'shutdown',
- CRASHED: 'crashed'}
+ CRASHED: 'crashed',
+ SUSPENDED: 'suspended'}
return d[code]
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index f610cbf9c..1b4031217 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -223,6 +223,20 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
+ def _is_equivalent_subset(x, y):
+ if set(x) <= set(y):
+ for k, v in x.iteritems():
+ if x[k] != y[k]:
+ if x[k] == 'active' and y[k] == 'available':
+ continue
+ return False
+ return True
+ return False
+
for image in res_dict['images']:
- self.assertEquals(1, self.IMAGE_FIXTURES.count(image),
- "image %s not in fixtures!" % str(image))
+ for image_fixture in self.IMAGE_FIXTURES:
+ if _is_equivalent_subset(image, image_fixture):
+ break
+ else:
+ self.assertEquals(1, 2, "image %s not in fixtures!" %
+ str(image))
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 3820f5f27..5d23db588 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -88,9 +88,13 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
- fake_compute_api)
+ fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
- fake_compute_api)
+ fake_compute_api)
+ self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
+ fake_compute_api)
+ self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
+ fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
@@ -246,6 +250,30 @@ class ServersTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
+ def test_server_suspend(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/suspend')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_resume(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/resume')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 202)
+
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 348bb3351..bcb8a1526 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info("Running instances: %s", instances)
+ logging.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info("After terminating instances: %s", instances)
+ logging.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -136,6 +136,14 @@ class ComputeTestCase(test.TestCase):
self.compute.unpause_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_suspend(self):
+ """ensure instance can be suspended"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.suspend_instance(self.context, instance_id)
+ self.compute.resume_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_reboot(self):
"""Ensure instance can be rebooted"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 8dab8de2f..4aa489d08 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
+ libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
@@ -157,7 +158,6 @@ class LibvirtConnTestCase(test.TestCase):
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
-
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b5d3ea395..ed2e4ffde 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -48,6 +48,7 @@ class XenAPIVolumeTestCase(test.TestCase):
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
fakes.stub_out_db_instance_api(self.stubs)
+ stubs.stub_out_get_target(self.stubs)
fake.reset()
self.values = {'name': 1, 'id': 1,
'project_id': 'fake',
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 1dacad6a3..a7e592fee 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -18,12 +18,13 @@
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
+from nova.virt.xenapi import volume_utils
def stubout_session(stubs, cls):
- """ Stubs out two methods from XenAPISession """
+ """Stubs out two methods from XenAPISession"""
def fake_import(self):
- """ Stubs out get_imported_xenapi of XenAPISession """
+ """Stubs out get_imported_xenapi of XenAPISession"""
fake_module = 'nova.virt.xenapi.fake'
from_list = ['fake']
return __import__(fake_module, globals(), locals(), from_list, -1)
@@ -34,6 +35,14 @@ def stubout_session(stubs, cls):
fake_import)
+def stub_out_get_target(stubs):
+ """Stubs out _get_target in volume_utils"""
+ def fake_get_target(volume_id):
+ return (None, None)
+
+ stubs.Set(volume_utils, '_get_target', fake_get_target)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
diff --git a/nova/utils.py b/nova/utils.py
index b9045a50c..15112faa2 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -48,7 +48,8 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
- except (ImportError, ValueError, AttributeError):
+ except (ImportError, ValueError, AttributeError), exc:
+ logging.debug(_('Inner Exception: %s'), exc)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 238acf798..706888b0d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -148,6 +148,18 @@ class FakeConnection(object):
"""
pass
+ def suspend(self, instance, callback):
+ """
+ suspend the specified instance
+ """
+ pass
+
+ def resume(self, instance, callback):
+ """
+ resume the specified instance
+ """
+ pass
+
def destroy(self, instance):
"""
Destroy (shutdown and delete) the specified instance.
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 651b2af93..f6a218fa4 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -58,10 +58,9 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
-from Cheetah.Template import Template
-
libvirt = None
libxml2 = None
+Template = None
FLAGS = flags.FLAGS
@@ -88,15 +87,26 @@ flags.DEFINE_bool('allow_project_net_traffic',
def get_connection(read_only):
# These are loaded late so that there's no need to install these
# libraries when not using libvirt.
+ # Cheetah is separate because the unit tests want to load Cheetah,
+ # but not libvirt.
global libvirt
global libxml2
if libvirt is None:
libvirt = __import__('libvirt')
if libxml2 is None:
libxml2 = __import__('libxml2')
+ _late_load_cheetah()
return LibvirtConnection(read_only)
+def _late_load_cheetah():
+ global Template
+ if Template is None:
+ t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
+ -1)
+ Template = t.Template
+
+
def _get_net_and_mask(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.netmask())
@@ -280,6 +290,14 @@ class LibvirtConnection(object):
raise exception.APIError("unpause not supported for libvirt.")
@exception.wrap_exception
+ def suspend(self, instance, callback):
+ raise exception.APIError("suspend not supported for libvirt")
+
+ @exception.wrap_exception
+ def resume(self, instance, callback):
+ raise exception.APIError("resume not supported for libvirt")
+
+ @exception.wrap_exception
def rescue(self, instance):
self.destroy(instance, False)
@@ -512,9 +530,10 @@ class LibvirtConnection(object):
if FLAGS.allow_project_net_traffic:
net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
- ) % (net, mask)
+ extra_params = ("<parameter name=\"PROJNET\" "
+ "value=\"%s\" />\n"
+ "<parameter name=\"PROJMASK\" "
+ "value=\"%s\" />\n") % (net, mask)
else:
extra_params = "\n"
@@ -800,8 +819,8 @@ class NWFilterFirewall(object):
the base filter are all in place.
"""
- nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
- ) % instance['name']
+ nwfilter_xml = ("<filter name='nova-instance-%s' "
+ "chain='root'>\n") % instance['name']
if instance['image_id'] == FLAGS.vpn_image_id:
nwfilter_xml += " <filterref filter='nova-vpn' />\n"
@@ -814,8 +833,8 @@ class NWFilterFirewall(object):
for security_group in instance.security_groups:
self.ensure_security_group_filter(security_group['id'])
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
- ) % security_group['id']
+ nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
+ "/>\n") % security_group['id']
nwfilter_xml += "</filter>"
self._define_filter(nwfilter_xml)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 7a6c9ee71..1eaf31c25 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -235,6 +235,7 @@ class SessionBase(object):
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
+
def callit(*params):
logging.warn('Calling %s %s', name, impl)
self._check_session(params)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 89e02c917..47fb6db53 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -39,19 +39,35 @@ XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
+class ImageType:
+ """
+ Enumeration class for distinguishing different image types
+ 0 - kernel/ramdisk image (goes on dom0's filesystem)
+ 1 - disk image (local SR, partitioned by objectstore plugin)
+ 2 - raw disk image (local SR, NOT partitioned by plugin)
+ """
+
+ KERNEL_RAMDISK = 0
+ DISK = 1
+ DISK_RAW = 2
+
+
class VMHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk):
+ def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
- VM reference."""
+ VM reference.
+ the pv_kernel flag indicates whether the guest is HVM or PV
+ """
+
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
@@ -70,9 +86,9 @@ class VMHelper(HelperBase):
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
- 'PV_kernel': kernel,
- 'PV_ramdisk': ramdisk,
- 'PV_args': 'root=/dev/xvda1',
+ 'PV_kernel': '',
+ 'PV_ramdisk': '',
+ 'PV_args': '',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
@@ -84,7 +100,25 @@ class VMHelper(HelperBase):
'user_version': '0',
'other_config': {},
}
- logging.debug(_('Created VM %s...'), instance.name)
+ #Complete VM configuration record according to the image type
+ #non-raw/raw with PV kernel/raw in HVM mode
+ if instance.kernel_id:
+ rec['PV_bootloader'] = ''
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_bootloader_args'] = ''
+ rec['PV_legacy_args'] = ''
+ else:
+ if pv_kernel:
+ rec['PV_args'] = 'noninteractive'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ rec['HVM_boot_policy'] = 'BIOS order'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['platform'] = {'acpi': 'true', 'apic': 'true',
+ 'pae': 'true', 'viridian': 'true'}
+ logging.debug('Created VM %s...', instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@@ -170,22 +204,24 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
- def fetch_image(cls, session, image, user, project, use_sr):
- """use_sr: True to put the image as a VDI in an SR, False to place
- it on dom0's filesystem. The former is for VM disks, the latter for
- its kernel and ramdisk (if external kernels are being used).
- Returns a Deferred that gives the new VDI UUID."""
-
+ def fetch_image(cls, session, image, user, project, type):
+ """
+ type is interpreted as an ImageType instance
+ """
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug(_("Asking xapi to fetch %s as %s"), url, access)
- fn = use_sr and 'get_vdi' or 'get_kernel'
+ logging.debug("Asking xapi to fetch %s as %s", url, access)
+ fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = user.secret
- if use_sr:
+ args['add_partition'] = 'false'
+ args['raw'] = 'false'
+ if type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
+ if type == ImageType.DISK_RAW:
+ args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
#FIXME(armando): find a solution to missing instance_id
#with Josh Kearney
@@ -193,6 +229,22 @@ class VMHelper(HelperBase):
return uuid
@classmethod
+ def lookup_image(cls, session, vdi_ref):
+ logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ fn = "is_vdi_pv"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+ #TODO: Call proper function in plugin
+ task = session.async_call_plugin('objectstore', fn, args)
+ pv_str = session.wait_for_task(task)
+ if pv_str.lower() == 'true':
+ pv = True
+ elif pv_str.lower() == 'false':
+ pv = False
+ logging.debug("PV Kernel in VDI:%d", pv)
+ return pv
+
+ @classmethod
def lookup(cls, session, i):
"""Look the instance i up, and returns it if available"""
vms = session.get_xenapi().VM.get_by_name_label(i)
@@ -231,6 +283,10 @@ class VMHelper(HelperBase):
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
+ logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 39bb0f1e8..ba502ffa2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
+from nova.virt.xenapi.vm_utils import ImageType
class VMOps(object):
@@ -64,16 +65,30 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- vdi_uuid = VMHelper.fetch_image(
- self._session, instance.image_id, user, project, True)
- kernel = VMHelper.fetch_image(
- self._session, instance.kernel_id, user, project, False)
- ramdisk = VMHelper.fetch_image(
- self._session, instance.ramdisk_id, user, project, False)
+ #if kernel is not present we must download a raw disk
+ if instance.kernel_id:
+ disk_image_type = ImageType.DISK
+ else:
+ disk_image_type = ImageType.DISK_RAW
+ vdi_uuid = VMHelper.fetch_image(self._session,
+ instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- vm_ref = VMHelper.create_vm(
- self._session, instance, kernel, ramdisk)
+ #Have a look at the VDI and see if it has a PV kernel
+ pv_kernel = False
+ if not instance.kernel_id:
+ pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+ kernel = None
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session,
+ instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+ ramdisk = None
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session,
+ instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+ vm_ref = VMHelper.create_vm(self._session,
+ instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
@@ -173,6 +188,26 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.unpause', vm)
self._wait_with_callback(instance.id, task, callback)
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise Exception(_("suspend: instance not present %s") %
+ instance_name)
+ task = self._session.call_xenapi('Async.VM.suspend', vm)
+ self._wait_with_callback(task, callback)
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise Exception(_("resume: instance not present %s") %
+ instance_name)
+ task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+ self._wait_with_callback(task, callback)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
vm = VMHelper.lookup(self._session, instance_id)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index a0c0a67d4..1ca813bcf 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -60,13 +60,11 @@ class VolumeHelper(HelperBase):
'port': info['targetPort'],
'targetIQN': info['targetIQN'],
'chapuser': info['chapuser'],
- 'chappassword': info['chappassword']
- }
+ 'chappassword': info['chappassword']}
else:
record = {'target': info['targetHost'],
'port': info['targetPort'],
- 'targetIQN': info['targetIQN']
- }
+ 'targetIQN': info['targetIQN']}
try:
sr_ref = session.get_xenapi().SR.create(
session.get_xenapi_host(),
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 11c66c974..7f03d6c2b 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -147,6 +147,14 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ self._vmops.suspend(instance, callback)
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ self._vmops.resume(instance, callback)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c7ee9ed14..b5d6b96c1 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -270,7 +270,7 @@ class Serializer(object):
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
- req = webob.Request(environ)
+ req = webob.Request.blank('', environ)
suffix = req.path_info.split('.')[-1].lower()
if suffix == 'json':
self.handler = self._to_json
diff --git a/plugins/xenserver/doc/networking.rst b/plugins/xenserver/doc/networking.rst
new file mode 100644
index 000000000..67f2d9af3
--- /dev/null
+++ b/plugins/xenserver/doc/networking.rst
@@ -0,0 +1,144 @@
+Multi Tenancy Networking Protections in XenServer
+=================================================
+
+The purpose of the vif_rules script is to allow multi-tenancy on a XenServer
+host. In a multi-tenant cloud environment a host machine needs to be able to
+enforce network isolation amongst guest instances, at both layer two and layer
+three. The rules prevent guests from taking and using unauthorized IP addresses,
+sniffing other guests' traffic, and prevent ARP poisoning attacks. This current
+revision only supports IPv4, but will support IPv6 in the future.
+
+Kernel Requirements
+===================
+
+- physdev module
+- arptables support
+- ebtables support
+- iptables support
+
+If the kernel doesn't support these, you will need to obtain the Source RPMS for
+the proper version of XenServer to recompile the dom0 kernel.
+
+XenServer Requirements (32-bit dom0)
+====================================
+
+- arptables 32-bit rpm
+- ebtables 32-bit rpm
+- python-simplejson
+
+XenServer Environment Specific Notes
+====================================
+
+- XenServer 5.5 U1 based on the 2.6.18 kernel didn't include physdev module
+ support. Support for this had to be recompiled into the kernel.
+- XenServer 5.6 based on the 2.6.27 kernel didn't include physdev, ebtables, or
+ arptables.
+- XenServer 5.6 FP1 didn't include physdev, ebtables, or arptables but they do
+ have a Cloud Supplemental pack available to partners which swaps out the
+ kernels for kernels that support the networking rules.
+
+How it works - tl;dr
+====================
+
+iptables, ebtables, and arptables drop rules are applied to all forward chains
+on the host. These are applied at boot time with an init script. They ensure
+all forwarded packets are dropped by default. Allow rules are then applied to
+the instances to ensure they have permission to talk on the internet.
+
+How it works - Long
+===================
+
+Any time an underprivileged domain or domU is started or stopped, it gets a
+unique domain id (dom_id). This dom_id is utilized in a number of places, one
+of which is that it is assigned to the virtual interface (vif). The vifs are attached
+to the bridge that is attached to the physical network. For instance, if you
+had a public bridge attached to eth0 and your domain id was 5, your vif would be
+vif5.0.
+
+The networking rules are applied to the VIF directly so they apply at the lowest
+level of the networking stack. Because the VIF changes along with the domain id
+on any start, stop, or reboot of the instance, the rules need to be removed and
+re-added any time that occurs.
+
+Because the dom_id can change often, the vif_rules script is hooked into the
+/etc/xensource/scripts/vif script that gets called anytime an instance is
+started, or stopped, which includes pauses and resumes.
+
+Examples of the rules run for the host on boot:
+
+iptables -P FORWARD DROP
+iptables -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ebtables -P FORWARD DROP
+ebtables -A FORWARD -o eth0 -j ACCEPT
+arptables -P FORWARD DROP
+arptables -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+arptables -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+
+Examples of the rules that are run per instance state change:
+
+iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 10.1.135.22/32 -j ACCEPT
+arptables -A FORWARD --opcode Request --in-interface "vif1.0" \
+ --source-ip 10.1.135.22 -j ACCEPT
+arptables -A FORWARD --opcode Reply --in-interface "vif1.0" \
+ --source-ip 10.1.135.22 --source-mac 9e:6e:cc:19:7f:fe -j ACCEPT
+ebtables -A FORWARD -p 0806 -o vif1.0 --arp-ip-dst 10.1.135.22 -j ACCEPT
+ebtables -A FORWARD -p 0800 -o vif1.0 --ip-dst 10.1.135.22 -j ACCEPT
+ebtables -I FORWARD 1 -s ! 9e:6e:cc:19:7f:fe -i vif1.0 -j DROP
+
+Typically when you see a vif, it'll look like vif<domain id>.<network bridge>.
+vif2.1 for example would be domain 2 on the second interface.
+
+The vif_rules.py script needs to pull information about the IPs and MAC
+addresses assigned to the instance. The current implementation assumes that
+information is put into the VM Record into the xenstore-data key in a JSON
+string. The vif_rules.py script reads out of the JSON string to determine the
+IPs, and MAC addresses to protect.
+
+An example format is given below:
+
+# xe vm-param-get uuid=<uuid> param-name=xenstore-data
+xenstore-data (MRW):
+vm-data/networking/4040fa7292e4:
+{"label": "public",
+ "ips": [{"netmask":"255.255.255.0",
+ "enabled":"1",
+ "ip":"173.200.100.10"}],
+ "mac":"40:40:fa:72:92:e4",
+ "gateway":"173.200.100.1",
+ "vm_id":"123456",
+ "dns":["72.3.128.240","72.3.128.241"]};
+
+vm-data/networking/40402321c9b8:
+{"label":"private",
+ "ips":[{"netmask":"255.255.224.0",
+ "enabled":"1",
+ "ip":"10.177.10.10"}],
+ "routes":[{"route":"10.176.0.0",
+ "netmask":"255.248.0.0",
+ "gateway":"10.177.10.1"},
+ {"route":"10.191.192.0",
+ "netmask":"255.255.192.0",
+ "gateway":"10.177.10.1"}],
+ "mac":"40:40:23:21:c9:b8"}
+
+The key is used for two purposes. One, the vif_rules.py script will read from
+it to apply the rules needed after parsing the JSON. The second is that because
+it's put into the xenstore-data field, the xenstore will be populated with this
+data on boot. This allows a guest agent the ability to read out data about the
+instance and apply configurations as needed.
+
+Installation
+============
+
+- Copy host-rules into /etc/init.d/ and make sure to chmod +x host-rules.
+- Run 'chkconfig host-rules on' to add the init script to start up.
+- Copy vif_rules.py into /etc/xensource/scripts
+- Patch /etc/xensource/scripts/vif using the supplied patch file. It may vary
+  for different versions of XenServer but it should be pretty self-explanatory.
+ It calls the vif_rules.py script on domain creation and tear down.
+- Run '/etc/init.d/host-rules start' to start up the host based rules.
+- The instance rules will then fire on creation of the VM as long as the correct
+ JSON is in place.
+- You can check to see if the rules are in place with: iptables --list,
+ arptables --list, or ebtables --list
+
diff --git a/plugins/xenserver/networking/etc/init.d/host-rules b/plugins/xenserver/networking/etc/init.d/host-rules
new file mode 100755
index 000000000..798da9552
--- /dev/null
+++ b/plugins/xenserver/networking/etc/init.d/host-rules
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# host-rules Start/Stop the networking host rules
+#
+# chkconfig: 2345 85 15
+# description: Networking Host Rules for Multi Tenancy Protections
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+IPTABLES=/sbin/iptables
+EBTABLES=/sbin/ebtables
+ARPTABLES=/sbin/arptables
+
+iptables-up()
+{
+ $IPTABLES -P FORWARD DROP
+ $IPTABLES -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ $IPTABLES -A FORWARD -m physdev --physdev-in eth1 -j ACCEPT
+}
+
+ebtables-up()
+{
+ $EBTABLES -P FORWARD DROP
+ $EBTABLES -A FORWARD -o eth0 -j ACCEPT
+ $EBTABLES -A FORWARD -o eth1 -j ACCEPT
+}
+
+arptables-up()
+{
+ $ARPTABLES -P FORWARD DROP
+ $ARPTABLES -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Request --in-interface eth1 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
+}
+
+iptables-down()
+{
+ $IPTABLES -P FORWARD ACCEPT
+ $IPTABLES -D FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ $IPTABLES -D FORWARD -m physdev --physdev-in eth1 -j ACCEPT
+}
+
+ebtables-down()
+{
+ $EBTABLES -P FORWARD ACCEPT
+ $EBTABLES -D FORWARD -o eth0 -j ACCEPT
+ $EBTABLES -D FORWARD -o eth1 -j ACCEPT
+}
+
+arptables-down()
+{
+ $ARPTABLES -P FORWARD ACCEPT
+ $ARPTABLES -D FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Request --in-interface eth1 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
+}
+
+start()
+{
+ iptables-up
+ ebtables-up
+ arptables-up
+}
+
+stop()
+{
+ iptables-down
+ ebtables-down
+ arptables-down
+}
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart)
+ stop
+ start
+ RETVAL=$?
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|restart}"
+ exit 1
+ ;;
+esac
+exit $RETVAL
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
new file mode 100644
index 000000000..feaf1312d
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
@@ -0,0 +1,22 @@
+--- vif 2010-12-20 16:39:46.000000000 +0000
++++ vif_modified 2010-11-19 23:24:37.000000000 +0000
+@@ -213,6 +213,7 @@
+
+ # xs-xen.pq.hq:91e986b8e49f netback-wait-for-hotplug
+ xenstore-write "/local/domain/0/backend/vif/${DOMID}/${DEVID}/hotplug-status" "connected"
++ python /etc/xensource/scripts/vif_rules.py ${DOMID} online 2>&1 > /dev/null
+ fi
+ ;;
+
+@@ -224,9 +225,11 @@
+
+ remove)
+ if [ "${TYPE}" = "vif" ] ;then
++ python /etc/xensource/scripts/vif_rules.py ${DOMID} offline 2>&1 > /dev/null
+ xenstore-rm "${HOTPLUG}/hotplug"
+ fi
+ logger -t scripts-vif "${dev} has been removed"
+ remove_from_bridge
+ ;;
+ esac
++
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
new file mode 100755
index 000000000..d60816ce7
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This script is used to configure iptables, ebtables, and arptables rules on
+XenServer hosts.
+"""
+
+import os
+import subprocess
+import sys
+
+# This is written to Python 2.4, since that is what is available on XenServer
+import simplejson as json
+
+
+def main(dom_id, command, only_this_vif=None):
+ xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
+ % dom_id, True)
+ macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
+
+ for mac in macs:
+ xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
+ xsread = execute(xsr % (dom_id, mac), True)
+ data = json.loads(xsread)
+ for ip in data['ips']:
+ if data["label"] == "public":
+ vif = "vif%s.0" % dom_id
+ else:
+ vif = "vif%s.1" % dom_id
+
+ if (only_this_vif is None) or (vif == only_this_vif):
+ params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac'])
+ apply_ebtables_rules(command, params)
+ apply_arptables_rules(command, params)
+ apply_iptables_rules(command, params)
+
+
+def execute(command, return_stdout=False):
+ devnull = open(os.devnull, 'w')
+ proc = subprocess.Popen(command, shell=True, close_fds=True,
+ stdout=subprocess.PIPE, stderr=devnull)
+ devnull.close()
+ if return_stdout:
+ return proc.stdout.read()
+ else:
+ return None
+
+# A note about adding rules:
+# Whenever we add any rule to iptables, arptables or ebtables we first
+# delete the same rule to ensure the rule only exists once.
+
+
+def apply_iptables_rules(command, params):
+ iptables = lambda rule: execute("/sbin/iptables %s" % rule)
+
+ iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
+ -j ACCEPT" % params)
+ if command == 'online':
+ iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
+ -j ACCEPT" % params)
+
+
+def apply_arptables_rules(command, params):
+ arptables = lambda rule: execute("/sbin/arptables %s" % rule)
+
+ arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ if command == 'online':
+ arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+
+
+def apply_ebtables_rules(command, params):
+ ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
+
+ ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
+ params)
+ ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
+ params)
+ if command == 'online':
+ ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
+ -j ACCEPT" % params)
+ ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
+ -j ACCEPT" % params)
+
+ ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+ if command == 'online':
+ ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 3:
+ print "usage: %s dom_id online|offline [vif]" % \
+ os.path.basename(sys.argv[0])
+ sys.exit(1)
+ else:
+ dom_id, command = sys.argv[1:3]
+ vif = len(sys.argv) == 4 and sys.argv[3] or None
+ main(dom_id, command, vif)
diff --git a/plugins/xenapi/README b/plugins/xenserver/xenapi/README
index fbd471035..fbd471035 100644
--- a/plugins/xenapi/README
+++ b/plugins/xenserver/xenapi/README
diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
index 271e7337f..8ee2f748d 100644
--- a/plugins/xenapi/etc/xapi.d/plugins/objectstore
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
@@ -43,24 +43,43 @@ SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
-
+def is_vdi_pv(session,args):
+    logging.debug("Checking whether VDI has PV kernel")
+ vdi = exists(args, 'vdi-ref')
+ pv=with_vdi_in_dom0(session, vdi, False,
+ lambda dev: _is_vdi_pv('/dev/%s' % dev))
+ if pv:
+ return 'true'
+ else:
+ return 'false'
+
+def _is_vdi_pv(dest):
+ logging.debug("Running pygrub against %s",dest)
+ output=os.popen('pygrub -qn %s' % dest)
+ pv=False
+ for line in output.readlines():
+ #try to find kernel string
+ m=re.search('(?<=kernel:)/.*(?:>)',line)
+ if m:
+ if m.group(0).find('xen')!=-1:
+ pv=True
+ logging.debug("PV:%d",pv)
+ return pv
+
def get_vdi(session, args):
src_url = exists(args, 'src_url')
username = exists(args, 'username')
password = exists(args, 'password')
+ raw_image=validate_bool(args, 'raw', 'false')
add_partition = validate_bool(args, 'add_partition', 'false')
-
(proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
-
sr = find_sr(session)
if sr is None:
raise Exception('Cannot find SR to write VDI to')
-
virtual_size = \
get_content_length(proto, netloc, url_path, username, password)
if virtual_size < 0:
raise Exception('Cannot get VDI size')
-
vdi_size = virtual_size
if add_partition:
# Make room for MBR.
@@ -69,18 +88,19 @@ def get_vdi(session, args):
vdi = create_vdi(session, sr, src_url, vdi_size, False)
with_vdi_in_dom0(session, vdi, False,
lambda dev: get_vdi_(proto, netloc, url_path,
- username, password, add_partition,
+ username, password, add_partition,raw_image,
virtual_size, '/dev/%s' % dev))
return session.xenapi.VDI.get_uuid(vdi)
-def get_vdi_(proto, netloc, url_path, username, password, add_partition,
+def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image,
virtual_size, dest):
- if add_partition:
+ #Salvatore: vdi should not be partitioned for raw images
+ if (add_partition and not raw_image):
write_partition(virtual_size, dest)
- offset = add_partition and MBR_SIZE_BYTES or 0
+ offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
get(proto, netloc, url_path, username, password, dest, offset)
@@ -228,4 +248,5 @@ def download_all(response, length, dest_file, offset):
if __name__ == '__main__':
XenAPIPlugin.dispatch({'get_vdi': get_vdi,
- 'get_kernel': get_kernel})
+ 'get_kernel': get_kernel,
+ 'is_vdi_pv': is_vdi_pv})
diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 2d323a016..2d323a016 100755
--- a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
diff --git a/run_tests.sh b/run_tests.sh
index 67214996d..ffb0b6295 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -21,6 +21,7 @@ function process_option {
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-f|--force) let force=1;;
+ *) noseargs="$noseargs $1"
esac
}
@@ -29,15 +30,18 @@ with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
+noseargs=
for arg in "$@"; do
process_option $arg
done
+NOSETESTS="nosetests -v $noseargs"
+
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
rm -f nova.sqlite
- nosetests -v
+ $NOSETESTS
exit
fi
@@ -49,7 +53,7 @@ fi
if [ -e ${venv} ]; then
${with_venv} rm -f nova.sqlite
- ${with_venv} nosetests -v $@
+ ${with_venv} $NOSETESTS
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
@@ -62,10 +66,10 @@ else
python tools/install_venv.py
else
rm -f nova.sqlite
- nosetests -v
+ $NOSETESTS
exit
fi
fi
${with_venv} rm -f nova.sqlite
- ${with_venv} nosetests -v $@
+ ${with_venv} $NOSETESTS
fi