108 files changed, 2285 insertions, 732 deletions
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 657f97b48..617e2411d 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -16,10 +16,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-'''
+"""
 Websocket proxy that is compatible with OpenStack Nova
 noVNC consoles. Leverages websockify.py by Joel Martin
-'''
+"""
 
 import os
 import sys
diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy
index 17f8cb7c2..405092942 100755
--- a/bin/nova-spicehtml5proxy
+++ b/bin/nova-spicehtml5proxy
@@ -16,10 +16,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-'''
+"""
 Websocket proxy that is compatible with OpenStack Nova
 SPICE HTML5 consoles. Leverages websockify.py by Joel Martin
-'''
+"""
 
 import os
 import sys
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json
new file mode 100644
index 000000000..1d308d4ae
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json
@@ -0,0 +1,17 @@
+{
+    "instance_usage_audit_logs": {
+        "hosts_not_run": [
+            "f4eb7cfd155f4574967f8b55a7faed75"
+        ],
+        "log": {},
+        "num_hosts": 1,
+        "num_hosts_done": 0,
+        "num_hosts_not_run": 1,
+        "num_hosts_running": 0,
+        "overall_status": "0 of 1 hosts done. 0 errors.",
+        "period_beginning": "2012-12-01 00:00:00",
+        "period_ending": "2013-01-01 00:00:00",
+        "total_errors": 0,
+        "total_instances": 0
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml
new file mode 100644
index 000000000..82d157fb9
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_logs>
+    <total_errors>0</total_errors>
+    <total_instances>0</total_instances>
+    <log/>
+    <num_hosts_running>0</num_hosts_running>
+    <num_hosts_done>0</num_hosts_done>
+    <num_hosts_not_run>1</num_hosts_not_run>
+    <hosts_not_run>
+        <item>107debd115684f098d4c73ffac7ec515</item>
+    </hosts_not_run>
+    <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+    <period_ending>2013-01-01 00:00:00</period_ending>
+    <period_beginning>2012-12-01 00:00:00</period_beginning>
+    <num_hosts>1</num_hosts>
+</instance_usage_audit_logs>
\ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json
new file mode 100644
index 000000000..2b5fe54c1
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json
@@ -0,0 +1,17 @@
+{
+    "instance_usage_audit_log": {
+        "hosts_not_run": [
+            "8e33da2b48684ef3ab165444d6a7384c"
+        ],
+        "log": {},
+        "num_hosts": 1,
+        "num_hosts_done": 0,
+        "num_hosts_not_run": 1,
+        "num_hosts_running": 0,
+        "overall_status": "0 of 1 hosts done. 0 errors.",
+        "period_beginning": "2012-06-01 00:00:00",
+        "period_ending": "2012-07-01 00:00:00",
+        "total_errors": 0,
+        "total_instances": 0
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml
new file mode 100644
index 000000000..453689737
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_log>
+    <total_errors>0</total_errors>
+    <total_instances>0</total_instances>
+    <log/>
+    <num_hosts_running>0</num_hosts_running>
+    <num_hosts_done>0</num_hosts_done>
+    <num_hosts_not_run>1</num_hosts_not_run>
+    <hosts_not_run>
+        <item>4b54478b73734afcbf0e2676a3303d1a</item>
+    </hosts_not_run>
+    <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+    <period_ending>2012-07-01 00:00:00</period_ending>
+    <period_beginning>2012-06-01 00:00:00</period_beginning>
+    <num_hosts>1</num_hosts>
+</instance_usage_audit_log>
\ No newline at end of file
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 97ae89a38..1a446263f 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -83,6 +83,10 @@
     "compute_extension:virtual_interfaces": "",
     "compute_extension:virtual_storage_arrays": "",
     "compute_extension:volumes": "",
+    "compute_extension:volume_attachments:index": "",
+    "compute_extension:volume_attachments:show": "",
+    "compute_extension:volume_attachments:create": "",
+    "compute_extension:volume_attachments:delete": "",
     "compute_extension:volumetypes": "",
     "compute_extension:availability_zone:list": "",
     "compute_extension:availability_zone:detail": "rule:admin_api",
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index bc4d0f0f9..6edf9244f 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -23,7 +23,6 @@ import sys
 import telnetlib
 import tempfile
 
-import coverage
 from webob import exc
 
 from nova.api.openstack import extensions
@@ -47,7 +46,6 @@ class CoverageController(object):
     def __init__(self):
         self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
         data_out = os.path.join(self.data_path, '.nova-coverage')
-        self.coverInst = coverage.coverage(data_file=data_out)
         self.compute_api = compute_api.API()
         self.network_api = network_api.API()
         self.conductor_api = conductor_api.API()
@@ -57,6 +55,12 @@ class CoverageController(object):
         self.cert_api = cert_api.CertAPI()
         self.services = []
         self.combine = False
+        try:
+            import coverage
+            self.coverInst = coverage.coverage(data_file=data_out)
+            self.has_coverage = True
+        except ImportError:
+            self.has_coverage = False
         super(CoverageController, self).__init__()
 
     def _find_services(self, req):
@@ -238,6 +242,9 @@ class CoverageController(object):
             'report': self._report_coverage,
         }
         authorize(req.environ['nova.context'])
+        if not self.has_coverage:
+            msg = _("Python coverage module is not installed.")
+            raise exc.HTTPServiceUnavailable(explanation=msg)
         for action, data in body.iteritems():
             if action == 'stop':
                 return _actions[action](req)
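Note: the coverage_ext.py change above defers the `coverage` import from module scope to the controller constructor, so the extension loads even when python-coverage is absent and requests get a 503 instead of an import-time crash. A minimal standalone sketch of the same guarded-import pattern (class and attribute names here are illustrative, not Nova's):

import os
import tempfile


class OptionalCoverage(object):
    """Degrade gracefully when python-coverage is not installed."""

    def __init__(self):
        data_out = os.path.join(tempfile.mkdtemp(), '.coverage')
        try:
            import coverage  # deferred: module import can no longer fail
            self.inst = coverage.coverage(data_file=data_out)
            self.available = True
        except ImportError:
            self.available = False

    def start(self):
        if not self.available:
            raise RuntimeError('coverage module is not installed')
        self.inst.start()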
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index c8deb7b4c..84f157b6a 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -34,6 +34,15 @@ class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
         return xmlutil.MasterTemplate(xmlutil.make_flat_dict('extra_specs'), 1)
 
 
+class ExtraSpecTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        sel = xmlutil.Selector(xmlutil.get_items, 0)
+        root = xmlutil.TemplateElement('extra_spec', selector=sel)
+        root.set('key', 0)
+        root.text = 1
+        return xmlutil.MasterTemplate(root, 1)
+
+
 class FlavorExtraSpecsController(object):
     """The flavor extra specs API controller for the OpenStack API."""
 
@@ -70,7 +79,7 @@ class FlavorExtraSpecsController(object):
             raise exc.HTTPBadRequest(explanation=unicode(error))
         return body
 
-    @wsgi.serializers(xml=ExtraSpecsTemplate)
+    @wsgi.serializers(xml=ExtraSpecTemplate)
     def update(self, req, flavor_id, id, body):
         context = req.environ['nova.context']
         authorize(context)
@@ -87,10 +96,9 @@ class FlavorExtraSpecsController(object):
                                                              body)
         except exception.MetadataLimitExceeded as error:
             raise exc.HTTPBadRequest(explanation=unicode(error))
-
         return body
 
-    @wsgi.serializers(xml=ExtraSpecsTemplate)
+    @wsgi.serializers(xml=ExtraSpecTemplate)
     def show(self, req, flavor_id, id):
         """Return a single extra spec item."""
         context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 47c717495..3fc503217 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -33,6 +33,15 @@ from nova import volume
 
 LOG = logging.getLogger(__name__)
 authorize = extensions.extension_authorizer('compute', 'volumes')
+authorize_attach_index = extensions.extension_authorizer('compute',
+                                                 'volume_attachments:index')
+authorize_attach_show = extensions.extension_authorizer('compute',
+                                                 'volume_attachments:show')
+authorize_attach_create = extensions.extension_authorizer('compute',
+                                                 'volume_attachments:create')
+authorize_attach_delete = extensions.extension_authorizer('compute',
+                                                 'volume_attachments:delete')
+
 
 def _translate_volume_detail_view(context, vol):
     """Maps keys for volumes details view."""
@@ -329,6 +338,8 @@ class VolumeAttachmentController(wsgi.Controller):
     @wsgi.serializers(xml=VolumeAttachmentsTemplate)
     def index(self, req, server_id):
         """Returns the list of volume attachments for a given instance."""
+        context = req.environ['nova.context']
+        authorize_attach_index(context)
         return self._items(req, server_id,
                            entity_maker=_translate_attachment_summary_view)
 
@@ -337,6 +348,7 @@ class VolumeAttachmentController(wsgi.Controller):
         """Return data about the given volume attachment."""
         context = req.environ['nova.context']
         authorize(context)
+        authorize_attach_show(context)
 
         volume_id = id
         try:
@@ -377,6 +389,7 @@ class VolumeAttachmentController(wsgi.Controller):
         """Attach a volume to an instance."""
         context = req.environ['nova.context']
         authorize(context)
+        authorize_attach_create(context)
         if not self.is_valid_body(body, 'volumeAttachment'):
             raise exc.HTTPUnprocessableEntity()
 
@@ -423,6 +436,7 @@ class VolumeAttachmentController(wsgi.Controller):
         """Detach a volume from an instance."""
         context = req.environ['nova.context']
         authorize(context)
+        authorize_attach_delete(context)
 
         volume_id = id
         LOG.audit(_("Detach volume %s"), volume_id, context=context)
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 8b593d742..a6f255081 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -1182,10 +1182,18 @@ class Fault(webob.exc.HTTPException):
         # Replace the body with fault details.
         code = self.wrapped_exc.status_int
         fault_name = self._fault_names.get(code, "computeFault")
+        explanation = self.wrapped_exc.explanation
+        offset = explanation.find("Traceback")
+        if offset is not -1:
+            LOG.debug(_("API request failed, fault raised to the top of"
+                        " the stack. Detailed stacktrace %s") %
+                      explanation)
+            explanation = explanation[0:offset - 1]
+
         fault_data = {
             fault_name: {
                 'code': code,
-                'message': self.wrapped_exc.explanation}}
+                'message': explanation}}
         if code == 413:
             retry = self.wrapped_exc.headers.get('Retry-After', None)
             if retry:
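Note: the wsgi.py hunk above logs the full traceback but strips it from the fault message returned to API clients. One caveat worth flagging: `if offset is not -1` tests identity rather than equality; it happens to work because CPython caches small integers, but `offset != -1` is the correct spelling. A hedged sketch of the intended logic (function name is illustrative):

def scrub_explanation(explanation, log=None):
    """Strip a traceback out of a user-visible fault message."""
    offset = explanation.find("Traceback")
    if offset != -1:  # equality, not identity
        if log is not None:
            log("API request failed, fault raised to the top of the stack. "
                "Detailed stacktrace %s" % explanation)
        explanation = explanation[:offset].rstrip()
    return explanation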
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index 09cbd98b8..711eee1fa 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -17,7 +17,6 @@
 
 from nova import db
 from nova.openstack.common import cfg
-from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
 
 availability_zone_opts = [
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 06ce2e07e..a9d0a1bdd 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -508,6 +508,13 @@ class API(base.Base):
         availability_zone, forced_host = self._handle_availability_zone(
                 availability_zone)
 
+        system_metadata = {}
+        instance_type_props = ['id', 'name', 'memory_mb', 'vcpus',
+                               'root_gb', 'ephemeral_gb', 'flavorid',
+                               'swap', 'rxtx_factor', 'vcpu_weight']
+        for k in instance_type_props:
+            system_metadata["instance_type_%s" % k] = instance_type[k]
+
         base_options = {
             'reservation_id': reservation_id,
             'image_ref': image_href,
@@ -537,7 +544,8 @@ class API(base.Base):
             'access_ip_v6': access_ip_v6,
             'availability_zone': availability_zone,
             'root_device_name': root_device_name,
-            'progress': 0}
+            'progress': 0,
+            'system_metadata': system_metadata}
 
         options_from_image = self._inherit_properties_from_image(
                 image, auto_disk_config)
@@ -558,6 +566,11 @@ class API(base.Base):
                     security_group, block_device_mapping)
             instances.append(instance)
             instance_uuids.append(instance['uuid'])
+            self._validate_bdm(context, instance)
+            # send a state update notification for the initial create to
+            # show it going from non-existent to BUILDING
+            notifications.send_update_with_states(context, instance, None,
+                    vm_states.BUILDING, None, None, service="api")
 
         # In the case of any exceptions, attempt DB cleanup and rollback the
         # quota reservations.
@@ -704,6 +717,23 @@ class API(base.Base):
             self.db.block_device_mapping_update_or_create(elevated_context,
                                                           values)
 
+    def _validate_bdm(self, context, instance):
+        for bdm in self.db.block_device_mapping_get_all_by_instance(
+                context, instance['uuid']):
+            # NOTE(vish): For now, just make sure the volumes are accessible.
+            snapshot_id = bdm.get('snapshot_id')
+            volume_id = bdm.get('volume_id')
+            if volume_id is not None:
+                try:
+                    self.volume_api.get(context, volume_id)
+                except Exception:
+                    raise exception.InvalidBDMVolume(id=volume_id)
+            elif snapshot_id is not None:
+                try:
+                    self.volume_api.get_snapshot(context, snapshot_id)
+                except Exception:
+                    raise exception.InvalidBDMSnapshot(id=snapshot_id)
+
     def _populate_instance_for_bdm(self, context, instance, instance_type,
                                    image, block_device_mapping):
         """Populate instance block device mapping information."""
@@ -818,11 +848,6 @@ class API(base.Base):
         self._populate_instance_for_bdm(context, instance, instance_type,
                                         image, block_device_mapping)
 
-        # send a state update notification for the initial create to
-        # show it going from non-existent to BUILDING
-        notifications.send_update_with_states(context, instance, None,
-                vm_states.BUILDING, None, None, service="api")
-
        return instance
 
     def _check_create_policies(self, context, availability_zone,
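Note: `_validate_bdm` above fails fast at boot-request time when a block device mapping names a volume or snapshot the volume service cannot return, raising the new InvalidBDMVolume/InvalidBDMSnapshot exceptions added in nova/exception.py further down. Roughly, for a mapping such as (illustrative values):

block_device_mapping = [{
    'device_name': '/dev/vdb',
    'volume_id': 'vol-0001',   # checked via self.volume_api.get()
    'snapshot_id': None,       # else checked via get_snapshot()
}]

an inaccessible 'vol-0001' now surfaces as a 400-class error from the API instead of a late failure on the compute host.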
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 275611cdf..b52e85440 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -57,13 +57,11 @@ from nova import network
 from nova.network import model as network_model
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
-from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import lockutils
 from nova.openstack.common import log as logging
 from nova.openstack.common.notifier import api as notifier
 from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
 from nova.openstack.common import timeutils
 from nova import paths
 from nova import quota
@@ -229,7 +227,8 @@ def wrap_instance_fault(function):
 
             with excutils.save_and_reraise_exception():
                 compute_utils.add_instance_fault_from_exc(context,
-                        kwargs['instance'], e, sys.exc_info())
+                        self.conductor_api, kwargs['instance'],
+                        e, sys.exc_info())
 
     return decorated_function
 
@@ -732,8 +731,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_uuid = instance['uuid']
         rescheduled = False
 
-        compute_utils.add_instance_fault_from_exc(context, instance,
-                exc_info[1], exc_info=exc_info)
+        compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
+                instance, exc_info[1], exc_info=exc_info)
 
         try:
             self._deallocate_network(context, instance)
@@ -1465,7 +1464,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
                       context=context, instance=instance)
             compute_utils.add_instance_fault_from_exc(context,
-                    instance, exc, sys.exc_info())
+                    self.conductor_api, instance, exc, sys.exc_info())
 
             # Fall through and reset task_state to None
         current_power_state = self._get_power_state(context, instance)
@@ -1995,8 +1994,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         rescheduled = False
         instance_uuid = instance['uuid']
 
-        compute_utils.add_instance_fault_from_exc(context, instance,
-                exc_info[0], exc_info=exc_info)
+        compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
+                instance, exc_info[0], exc_info=exc_info)
 
         try:
             scheduler_method = self.scheduler_rpcapi.prep_resize
@@ -3069,8 +3068,8 @@ class ComputeManager(manager.SchedulerDependentManager):
             vm_state = instance['vm_state']
             task_state = instance['task_state']
             if vm_state != vm_states.RESIZED or task_state is not None:
-                reason = _("In states %(vm_state)s/%(task_state)s, not"
-                           "RESIZED/None")
+                reason = _("In states %(vm_state)s/%(task_state)s, not "
+                           "RESIZED/None")
                 _set_migration_to_error(migration, reason % locals(),
                                         instance=instance)
                 continue
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index be0360185..f5d3a8008 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -293,12 +293,14 @@ class ResourceTracker(object):
             # Need to create the ComputeNode record:
             resources['service_id'] = service['id']
             self._create(context, resources)
-            LOG.info(_('Compute_service record created for %s ') % self.host)
+            LOG.info(_('Compute_service record created for %(host)s:%(node)s')
+                    % {'host': self.host, 'node': self.nodename})
 
         else:
             # just update the record:
             self._update(context, resources, prune_stats=True)
-            LOG.info(_('Compute_service record updated for %s ') % self.host)
+            LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
+                    % {'host': self.host, 'node': self.nodename})
 
     def _create(self, context, values):
         """Create the compute node in the DB."""
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 1874e886f..daf80874c 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -37,7 +37,8 @@ CONF.import_opt('host', 'nova.netconf')
 LOG = log.getLogger(__name__)
 
 
-def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
+def add_instance_fault_from_exc(context, conductor,
+                                instance, fault, exc_info=None):
     """Adds the specified fault to the database."""
 
     code = 500
@@ -61,7 +62,7 @@ def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
         'details': unicode(details),
         'host': CONF.host
     }
-    db.instance_fault_create(context, values)
+    conductor.instance_fault_create(context, values)
 
 
 def get_device_name_for_instance(context, instance, bdms, device):
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index d05c94877..b0e5afdcb 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -133,6 +133,9 @@ class LocalAPI(object):
     def instance_type_get(self, context, instance_type_id):
         return self._manager.instance_type_get(context, instance_type_id)
 
+    def instance_fault_create(self, context, values):
+        return self._manager.instance_fault_create(context, values)
+
     def migration_get(self, context, migration_id):
         return self._manager.migration_get(context, migration_id)
 
@@ -391,6 +394,9 @@ class API(object):
         return self.conductor_rpcapi.instance_type_get(context,
                                                        instance_type_id)
 
+    def instance_fault_create(self, context, values):
+        return self.conductor_rpcapi.instance_fault_create(context, values)
+
     def migration_get(self, context, migration_id):
         return self.conductor_rpcapi.migration_get(context, migration_id)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 87b143912..c4ae33622 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
 class ConductorManager(manager.SchedulerDependentManager):
     """Mission: TBD."""
 
-    RPC_API_VERSION = '1.35'
+    RPC_API_VERSION = '1.36'
 
     def __init__(self, *args, **kwargs):
         super(ConductorManager, self).__init__(service_name='conductor',
@@ -258,6 +258,10 @@ class ConductorManager(manager.SchedulerDependentManager):
         result = self.db.instance_type_get(context, instance_type_id)
         return jsonutils.to_primitive(result)
 
+    def instance_fault_create(self, context, values):
+        result = self.db.instance_fault_create(context, values)
+        return jsonutils.to_primitive(result)
+
     def vol_get_usage_by_time(self, context, start_time):
         result = self.db.vol_get_usage_by_time(context, start_time)
         return jsonutils.to_primitive(result)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 1699c85ed..04e576cce 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -68,6 +68,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     1.33 - Added compute_node_create and compute_node_update
     1.34 - Added service_update
     1.35 - Added instance_get_active_by_window_joined
+    1.36 - Added instance_fault_create
     """
 
     BASE_RPC_API_VERSION = '1.0'
@@ -293,6 +294,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
         msg = self.make_msg('instance_get_all_by_host', host=host, node=node)
         return self.call(context, msg, version='1.32')
 
+    def instance_fault_create(self, context, values):
+        msg = self.make_msg('instance_fault_create', values=values)
+        return self.call(context, msg, version='1.36')
+
     def action_event_start(self, context, values):
         msg = self.make_msg('action_event_start', values=values)
         return self.call(context, msg, version='1.25')
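Note: `instance_fault_create` follows the usual three-step conductor recipe visible above: implement the method in ConductorManager, bump RPC_API_VERSION (1.35 to 1.36), and pin the client-side call to the new version so an older conductor is never sent a message it cannot handle. For a caller, the effect is that fault records flow through the conductor instead of direct DB access; a hedged sketch with purely illustrative values:

from nova.conductor import api as conductor_api

conductor = conductor_api.API()
values = {'instance_uuid': instance['uuid'],   # illustrative fault record
          'code': 500,
          'message': 'NotImplementedError',
          'details': 'traceback ...',
          'host': 'compute-1'}
conductor.instance_fault_create(context, values)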
diff --git a/nova/context.py b/nova/context.py
index 1a566cb5a..8731e012d 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -46,7 +46,7 @@ class RequestContext(object):
                  roles=None, remote_address=None, timestamp=None,
                  request_id=None, auth_token=None, overwrite=True,
                  quota_class=None, user_name=None, project_name=None,
-                 service_catalog=None, instance_lock_checked=False, **kwargs):
+                 service_catalog=[], instance_lock_checked=False, **kwargs):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
@@ -79,7 +79,9 @@ class RequestContext(object):
             request_id = generate_request_id()
         self.request_id = request_id
         self.auth_token = auth_token
-        self.service_catalog = service_catalog
+        # Only include required parts of service_catalog
+        self.service_catalog = [s for s in service_catalog
+            if s.get('type') in ('volume')]
         self.instance_lock_checked = instance_lock_checked
 
         # NOTE(markmc): this attribute is currently only used by the
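Note: two Python subtleties in the context.py hunk above are worth flagging. `service_catalog=[]` introduces a mutable default argument (shared across calls if ever mutated in place), and `s.get('type') in ('volume')` tests membership in the *string* 'volume' because `('volume')` is not a tuple, so any substring such as 'vol' also matches. The presumably intended form (sketch, function name illustrative):

def filter_catalog(service_catalog=None):
    """Keep only the volume endpoint(s) from an incoming catalog."""
    service_catalog = service_catalog or []
    return [s for s in service_catalog
            if s.get('type') in ('volume',)]   # note the trailing comma

filter_catalog([{'type': 'volume'}, {'type': 'compute'}])
# -> [{'type': 'volume'}]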
+ """ + return IMPL.fixed_ip_get(context, id, get_network) def fixed_ip_get_all(context): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a6f585eef..61f27f31c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -793,15 +793,16 @@ def floating_ip_fixed_ip_associate(context, floating_address, floating_ip_ref = _floating_ip_get_by_address(context, floating_address, session=session) - fixed_ip_ref = fixed_ip_get_by_address(context, - fixed_address, - session=session) + fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ + filter_by(address=fixed_address).\ + options(joinedload('network')).\ + first() if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]: return None floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"] floating_ip_ref.host = host floating_ip_ref.save(session=session) - return fixed_address + return fixed_ip_ref @require_context @@ -834,15 +835,12 @@ def floating_ip_disassociate(context, address): fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ filter_by(id=floating_ip_ref['fixed_ip_id']).\ + options(joinedload('network')).\ first() - if fixed_ip_ref: - fixed_ip_address = fixed_ip_ref['address'] - else: - fixed_ip_address = None floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None floating_ip_ref.save(session=session) - return fixed_ip_address + return fixed_ip_ref @require_context @@ -1140,10 +1138,11 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time): @require_context -def fixed_ip_get(context, id): - result = model_query(context, models.FixedIp).\ - filter_by(id=id).\ - first() +def fixed_ip_get(context, id, get_network=False): + query = model_query(context, models.FixedIp).filter_by(id=id) + if get_network: + query = query.options(joinedload('network')) + result = query.first() if not result: raise exception.FixedIpNotFound(id=id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py index a20799fbe..d93cd1ead 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py @@ -37,9 +37,9 @@ def upgrade(migrate_engine): if rec['binary'] != 'nova-compute': continue # if zone doesn't exist create - result = aggregate_metadata.select().where(aggregate_metadata.c.key == - 'availability_zone' and - aggregate_metadata.c.key == rec['availability_zone']).execute() + result = aggregate_metadata.select().where( + aggregate_metadata.c.key == 'availability_zone').where( + aggregate_metadata.c.value == rec['availability_zone']).execute() result = [r for r in result] if len(result) > 0: agg_id = result[0].aggregate_id diff --git a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py index d4bd991f7..c49e8272b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py @@ -4,8 +4,7 @@ from sqlalchemy.ext.compiler import compiles from sqlalchemy import MetaData, Table, Column, Index from sqlalchemy import select from sqlalchemy.sql.expression import UpdateBase -from sqlalchemy.sql import literal_column -from sqlalchemy import String, Integer, Boolean +from sqlalchemy import Integer, Boolean from sqlalchemy.types import NullType, BigInteger diff --git 
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
index d4bd991f7..c49e8272b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
@@ -4,8 +4,7 @@ from sqlalchemy.ext.compiler import compiles
 from sqlalchemy import MetaData, Table, Column, Index
 from sqlalchemy import select
 from sqlalchemy.sql.expression import UpdateBase
-from sqlalchemy.sql import literal_column
-from sqlalchemy import String, Integer, Boolean
+from sqlalchemy import Integer, Boolean
 from sqlalchemy.types import NullType, BigInteger
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
new file mode 100644
index 000000000..20e75a6eb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
@@ -0,0 +1,49 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, select, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+    instance_types = Table('instance_types', meta, autoload=True)
+    sys_meta = Table('instance_system_metadata', meta, autoload=True)
+
+    # Taken from nova/compute/api.py
+    instance_type_props = ['id', 'name', 'memory_mb', 'vcpus',
+                           'root_gb', 'ephemeral_gb', 'flavorid',
+                           'swap', 'rxtx_factor', 'vcpu_weight']
+
+    select_columns = [instances.c.uuid]
+    select_columns += [getattr(instance_types.c, name)
+                       for name in instance_type_props]
+
+    q = select(select_columns, from_obj=instances.join(
+        instance_types,
+        instances.c.instance_type_id == instance_types.c.id))
+
+    i = sys_meta.insert()
+    for values in q.execute():
+        for index in range(0, len(instance_type_props)):
+            i.execute({"key": "instance_type_%s" % instance_type_props[index],
+                       "value": str(values[index + 1]),
+                       "instance_uuid": values[0]})
+
+
+def downgrade(migration_engine):
+    # This migration only touches data, and only metadata at that. No need
+    # to go through and delete old metadata items.
+    pass
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 05452f2ad..53d6f53bd 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -719,6 +719,12 @@ class FixedIp(BASE, NovaBase):
     leased = Column(Boolean, default=False)
     reserved = Column(Boolean, default=False)
     host = Column(String(255))
+    network = relationship(Network,
+                           backref=backref('fixed_ips'),
+                           foreign_keys=network_id,
+                           primaryjoin='and_('
+                               'FixedIp.network_id == Network.id,'
+                               'FixedIp.deleted == False)')
 
 
 class FloatingIp(BASE, NovaBase):
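Note: the new `FixedIp.network` relationship in models.py above is what makes the `joinedload('network')` options in the sqlalchemy/api.py hunks work, so callers receive the fixed IP row with its network pre-joined rather than issuing a second query. Typical usage through the DB API added earlier (sketch):

# One round trip: the fixed IP row with its network eagerly joined.
fixed_ip = db.fixed_ip_get(context, fixed_ip_id, get_network=True)
bridge = fixed_ip['network']['bridge']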
+ message = _("Failed to resume instance: %(reason)s.") + + +class InstancePowerOnFailure(Invalid): + message = _("Failed to power on instance: %(reason)s.") + + +class InstancePowerOffFailure(Invalid): + message = _("Failed to power off instance: %(reason)s.") class InstanceRebootFailure(Invalid): diff --git a/nova/network/l3.py b/nova/network/l3.py index baf77c112..14abf41eb 100644 --- a/nova/network/l3.py +++ b/nova/network/l3.py @@ -48,13 +48,16 @@ class L3Driver(object): """:returns: True/False (whether the driver is initialized).""" raise NotImplementedError() - def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): """Add a floating IP bound to the fixed IP with an optional l3_interface_id. Some drivers won't care about the - l3_interface_id so just pass None in that case""" + l3_interface_id so just pass None in that case. Network + is also an optional parameter.""" raise NotImplementedError() - def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): raise NotImplementedError() def add_vpn(self, public_ip, port, private_ip): @@ -96,15 +99,17 @@ class LinuxNetL3(L3Driver): def remove_gateway(self, network_ref): linux_net.unplug(network_ref) - def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): linux_net.bind_floating_ip(floating_ip, l3_interface_id) linux_net.ensure_floating_forward(floating_ip, fixed_ip, - l3_interface_id) + l3_interface_id, network) - def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): linux_net.unbind_floating_ip(floating_ip, l3_interface_id) linux_net.remove_floating_forward(floating_ip, fixed_ip, - l3_interface_id) + l3_interface_id, network) def add_vpn(self, public_ip, port, private_ip): linux_net.ensure_vpn_forward(public_ip, port, private_ip) @@ -140,10 +145,12 @@ class NullL3(L3Driver): def remove_gateway(self, network_ref): pass - def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): pass - def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id): + def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id, + network=None): pass def add_vpn(self, public_ip, port, private_ip): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a9b44e94a..49afc65c4 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -378,7 +378,7 @@ class IptablesManager(object): for table in tables: start, end = self._find_table(all_lines, table) all_lines[start:end] = self._modify_rules( - all_lines[start:end], tables[table]) + all_lines[start:end], tables[table], table_name=table) self.execute('%s-restore' % (cmd,), '-c', run_as_root=True, process_input='\n'.join(all_lines), attempts=5) @@ -392,18 +392,24 @@ class IptablesManager(object): start = lines.index('*%s' % table_name) - 1 except ValueError: # Couldn't find table_name - # For Unit Tests return (0, 0) end = lines[start:].index('COMMIT') + start + 2 return (start, end) - def _modify_rules(self, current_lines, table, binary=None): + def _modify_rules(self, current_lines, table, binary=None, + table_name=None): unwrapped_chains = table.unwrapped_chains chains = table.chains 
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index a9b44e94a..49afc65c4 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -378,7 +378,7 @@ class IptablesManager(object):
         for table in tables:
             start, end = self._find_table(all_lines, table)
             all_lines[start:end] = self._modify_rules(
-                    all_lines[start:end], tables[table])
+                    all_lines[start:end], tables[table], table_name=table)
 
         self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
                      process_input='\n'.join(all_lines), attempts=5)
@@ -392,18 +392,24 @@ class IptablesManager(object):
             start = lines.index('*%s' % table_name) - 1
         except ValueError:
             # Couldn't find table_name
-            # For Unit Tests
             return (0, 0)
         end = lines[start:].index('COMMIT') + start + 2
         return (start, end)
 
-    def _modify_rules(self, current_lines, table, binary=None):
+    def _modify_rules(self, current_lines, table, binary=None,
+                      table_name=None):
         unwrapped_chains = table.unwrapped_chains
         chains = table.chains
         remove_chains = table.remove_chains
         rules = table.rules
         remove_rules = table.remove_rules
 
+        if not current_lines:
+            fake_table = ['#Generated by nova',
+                          '*' + table_name, 'COMMIT',
+                          '#Completed by nova']
+            current_lines = fake_table
+
         # Remove any trace of our rules
         new_filter = filter(lambda line: binary_name not in line,
                             current_lines)
@@ -418,6 +424,9 @@ class IptablesManager(object):
             if not rule.startswith(':'):
                 break
 
+        if not seen_chains:
+            rules_index = 2
+
         our_rules = []
         bot_rules = []
         for rule in rules:
@@ -645,18 +654,29 @@ def ensure_vpn_forward(public_ip, port, private_ip):
     iptables_manager.apply()
 
 
-def ensure_floating_forward(floating_ip, fixed_ip, device):
+def ensure_floating_forward(floating_ip, fixed_ip, device, network):
     """Ensure floating ip forwarding rule."""
     for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
         iptables_manager.ipv4['nat'].add_rule(chain, rule)
     iptables_manager.apply()
+    if device != network['bridge']:
+        ensure_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
 
 
-def remove_floating_forward(floating_ip, fixed_ip, device):
+def remove_floating_forward(floating_ip, fixed_ip, device, network):
     """Remove forwarding for floating ip."""
     for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
         iptables_manager.ipv4['nat'].remove_rule(chain, rule)
     iptables_manager.apply()
+    if device != network['bridge']:
+        remove_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
+
+
+def floating_ebtables_rules(fixed_ip, network):
+    """Makes sure only in-network traffic is bridged."""
+    return (['PREROUTING --logical-in %s -p ipv4 --ip-src %s '
+             '! --ip-dst %s -j redirect --redirect-target ACCEPT' %
+             (network['bridge'], fixed_ip, network['cidr'])], 'nat')
 
 
 def floating_forward_rules(floating_ip, fixed_ip, device):
@@ -1421,18 +1441,18 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
 
 
 @lockutils.synchronized('ebtables', 'nova-', external=True)
-def ensure_ebtables_rules(rules):
+def ensure_ebtables_rules(rules, table='filter'):
     for rule in rules:
-        cmd = ['ebtables', '-D'] + rule.split()
+        cmd = ['ebtables', '-t', table, '-D'] + rule.split()
         _execute(*cmd, check_exit_code=False, run_as_root=True)
-        cmd[1] = '-I'
+        cmd[3] = '-I'
         _execute(*cmd, run_as_root=True)
 
 
 @lockutils.synchronized('ebtables', 'nova-', external=True)
-def remove_ebtables_rules(rules):
+def remove_ebtables_rules(rules, table='filter'):
     for rule in rules:
-        cmd = ['ebtables', '-D'] + rule.split()
+        cmd = ['ebtables', '-t', table, '-D'] + rule.split()
         _execute(*cmd, check_exit_code=False, run_as_root=True)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 897472d08..d1dabdfd9 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -47,7 +47,6 @@ import datetime
 import itertools
 import math
 import re
-import socket
 import uuid
 
 from eventlet import greenpool
@@ -294,17 +293,19 @@ class FloatingIP(object):
             fixed_ip_id = floating_ip.get('fixed_ip_id')
             if fixed_ip_id:
                 try:
-                    fixed_ip_ref = self.db.fixed_ip_get(admin_context,
-                                                        fixed_ip_id)
+                    fixed_ip = self.db.fixed_ip_get(admin_context,
+                                                    fixed_ip_id,
+                                                    get_network=True)
                 except exception.FixedIpNotFound:
                     msg = _('Fixed ip %(fixed_ip_id)s not found') % locals()
                     LOG.debug(msg)
                     continue
-                fixed_address = fixed_ip_ref['address']
                 interface = CONF.public_interface or floating_ip['interface']
                 try:
                     self.l3driver.add_floating_ip(floating_ip['address'],
-                                                  fixed_address, interface)
+                                                  fixed_ip['address'],
+                                                  interface,
+                                                  fixed_ip['network'])
                 except exception.ProcessExecutionError:
                     LOG.debug(_('Interface %(interface)s not found'), locals())
                     raise exception.NoFloatingIpInterface(interface=interface)
@@ -559,17 +560,17 @@ class FloatingIP(object):
         @lockutils.synchronized(unicode(floating_address), 'nova-')
         def do_associate():
             # associate floating ip
-            res = self.db.floating_ip_fixed_ip_associate(context,
-                                                         floating_address,
-                                                         fixed_address,
-                                                         self.host)
-            if not res:
+            fixed = self.db.floating_ip_fixed_ip_associate(context,
+                                                           floating_address,
+                                                           fixed_address,
+                                                           self.host)
+            if not fixed:
                 # NOTE(vish): ip was already associated
                 return
             try:
                 # gogo driver time
                 self.l3driver.add_floating_ip(floating_address, fixed_address,
-                                              interface)
+                                              interface, fixed['network'])
             except exception.ProcessExecutionError as e:
                 self.db.floating_ip_disassociate(context, floating_address)
                 if "Cannot find device" in str(e):
@@ -652,15 +653,15 @@ class FloatingIP(object):
             #             don't worry about this case because the minuscule
             #             window where the ip is on both hosts shouldn't cause
             #             any problems.
-            fixed_address = self.db.floating_ip_disassociate(context, address)
+            fixed = self.db.floating_ip_disassociate(context, address)
 
-            if not fixed_address:
+            if not fixed:
                 # NOTE(vish): ip was already disassociated
                 return
             if interface:
                 # go go driver time
-                self.l3driver.remove_floating_ip(address, fixed_address,
-                                                 interface)
+                self.l3driver.remove_floating_ip(address, fixed['address'],
+                                                 interface, fixed['network'])
             payload = dict(project_id=context.project_id,
                            instance_id=instance_uuid,
                            floating_ip=address)
@@ -733,10 +734,12 @@ class FloatingIP(object):
             interface = CONF.public_interface or floating_ip['interface']
             fixed_ip = self.db.fixed_ip_get(context,
-                                            floating_ip['fixed_ip_id'])
+                                            floating_ip['fixed_ip_id'],
+                                            get_network=True)
             self.l3driver.remove_floating_ip(floating_ip['address'],
                                              fixed_ip['address'],
-                                             interface)
+                                             interface,
+                                             fixed_ip['network'])
 
             # NOTE(wenjianhn): Make this address will not be bound to public
             # interface when restarts nova-network on dest compute node
@@ -774,10 +777,12 @@ class FloatingIP(object):
             interface = CONF.public_interface or floating_ip['interface']
             fixed_ip = self.db.fixed_ip_get(context,
-                                            floating_ip['fixed_ip_id'])
+                                            floating_ip['fixed_ip_id'],
+                                            get_network=True)
             self.l3driver.add_floating_ip(floating_ip['address'],
                                           fixed_ip['address'],
-                                          interface)
+                                          interface,
+                                          fixed_ip['network'])
 
     def _prepare_domain_entry(self, context, domain):
         domainref = self.db.dnsdomain_get(context, domain)
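Note: in the linux_net.py ebtables helpers above, the flip from `cmd[1] = '-I'` to `cmd[3] = '-I'` is easy to misread: inserting `'-t', table` into the command list shifted `-D` from index 1 to index 3, so the retry-as-insert must patch the new position. Illustrative rule text only:

table = 'nat'
rule = 'PREROUTING --logical-in br100 -j ACCEPT'   # illustrative rule
cmd = ['ebtables', '-t', table, '-D'] + rule.split()
#       [0]         [1]   [2]    [3]  ...
cmd[3] = '-I'   # turn the delete into an insert; index 1 would clobber '-t'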
diff --git a/nova/notifications.py b/nova/notifications.py
index 65428d03f..f40fff7f2 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -21,7 +21,6 @@ the system.
 
 import nova.context
 from nova import db
-from nova import exception
 from nova.image import glance
 from nova import network
 from nova.network import model as network_model
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 03bf0dd6e..7e51a15f2 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -25,12 +25,6 @@ class AffinityFilter(filters.BaseHostFilter):
     def __init__(self):
         self.compute_api = compute.API()
 
-    def _all_hosts(self, context):
-        all_hosts = {}
-        for instance in self.compute_api.get_all(context):
-            all_hosts[instance['uuid']] = instance['host']
-        return all_hosts
-
 
 class DifferentHostFilter(AffinityFilter):
     '''Schedule the instance on a different host from a set of instances.'''
@@ -38,15 +32,15 @@ class DifferentHostFilter(AffinityFilter):
     def host_passes(self, host_state, filter_properties):
         context = filter_properties['context']
         scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        me = host_state.host
 
         affinity_uuids = scheduler_hints.get('different_host', [])
         if isinstance(affinity_uuids, basestring):
             affinity_uuids = [affinity_uuids]
         if affinity_uuids:
-            all_hosts = self._all_hosts(context)
-            return not any([i for i in affinity_uuids
-                            if all_hosts.get(i) == me])
+            return not self.compute_api.get_all(context,
+                                                {'host': host_state.host,
+                                                 'uuid': affinity_uuids,
+                                                 'deleted': False})
         # With no different_host key
         return True
 
@@ -59,16 +53,14 @@ class SameHostFilter(AffinityFilter):
     def host_passes(self, host_state, filter_properties):
         context = filter_properties['context']
         scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        me = host_state.host
 
         affinity_uuids = scheduler_hints.get('same_host', [])
         if isinstance(affinity_uuids, basestring):
             affinity_uuids = [affinity_uuids]
         if affinity_uuids:
-            all_hosts = self._all_hosts(context)
-            return any([i for i
-                        in affinity_uuids
-                        if all_hosts.get(i) == me])
+            return self.compute_api.get_all(context,
+                                            {'host': host_state.host,
+                                             'uuid': affinity_uuids,
+                                             'deleted': False})
        # With no same_host key
        return True
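Note: the affinity filter rewrite above replaces the O(all-instances) `_all_hosts()` scan with one filtered `compute_api.get_all()` call per candidate host. Semantically the two filters reduce to (sketch using the names from the hunk above):

hits = compute_api.get_all(context, {'host': host_state.host,
                                     'uuid': affinity_uuids,
                                     'deleted': False})
passes_different_host = not hits    # no hinted instance on this host
passes_same_host = bool(hits)       # at least one hinted instance here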
-from nova import availability_zones from nova import db from nova.openstack.common import cfg from nova.scheduler import filters diff --git a/nova/service.py b/nova/service.py index c250673f4..2daceba80 100644 --- a/nova/service.py +++ b/nova/service.py @@ -38,7 +38,6 @@ from nova.openstack.common import eventlet_backdoor from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import rpc -from nova.openstack.common.rpc import common as rpc_common from nova import servicegroup from nova import utils from nova import version @@ -426,6 +425,7 @@ class Service(object): verstr = version.version_string_with_package() LOG.audit(_('Starting %(topic)s node (version %(version)s)'), {'topic': self.topic, 'version': verstr}) + self.basic_config_check() self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() @@ -570,6 +570,16 @@ class Service(object): ctxt = context.get_admin_context() return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + def basic_config_check(self): + """Perform basic config checks before starting processing.""" + # Make sure the tempdir exists and is writable + try: + with utils.tempdir() as tmpdir: + pass + except Exception as e: + LOG.error(_('Temporary directory is invalid: %s'), e) + sys.exit(1) + class WSGIService(object): """Provides ability to launch API from a 'paste' configuration.""" diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py index 686ee728b..18b4b74e5 100644 --- a/nova/servicegroup/drivers/db.py +++ b/nova/servicegroup/drivers/db.py @@ -16,7 +16,6 @@ from nova import conductor from nova import context -from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py index bc9f66eb2..5328ec2ee 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py @@ -172,7 +172,6 @@ class FlavorsExtraSpecsXMLSerializerTest(test.TestCase): expected = ("<?xml version='1.0' encoding='UTF-8'?>\n" '<extra_specs><key1>value1</key1></extra_specs>') text = serializer.serialize(dict(extra_specs={"key1": "value1"})) - print text self.assertEqual(text, expected) def test_deserializer(self): @@ -182,3 +181,10 @@ class FlavorsExtraSpecsXMLSerializerTest(test.TestCase): '<extra_specs><key1>value1</key1></extra_specs>') result = deserializer.deserialize(intext)['body'] self.assertEqual(result, expected) + + def test_show_update_serializer(self): + serializer = flavorextraspecs.ExtraSpecTemplate() + expected = ("<?xml version='1.0' encoding='UTF-8'?>\n" + '<extra_spec key="key1">value1</extra_spec>') + text = serializer.serialize(dict({"key1": "value1"})) + self.assertEqual(text, expected) diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py index a72f5bf0f..0c1378a67 100644 --- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py +++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py @@ -138,7 +138,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase): cores=90)) text = self.serializer.serialize(exemplar) - print text tree = etree.fromstring(text) self.assertEqual('quota_class_set', 
tree.tag) diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py index dab8c136e..8d518b815 100644 --- a/nova/tests/api/openstack/compute/contrib/test_quotas.py +++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py @@ -166,7 +166,6 @@ class QuotaXMLSerializerTest(test.TestCase): cores=90)) text = self.serializer.serialize(exemplar) - print text tree = etree.fromstring(text) self.assertEqual('quota_set', tree.tag) diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py index ccb58f858..231923e6d 100644 --- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -1180,7 +1180,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase): rule = dict(security_group_rule=raw_rule) text = self.rule_serializer.serialize(rule) - print text tree = etree.fromstring(text) self.assertEqual('security_group_rule', self._tag(tree)) @@ -1212,7 +1211,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase): sg_group = dict(security_group=raw_group) text = self.default_serializer.serialize(sg_group) - print text tree = etree.fromstring(text) self._verify_security_group(raw_group, tree) @@ -1265,7 +1263,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase): sg_groups = dict(security_groups=groups) text = self.index_serializer.serialize(sg_groups) - print text tree = etree.fromstring(text) self.assertEqual('security_groups', self._tag(tree)) diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py index ea4565e14..783275ea2 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py @@ -74,7 +74,6 @@ class TestServerDiagnosticsXMLSerializer(test.TestCase): exemplar = dict(diag1='foo', diag2='bar') text = serializer.serialize(exemplar) - print text tree = etree.fromstring(text) self.assertEqual('diagnostics', self._tag(tree)) diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py index b49a1feb4..13a4e9d61 100644 --- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py @@ -293,7 +293,6 @@ class SimpleTenantUsageSerializerTest(test.TestCase): tenant_usage = dict(tenant_usage=raw_usage) text = serializer.serialize(tenant_usage) - print text tree = etree.fromstring(text) self._verify_tenant_usage(raw_usage, tree) @@ -378,7 +377,6 @@ class SimpleTenantUsageSerializerTest(test.TestCase): tenant_usages = dict(tenant_usages=raw_usages) text = serializer.serialize(tenant_usages) - print text tree = etree.fromstring(text) self.assertEqual('tenant_usages', tree.tag) diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py index a223178fb..fa0c521fe 100644 --- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py +++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py @@ -271,7 +271,6 @@ class SnapshotSerializerTest(test.TestCase): ) text = serializer.serialize(dict(snapshot=raw_snapshot)) - print text tree = etree.fromstring(text) self._verify_snapshot(raw_snapshot, tree) @@ -298,7 +297,6 @@ class 
SnapshotSerializerTest(test.TestCase): )] text = serializer.serialize(dict(snapshots=raw_snapshots)) - print text tree = etree.fromstring(text) self.assertEqual('snapshots', tree.tag) diff --git a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py index 7c61cd51b..cf1c1593f 100644 --- a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py @@ -91,7 +91,6 @@ class ServerVirtualInterfaceSerializerTest(test.TestCase): vifs = dict(virtual_interfaces=raw_vifs) text = self.serializer.serialize(vifs) - print text tree = etree.fromstring(text) self.assertEqual('virtual_interfaces', self._tag(tree)) diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py index 3119f55e8..1a8a570e8 100644 --- a/nova/tests/api/openstack/compute/contrib/test_volumes.py +++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py @@ -348,7 +348,6 @@ class VolumeSerializerTest(test.TestCase): device='/foo') text = serializer.serialize(dict(volumeAttachment=raw_attach)) - print text tree = etree.fromstring(text) self.assertEqual('volumeAttachment', tree.tag) @@ -368,7 +367,6 @@ class VolumeSerializerTest(test.TestCase): device='/foo2')] text = serializer.serialize(dict(volumeAttachments=raw_attaches)) - print text tree = etree.fromstring(text) self.assertEqual('volumeAttachments', tree.tag) @@ -401,7 +399,6 @@ class VolumeSerializerTest(test.TestCase): ) text = serializer.serialize(dict(volume=raw_volume)) - print text tree = etree.fromstring(text) self._verify_volume(raw_volume, tree) @@ -450,7 +447,6 @@ class VolumeSerializerTest(test.TestCase): )] text = serializer.serialize(dict(volumes=raw_volumes)) - print text tree = etree.fromstring(text) self.assertEqual('volumes', tree.tag) diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py index 375355a70..e3fff380d 100644 --- a/nova/tests/api/openstack/compute/test_limits.py +++ b/nova/tests/api/openstack/compute/test_limits.py @@ -874,7 +874,6 @@ class LimitsXMLSerializationTest(test.TestCase): "absolute": {}}} output = serializer.serialize(fixture) - print output has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") self.assertTrue(has_dec) @@ -905,7 +904,6 @@ class LimitsXMLSerializationTest(test.TestCase): "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'limits') @@ -940,7 +938,6 @@ class LimitsXMLSerializationTest(test.TestCase): "absolute": {}}} output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'limits') diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 5456c23af..4bfb1c1e3 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -4508,7 +4508,6 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture) - print output has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") self.assertTrue(has_dec) @@ -4586,7 +4585,6 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'server') @@ -4717,7 +4715,6 @@ class 
ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'server') @@ -4814,7 +4811,6 @@ class ServerXMLSerializationTest(test.TestCase): ]} output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'servers_index') server_elems = root.findall('{0}server'.format(NS)) @@ -4878,7 +4874,6 @@ class ServerXMLSerializationTest(test.TestCase): ]} output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'servers_index') server_elems = root.findall('{0}server'.format(NS)) @@ -5165,7 +5160,6 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'server') diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py index 28b109215..bd2e9fa7b 100644 --- a/nova/tests/api/openstack/compute/test_versions.py +++ b/nova/tests/api/openstack/compute/test_versions.py @@ -228,7 +228,6 @@ class VersionsTest(test.TestCase): self.assertEqual(res.content_type, "application/xml") root = etree.XML(res.body) - print res.body xmlutil.validate_schema(root, 'versions') self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 7e49e4ab8..68a5f0bf4 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -377,7 +377,6 @@ class MetadataXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture) - print output has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") self.assertTrue(has_dec) @@ -390,7 +389,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'metadata') metadata_dict = fixture['metadata'] @@ -409,7 +407,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'metadata') metadata_dict = fixture['metadata'] @@ -428,7 +425,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'metadata') metadata_dict = fixture['metadata'] @@ -447,7 +443,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) meta_dict = fixture['meta'] (meta_key, meta_value) = meta_dict.items()[0] @@ -463,7 +458,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'metadata') metadata_dict = fixture['metadata'] @@ -482,7 +476,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) meta_dict = fixture['meta'] (meta_key, meta_value) = meta_dict.items()[0] @@ -499,7 +492,6 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture) - print output root = etree.XML(output) xmlutil.validate_schema(root, 'metadata') metadata_dict = fixture['metadata'] diff --git a/nova/tests/baremetal/test_nova_baremetal_manage.py 
b/nova/tests/baremetal/test_nova_baremetal_manage.py index 4d152a028..c4fdaac6b 100644 --- a/nova/tests/baremetal/test_nova_baremetal_manage.py +++ b/nova/tests/baremetal/test_nova_baremetal_manage.py @@ -20,10 +20,6 @@ import imp import os import sys -from nova import context -from nova import test -from nova.virt.baremetal import db as bmdb - from nova.tests.baremetal.db import base as bm_db_base TOPDIR = os.path.normpath(os.path.join( diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py index dafa9bab7..09f1079bf 100644 --- a/nova/tests/baremetal/test_pxe.py +++ b/nova/tests/baremetal/test_pxe.py @@ -21,12 +21,10 @@ import os -import mox from testtools import matchers from nova import exception from nova.openstack.common import cfg -from nova import test from nova.tests.baremetal.db import base as bm_db_base from nova.tests.baremetal.db import utils as bm_db_utils from nova.tests.image import fake as fake_image diff --git a/nova/tests/baremetal/test_utils.py b/nova/tests/baremetal/test_utils.py index 2615a26cb..df5112deb 100644 --- a/nova/tests/baremetal/test_utils.py +++ b/nova/tests/baremetal/test_utils.py @@ -18,12 +18,9 @@ """Tests for baremetal utils.""" -import mox - import errno import os -from nova import exception from nova import test from nova.virt.baremetal import utils diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py index da45721ed..1208368c2 100644 --- a/nova/tests/cells/test_cells_messaging.py +++ b/nova/tests/cells/test_cells_messaging.py @@ -14,8 +14,6 @@ """ Tests For Cells Messaging module """ -import mox - from nova.cells import messaging from nova.cells import utils as cells_utils from nova import context diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index dc381d800..7b284779e 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -2722,8 +2722,11 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance, - NotImplementedError('test'), exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, + self.compute.conductor_api, + instance, + NotImplementedError('test'), + exc_info) def test_add_instance_fault_with_remote_error(self): instance = self._create_fake_instance() @@ -2751,8 +2754,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance, exc, - exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, + self.compute.conductor_api, instance, exc, exc_info) def test_add_instance_fault_user_error(self): instance = self._create_fake_instance() @@ -2779,8 +2782,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance, user_exc, - exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, + self.compute.conductor_api, instance, user_exc, exc_info) def test_add_instance_fault_no_exc_info(self): instance = self._create_fake_instance() @@ -2798,8 +2801,10 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance, - 
NotImplementedError('test')) + compute_utils.add_instance_fault_from_exc(ctxt, + self.compute.conductor_api, + instance, + NotImplementedError('test')) def test_cleanup_running_deleted_instances(self): admin_context = context.get_admin_context() @@ -3780,6 +3785,28 @@ class ComputeAPITestCase(BaseTestCase): finally: db.instance_destroy(self.context, ref[0]['uuid']) + def test_create_saves_type_in_system_metadata(self): + instance_type = instance_types.get_default_instance_type() + (ref, resv_id) = self.compute_api.create( + self.context, + instance_type=instance_type, + image_href=None) + try: + sys_metadata = db.instance_system_metadata_get(self.context, + ref[0]['uuid']) + + instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb', + 'ephemeral_gb', 'flavorid', 'swap', + 'rxtx_factor', 'vcpu_weight'] + for key in instance_type_props: + sys_meta_key = "instance_type_%s" % key + self.assertTrue(sys_meta_key in sys_metadata) + self.assertEqual(str(instance_type[key]), + str(sys_metadata[sys_meta_key])) + + finally: + db.instance_destroy(self.context, ref[0]['uuid']) + def test_create_instance_associates_security_groups(self): # Make sure create associates security groups. group = self._create_group() @@ -6752,6 +6779,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, + self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance).AndRaise(InnerTestingException("Error")) @@ -6802,6 +6830,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): except Exception: exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, + self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance) @@ -6830,6 +6859,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, + self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance) diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 30d176bbd..c46663e50 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -426,6 +426,15 @@ class _BaseTestCase(object): 'fake-values', False) self.assertEqual(result, 'fake-result') + def test_instance_fault_create(self): + self.mox.StubOutWithMock(db, 'instance_fault_create') + db.instance_fault_create(self.context, 'fake-values').AndReturn( + 'fake-result') + self.mox.ReplayAll() + result = self.conductor.instance_fault_create(self.context, + 'fake-values') + self.assertEqual(result, 'fake-result') + class ConductorTestCase(_BaseTestCase, test.TestCase): """Conductor Manager Tests.""" diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index dbf620196..ead43adea 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -157,6 +157,10 @@ policy_data = """ "compute_extension:virtual_interfaces": "", "compute_extension:virtual_storage_arrays": "", "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:delete": "", "compute_extension:volumetypes": "", "compute_extension:zones": "", 
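A note on the compute hunks above: every call site of compute_utils.add_instance_fault_from_exc() gains the conductor API as its second argument, matching the new instance_fault_create() pass-through added to the conductor tests, so fault records flow through the conductor rather than being written to the DB directly. A minimal sketch of a caller under the new ordering (ctxt, conductor_api, and instance are assumed to be in scope; the helper lives in nova.compute.utils):

import sys

from nova.compute import utils as compute_utils

def record_fault(ctxt, conductor_api, instance):
    # Must be called from inside an except block so sys.exc_info() holds
    # the active exception; conductor_api is the new second parameter the
    # hunks above thread through every call site.
    exc_info = sys.exc_info()
    compute_utils.add_instance_fault_from_exc(
        ctxt, conductor_api, instance, exc_info[1], exc_info=exc_info)
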
"compute_extension:availability_zone:list": "", diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py index f2aa3ea91..c7430ee6d 100644 --- a/nova/tests/fake_volume.py +++ b/nova/tests/fake_volume.py @@ -136,7 +136,6 @@ class API(object): def create_with_kwargs(self, context, **kwargs): volume_id = kwargs.get('volume_id', None) - print volume_id v = fake_volume(kwargs['size'], kwargs['name'], kwargs['description'], @@ -145,7 +144,6 @@ class API(object): None, None, None) - print v.vol['id'] if kwargs.get('status', None) is not None: v.vol['status'] = kwargs['status'] if kwargs['host'] is not None: diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 9dd9e5121..fb26fa4f1 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -339,7 +339,6 @@ class TestGlanceImageService(test.TestCase): def test_update(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) - print image image_id = image['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl new file mode 100644 index 000000000..6974f360f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl @@ -0,0 +1,17 @@ +{ + "instance_usage_audit_logs": { + "hosts_not_run": [ + "%(hostid)s" + ], + "log": {}, + "num_hosts": 1, + "num_hosts_done": 0, + "num_hosts_not_run": 1, + "num_hosts_running": 0, + "overall_status": "0 of 1 hosts done. 0 errors.", + "period_beginning": "%(timestamp)s", + "period_ending": "%(timestamp)s", + "total_errors": 0, + "total_instances": 0 + } +} diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl new file mode 100644 index 000000000..4eafa8b4a --- /dev/null +++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl @@ -0,0 +1,16 @@ +<?xml version='1.0' encoding='UTF-8'?> +<instance_usage_audit_logs> + <total_errors>0</total_errors> + <total_instances>0</total_instances> + <log/> + <num_hosts_running>0</num_hosts_running> + <num_hosts_done>0</num_hosts_done> + <num_hosts_not_run>1</num_hosts_not_run> + <hosts_not_run> + <item>%(hostid)s</item> + </hosts_not_run> + <overall_status>0 of 1 hosts done. 0 errors.</overall_status> + <period_ending>%(timestamp)s</period_ending> + <period_beginning>%(timestamp)s</period_beginning> + <num_hosts>1</num_hosts> +</instance_usage_audit_logs> diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl new file mode 100644 index 000000000..eda952304 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl @@ -0,0 +1,17 @@ +{ + "instance_usage_audit_log": { + "hosts_not_run": [ + "%(hostid)s" + ], + "log": {}, + "num_hosts": 1, + "num_hosts_done": 0, + "num_hosts_not_run": 1, + "num_hosts_running": 0, + "overall_status": "0 of 1 hosts done. 
0 errors.", + "period_beginning": "%(timestamp)s", + "period_ending": "%(timestamp)s", + "total_errors": 0, + "total_instances": 0 + } +} diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl new file mode 100644 index 000000000..1ef243292 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl @@ -0,0 +1,16 @@ +<?xml version='1.0' encoding='UTF-8'?> +<instance_usage_audit_log> + <total_errors>0</total_errors> + <total_instances>0</total_instances> + <log/> + <num_hosts_running>0</num_hosts_running> + <num_hosts_done>0</num_hosts_done> + <num_hosts_not_run>1</num_hosts_not_run> + <hosts_not_run> + <item>%(hostid)s</item> + </hosts_not_run> + <overall_status>0 of 1 hosts done. 0 errors.</overall_status> + <period_ending>%(timestamp)s</period_ending> + <period_beginning>%(timestamp)s</period_beginning> + <num_hosts>1</num_hosts> +</instance_usage_audit_log> diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 4cadbf9e5..080e4e92b 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -292,7 +292,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase): # shouldn't be an issue for this case. 'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]' '\d{2}:\d{2}:\d{2}' - '(Z|(\+|-)\d{2}:\d{2}|\.\d{6})', + '(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)', 'password': '[0-9a-zA-Z]{1,12}', 'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}', 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}', @@ -381,7 +381,6 @@ class ApiSamplesTrap(ApiSampleTestBase): do_not_approve_additions.append('os-floating-ip-dns') do_not_approve_additions.append('os-fping') do_not_approve_additions.append('os-hypervisors') - do_not_approve_additions.append('os-instance_usage_audit_log') do_not_approve_additions.append('os-networks') do_not_approve_additions.append('os-services') do_not_approve_additions.append('os-volumes') @@ -2688,3 +2687,29 @@ class FloatingIPPoolsSampleJsonTests(ApiSampleTestBase): class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests): ctype = "xml" + + +class InstanceUsageAuditLogJsonTest(ApiSampleTestBase): + extension_name = ("nova.api.openstack.compute.contrib." 
+ "instance_usage_audit_log.Instance_usage_audit_log") + + def test_show_instance_usage_audit_log(self): + response = self._do_get('os-instance_usage_audit_log/%s' % + urllib.quote('2012-07-05 10:00:00')) + self.assertEqual(response.status, 200) + subs = self._get_regexes() + subs['hostid'] = '[a-f0-9]+' + return self._verify_response('inst-usage-audit-log-show-get-resp', + subs, response) + + def test_index_instance_usage_audit_log(self): + response = self._do_get('os-instance_usage_audit_log') + self.assertEqual(response.status, 200) + subs = self._get_regexes() + subs['hostid'] = '[a-f0-9]+' + return self._verify_response('inst-usage-audit-log-index-get-resp', + subs, response) + + +class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest): + ctype = 'xml' diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py index b2361b13c..ae4fcc32f 100644 --- a/nova/tests/integrated/test_multiprocess_api.py +++ b/nova/tests/integrated/test_multiprocess_api.py @@ -63,7 +63,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase): try: traceback.print_exc() except BaseException: - print "Couldn't print traceback" + LOG.error("Couldn't print traceback") status = 2 # Really exit diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index 8a7865b83..3c219f5f4 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -461,14 +461,14 @@ class LinuxNetworkTestCase(test.TestCase): 'bridge_interface': iface} driver.plug(network, 'fakemac') expected = [ - ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface, - '--arp-ip-dst', dhcp, '-j', 'DROP'), - ('ebtables', '-I', 'INPUT', '-p', 'ARP', '-i', iface, - '--arp-ip-dst', dhcp, '-j', 'DROP'), - ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, - '--arp-ip-src', dhcp, '-j', 'DROP'), - ('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface, - '--arp-ip-src', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', + iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i', + iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o', + iface, '--arp-ip-src', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o', + iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('iptables-save', '-c'), ('iptables-restore', '-c'), ('ip6tables-save', '-c'), @@ -500,10 +500,10 @@ class LinuxNetworkTestCase(test.TestCase): driver.unplug(network) expected = [ - ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface, - '--arp-ip-dst', dhcp, '-j', 'DROP'), - ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, - '--arp-ip-src', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', + iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), + ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o', + iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('iptables-save', '-c'), ('iptables-restore', '-c'), ('ip6tables-save', '-c'), diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 94f297fe9..2cc19bbb8 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -15,7 +15,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import shutil import fixtures import mox @@ -669,7 +668,7 @@ class VlanNetworkTestCase(test.TestCase): is_admin=False) def fake1(*args, **kwargs): - return '10.0.0.1' + return {'address': '10.0.0.1', 'network': 'fakenet'} # floating ip that's already associated def fake2(*args, **kwargs): @@ -789,9 +788,9 @@ class VlanNetworkTestCase(test.TestCase): self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host', get_all_by_host) - def fixed_ip_get(_context, fixed_ip_id): + def fixed_ip_get(_context, fixed_ip_id, get_network): if fixed_ip_id == 1: - return {'address': 'fakefixed'} + return {'address': 'fakefixed', 'network': 'fakenet'} raise exception.FixedIpNotFound(id=fixed_ip_id) self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get) @@ -799,7 +798,8 @@ class VlanNetworkTestCase(test.TestCase): self.flags(public_interface=False) self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', - 'fakeiface') + 'fakeiface', + 'fakenet') self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() @@ -809,7 +809,8 @@ class VlanNetworkTestCase(test.TestCase): self.flags(public_interface='fooiface') self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', - 'fooiface') + 'fooiface', + 'fakenet') self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() @@ -1804,11 +1805,13 @@ class FloatingIPTestCase(test.TestCase): def fake_is_stale_floating_ip_address(context, floating_ip): return floating_ip['address'] == '172.24.4.23' - def fake_fixed_ip_get(context, fixed_ip_id): + def fake_fixed_ip_get(context, fixed_ip_id, get_network): return {'instance_uuid': 'fake_uuid', - 'address': '10.0.0.2'} + 'address': '10.0.0.2', + 'network': 'fakenet'} - def fake_remove_floating_ip(floating_addr, fixed_addr, interface): + def fake_remove_floating_ip(floating_addr, fixed_addr, interface, + network): called['count'] += 1 def fake_floating_ip_update(context, address, args): @@ -1845,11 +1848,13 @@ class FloatingIPTestCase(test.TestCase): def fake_is_stale_floating_ip_address(context, floating_ip): return floating_ip['address'] == '172.24.4.23' - def fake_fixed_ip_get(context, fixed_ip_id): + def fake_fixed_ip_get(context, fixed_ip_id, get_network): return {'instance_uuid': 'fake_uuid', - 'address': '10.0.0.2'} + 'address': '10.0.0.2', + 'network': 'fakenet'} - def fake_add_floating_ip(floating_addr, fixed_addr, interface): + def fake_add_floating_ip(floating_addr, fixed_addr, interface, + network): called['count'] += 1 def fake_floating_ip_update(context, address, args): diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 6fcd19d92..230e2ea03 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -337,6 +337,20 @@ class HostFiltersTestCase(test.TestCase): self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_affinity_different_filter_handles_deleted_instance(self): + filt_cls = self.class_map['DifferentHostFilter']() + host = fakes.FakeHostState('host1', 'node1', {}) + instance = fakes.FakeInstance(context=self.context, + params={'host': 'host1'}) + instance_uuid = instance.uuid + db.instance_destroy(self.context, instance_uuid) + + filter_properties = {'context': self.context.elevated(), + 'scheduler_hints': { + 'different_host': [instance_uuid], }} + + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_affinity_same_filter_no_list_passes(self): filt_cls = self.class_map['SameHostFilter']() host = 
fakes.FakeHostState('host1', 'node1', {}) @@ -388,6 +402,20 @@ class HostFiltersTestCase(test.TestCase): self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_affinity_same_filter_handles_deleted_instance(self): + filt_cls = self.class_map['SameHostFilter']() + host = fakes.FakeHostState('host1', 'node1', {}) + instance = fakes.FakeInstance(context=self.context, + params={'host': 'host1'}) + instance_uuid = instance.uuid + db.instance_destroy(self.context, instance_uuid) + + filter_properties = {'context': self.context.elevated(), + 'scheduler_hints': { + 'same_host': [instance_uuid], }} + + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + def test_affinity_simple_cidr_filter_passes(self): filt_cls = self.class_map['SimpleCIDRAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py index 2c5c06921..4192fa08f 100644 --- a/nova/tests/test_availability_zones.py +++ b/nova/tests/test_availability_zones.py @@ -23,7 +23,6 @@ from nova import availability_zones as az from nova import context from nova import db from nova.openstack.common import cfg -from nova import service from nova import test CONF = cfg.CONF diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py index 4d62d6bbf..43ca4d7b0 100644 --- a/nova/tests/test_bdm.py +++ b/nova/tests/test_bdm.py @@ -246,6 +246,5 @@ class BlockDeviceMappingEc2CloudTestCase(test.TestCase): result = {} cloud._format_mappings(properties, result) - print result self.assertEqual(result['blockDeviceMapping'].sort(), expected_result['blockDeviceMapping'].sort()) diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py index 0915bf157..527534fd5 100644 --- a/nova/tests/test_context.py +++ b/nova/tests/test_context.py @@ -74,3 +74,22 @@ class ContextTestCase(test.TestCase): self.assertTrue(c) self.assertIn("'extra_arg1': 'meow'", info['log_msg']) self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) + + def test_service_catalog_default(self): + ctxt = context.RequestContext('111', '222') + self.assertEquals(ctxt.service_catalog, []) + + def test_service_catalog_cinder_only(self): + service_catalog = [ + {u'type': u'compute', u'name': u'nova'}, + {u'type': u's3', u'name': u's3'}, + {u'type': u'image', u'name': u'glance'}, + {u'type': u'volume', u'name': u'cinder'}, + {u'type': u'ec2', u'name': u'ec2'}, + {u'type': u'object-store', u'name': u'swift'}, + {u'type': u'identity', u'name': u'keystone'}] + + volume_catalog = [{u'type': u'volume', u'name': u'cinder'}] + ctxt = context.RequestContext('111', '222', + service_catalog=service_catalog) + self.assertEquals(ctxt.service_catalog, volume_catalog) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 40552d1c0..4485be4f9 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -253,11 +253,11 @@ class DbApiTestCase(test.TestCase): values = {'address': 'fixed'} fixed = db.fixed_ip_create(ctxt, values) res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo') - self.assertEqual(res, fixed) + self.assertEqual(res['address'], fixed) res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo') self.assertEqual(res, None) res = db.floating_ip_disassociate(ctxt, floating) - self.assertEqual(res, fixed) + self.assertEqual(res['address'], fixed) res = db.floating_ip_disassociate(ctxt, floating) self.assertEqual(res, None) diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py index 
8142312b9..611519514 100644 --- a/nova/tests/test_imagecache.py +++ b/nova/tests/test_imagecache.py @@ -331,7 +331,6 @@ class ImageCacheManagerTestCase(test.TestCase): base_file1 = os.path.join(base_dir, fingerprint) base_file2 = os.path.join(base_dir, fingerprint + '_sm') base_file3 = os.path.join(base_dir, fingerprint + '_10737418240') - print res self.assertTrue(res == [(base_file1, False, False), (base_file2, True, False), (base_file3, False, True)]) diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py index c8f310303..95af25ebd 100644 --- a/nova/tests/test_iptables_network.py +++ b/nova/tests/test_iptables_network.py @@ -170,3 +170,22 @@ class IptablesManagerTestCase(test.TestCase): self.assertTrue('[0:0] -A %s -j %s-%s' % (chain, self.binary_name, chain) in new_lines, "Built-in chain %s not wrapped" % (chain,)) + + def test_missing_table(self): + current_lines = [] + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['filter'], + table_name='filter') + + for line in ['*filter', + 'COMMIT']: + self.assertTrue(line in new_lines, "One of iptables key lines" + "went missing.") + + self.assertTrue(len(new_lines) > 4, "No iptables rules added") + + self.assertTrue("#Generated by nova" == new_lines[0] and + "*filter" == new_lines[1] and + "COMMIT" == new_lines[-2] and + "#Completed by nova" == new_lines[-1], + "iptables rules not generated in the correct order") diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f96536893..5a90f3348 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -3833,7 +3833,6 @@ class IptablesFirewallTestCase(test.TestCase): if '*filter' in lines: self.out6_rules = lines return '', '' - print cmd, kwargs network_model = _fake_network_info(self.stubs, 1, spectacular=True) diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py index 3861d7dfa..7ce81cc09 100644 --- a/nova/tests/test_libvirt_vif.py +++ b/nova/tests/test_libvirt_vif.py @@ -16,6 +16,8 @@ from lxml import etree +from nova import exception +from nova.network import model as network_model from nova.openstack.common import cfg from nova import test from nova import utils @@ -48,7 +50,8 @@ class LibvirtVifTestCase(test.TestCase): 'ips': [{'ip': '101.168.1.9'}], 'dhcp_server': '191.168.1.1', 'vif_uuid': 'vif-xxx-yyy-zzz', - 'vif_devname': 'tap-xxx-yyy-zzz' + 'vif_devname': 'tap-xxx-yyy-zzz', + 'vif_type': network_model.VIF_TYPE_BRIDGE, } net_ovs = { @@ -75,6 +78,15 @@ class LibvirtVifTestCase(test.TestCase): 'ovs_interfaceid': 'aaa-bbb-ccc', } + mapping_none = { + 'mac': 'ca:fe:de:ad:be:ef', + 'gateway_v6': net_bridge['gateway_v6'], + 'ips': [{'ip': '101.168.1.9'}], + 'dhcp_server': '191.168.1.1', + 'vif_uuid': 'vif-xxx-yyy-zzz', + 'vif_devname': 'tap-xxx-yyy-zzz', + } + instance = { 'name': 'instance-name', 'uuid': 'instance-uuid' @@ -149,7 +161,7 @@ class LibvirtVifTestCase(test.TestCase): self.flags(libvirt_use_virtio_for_bridges=False, libvirt_type='kvm') - d = vif.LibvirtBridgeDriver() + d = vif.LibvirtGenericVIFDriver() xml = self._get_instance_xml(d, self.net_bridge, self.mapping_bridge) @@ -168,7 +180,7 @@ class LibvirtVifTestCase(test.TestCase): self.flags(libvirt_use_virtio_for_bridges=True, libvirt_type='kvm') - d = vif.LibvirtBridgeDriver() + d = vif.LibvirtGenericVIFDriver() xml = self._get_instance_xml(d, self.net_bridge, self.mapping_bridge) @@ -187,7 +199,7 @@ class LibvirtVifTestCase(test.TestCase): self.flags(libvirt_use_virtio_for_bridges=True, libvirt_type='qemu') - d 
= vif.LibvirtBridgeDriver() + d = vif.LibvirtGenericVIFDriver() xml = self._get_instance_xml(d, self.net_bridge, self.mapping_bridge) @@ -206,7 +218,7 @@ class LibvirtVifTestCase(test.TestCase): self.flags(libvirt_use_virtio_for_bridges=True, libvirt_type='xen') - d = vif.LibvirtBridgeDriver() + d = vif.LibvirtGenericVIFDriver() xml = self._get_instance_xml(d, self.net_bridge, self.mapping_bridge) @@ -221,8 +233,15 @@ class LibvirtVifTestCase(test.TestCase): ret = node.findall("driver") self.assertEqual(len(ret), 0) - def test_bridge_driver(self): - d = vif.LibvirtBridgeDriver() + def test_generic_driver_none(self): + d = vif.LibvirtGenericVIFDriver() + self.assertRaises(exception.NovaException, + self._get_instance_xml, + d, + self.net_bridge, + self.mapping_none) + + def _check_bridge_driver(self, d): xml = self._get_instance_xml(d, self.net_bridge, self.mapping_bridge) @@ -237,6 +256,14 @@ class LibvirtVifTestCase(test.TestCase): mac = node.find("mac").get("address") self.assertEqual(mac, self.mapping_bridge['mac']) + def test_bridge_driver(self): + d = vif.LibvirtBridgeDriver() + self._check_bridge_driver(d) + + def test_generic_driver_bridge(self): + d = vif.LibvirtGenericVIFDriver() + self._check_bridge_driver(d) + def test_ovs_ethernet_driver(self): d = vif.LibvirtOpenVswitchDriver() xml = self._get_instance_xml(d, diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py index f0ed0a863..8c18d5511 100644 --- a/nova/tests/test_migrations.py +++ b/nova/tests/test_migrations.py @@ -24,6 +24,7 @@ properly both upgrading and downgrading, and that no data loss occurs if possible. """ +import collections import commands import ConfigParser import os @@ -87,6 +88,16 @@ def _have_mysql(): return present.lower() in ('', 'true') +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. 
+ + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data.""" + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + return sqlalchemy.Table(name, metadata, autoload=True) + + class TestMigrations(test.TestCase): """Test sqlalchemy-migrate migrations.""" @@ -227,19 +238,11 @@ class TestMigrations(test.TestCase): self.engines["mysqlcitest"] = engine self.test_databases["mysqlcitest"] = connect_string - # Test that we end in an innodb - self._check_mysql_innodb(engine) - # Test IP transition - self._check_mysql_migration_149(engine) - - def _check_mysql_innodb(self, engine): # build a fully populated mysql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) - uri = _get_connect_string("mysql", database="information_schema") - connection = sqlalchemy.create_engine(uri).connect() - + connection = engine.connect() # sanity check total = connection.execute("SELECT count(*) " "from information_schema.TABLES " @@ -253,92 +256,8 @@ class TestMigrations(test.TestCase): "and TABLE_NAME!='migrate_version'") count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) - - def test_migration_149_postgres(self): - """Test updating a table with IPAddress columns.""" - if not _is_backend_avail('postgres'): - self.skipTest("postgres not available") - - connect_string = _get_connect_string("postgres") - engine = sqlalchemy.create_engine(connect_string) - - self.engines["postgrescitest"] = engine - self.test_databases["postgrescitest"] = connect_string - - self._reset_databases() - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - - connection = engine.connect() - - self._migrate_up(engine, 148) - IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1") - connection.execute("INSERT INTO provider_fw_rules " - " (protocol, from_port, to_port, cidr)" - "VALUES ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s')" % IPS) - self.assertEqual('character varying', - connection.execute( - "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS " - "WHERE table_name='provider_fw_rules' " - "AND table_catalog='openstack_citest' " - "AND column_name='cidr'").scalar()) - - self._migrate_up(engine, 149) - self.assertEqual(IPS, - tuple(tup[0] for tup in connection.execute( - "SELECT cidr from provider_fw_rules").fetchall())) - self.assertEqual('inet', - connection.execute( - "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS " - "WHERE table_name='provider_fw_rules' " - "AND table_catalog='openstack_citest' " - "AND column_name='cidr'").scalar()) connection.close() - def _check_mysql_migration_149(self, engine): - """Test updating a table with IPAddress columns.""" - self._reset_databases() - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - - uri = _get_connect_string("mysql", database="openstack_citest") - connection = sqlalchemy.create_engine(uri).connect() - - self._migrate_up(engine, 148) - - IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1") - connection.execute("INSERT INTO provider_fw_rules " - " (protocol, from_port, to_port, cidr)" - "VALUES ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s'), " - " ('tcp', 1234, 1234, '%s')" % IPS) - self.assertEqual('varchar(255)', - connection.execute( - "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS " - "WHERE 
table_name='provider_fw_rules' " - "AND table_schema='openstack_citest' " - "AND column_name='cidr'").scalar()) - - connection.close() - - self._migrate_up(engine, 149) - - connection = sqlalchemy.create_engine(uri).connect() - - self.assertEqual(IPS, - tuple(tup[0] for tup in connection.execute( - "SELECT cidr from provider_fw_rules").fetchall())) - self.assertEqual('varchar(39)', - connection.execute( - "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS " - "WHERE table_name='provider_fw_rules' " - "AND table_schema='openstack_citest' " - "AND column_name='cidr'").scalar()) - def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data @@ -360,7 +279,7 @@ class TestMigrations(test.TestCase): for version in xrange(migration.INIT_VERSION + 2, TestMigrations.REPOSITORY.latest + 1): # upgrade -> downgrade -> upgrade - self._migrate_up(engine, version) + self._migrate_up(engine, version, with_data=True) if snake_walk: self._migrate_down(engine, version) self._migrate_up(engine, version) @@ -385,7 +304,19 @@ class TestMigrations(test.TestCase): migration_api.db_version(engine, TestMigrations.REPOSITORY)) - def _migrate_up(self, engine, version): + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _prerun_### and + _check_### functions in the main test. + """ + if with_data: + data = None + prerun = getattr(self, "_prerun_%d" % version, None) + if prerun: + data = prerun(engine) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, version) @@ -393,168 +324,198 @@ class TestMigrations(test.TestCase): migration_api.db_version(engine, TestMigrations.REPOSITORY)) - def test_migration_146(self): - name = 'name' - az = 'custom_az' - - def _145_check(): - agg = aggregates.select(aggregates.c.id == 1).execute().first() - self.assertEqual(name, agg.name) - self.assertEqual(az, agg.availability_zone) - - for key, engine in self.engines.items(): - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 145) - metadata = sqlalchemy.schema.MetaData() - metadata.bind = engine - aggregates = sqlalchemy.Table('aggregates', metadata, - autoload=True) - - aggregates.insert().values(id=1, availability_zone=az, - aggregate_name=1, name=name).execute() - - _145_check() - - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146) - - aggregate_metadata = sqlalchemy.Table('aggregate_metadata', - metadata, autoload=True) - metadata = aggregate_metadata.select(aggregate_metadata.c. 
- aggregate_id == 1).execute().first() - self.assertEqual(az, metadata['value']) - - migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145) - _145_check() - - def test_migration_147(self): + if with_data: + check = getattr(self, "_check_%d" % version, None) + if check: + check(engine, data) + + # migration 146, availability zone transition + def _prerun_146(self, engine): + data = { + 'id': 1, + 'availability_zone': 'custom_az', + 'aggregate_name': 1, + 'name': 'name', + } + + aggregates = get_table(engine, 'aggregates') + aggregates.insert().values(data).execute() + return data + + def _check_146(self, engine, data): + aggregate_md = get_table(engine, 'aggregate_metadata') + md = aggregate_md.select( + aggregate_md.c.aggregate_id == 1).execute().first() + self.assertEqual(data['availability_zone'], md['value']) + + # migration 147, availability zone transition for services + def _prerun_147(self, engine): az = 'test_zone' host1 = 'compute-host1' host2 = 'compute-host2' - - def _146_check(): - service = services.select(services.c.id == 1).execute().first() - self.assertEqual(az, service.availability_zone) - self.assertEqual(host1, service.host) - service = services.select(services.c.id == 2).execute().first() - self.assertNotEqual(az, service.availability_zone) - service = services.select(services.c.id == 3).execute().first() - self.assertEqual(az, service.availability_zone) - self.assertEqual(host2, service.host) - - for key, engine in self.engines.items(): - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146) - metadata = sqlalchemy.schema.MetaData() - metadata.bind = engine - - #populate service table - services = sqlalchemy.Table('services', metadata, - autoload=True) - services.insert().values(id=1, host=host1, - binary='nova-compute', topic='compute', report_count=0, - availability_zone=az).execute() - services.insert().values(id=2, host='sched-host', - binary='nova-scheduler', topic='scheduler', report_count=0, - availability_zone='ignore_me').execute() - services.insert().values(id=3, host=host2, - binary='nova-compute', topic='compute', report_count=0, - availability_zone=az).execute() - - _146_check() - - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147) - - # check aggregate metadata - aggregate_metadata = sqlalchemy.Table('aggregate_metadata', - metadata, autoload=True) - aggregate_hosts = sqlalchemy.Table('aggregate_hosts', - metadata, autoload=True) - metadata = aggregate_metadata.select(aggregate_metadata.c. - aggregate_id == 1).execute().first() - self.assertEqual(az, metadata['value']) - self.assertEqual(aggregate_hosts.select( - aggregate_hosts.c.aggregate_id == 1).execute(). 
- first().host, host1) - blank = [h for h in aggregate_hosts.select( - aggregate_hosts.c.aggregate_id == 2).execute()] - self.assertEqual(blank, []) - - migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146) - - _146_check() - - def test_migration_152(self): + # start at id == 2 because we already inserted one + data = [ + {'id': 1, 'host': host1, + 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': az}, + {'id': 2, 'host': 'sched-host', + 'binary': 'nova-scheduler', 'topic': 'scheduler', + 'report_count': 0, 'availability_zone': 'ignore_me'}, + {'id': 3, 'host': host2, + 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': az}, + ] + + services = get_table(engine, 'services') + engine.execute(services.insert(), data) + return data + + def _check_147(self, engine, data): + aggregate_md = get_table(engine, 'aggregate_metadata') + aggregate_hosts = get_table(engine, 'aggregate_hosts') + # NOTE(sdague): hard coded to id == 2, because we added to + # aggregate_metadata previously + for item in data: + md = aggregate_md.select( + aggregate_md.c.aggregate_id == 2).execute().first() + if item['binary'] == "nova-compute": + self.assertEqual(item['availability_zone'], md['value']) + + host = aggregate_hosts.select( + aggregate_hosts.c.aggregate_id == 2 + ).execute().first() + self.assertEqual(host['host'], data[0]['host']) + + # NOTE(sdague): id 3 is just non-existent + host = aggregate_hosts.select( + aggregate_hosts.c.aggregate_id == 3 + ).execute().first() + self.assertEqual(host, None) + + # migration 149, changes IPAddr storage format + def _prerun_149(self, engine): + provider_fw_rules = get_table(engine, 'provider_fw_rules') + data = [ + {'protocol': 'tcp', 'from_port': 1234, + 'to_port': 1234, 'cidr': "127.0.0.1"}, + {'protocol': 'tcp', 'from_port': 1234, + 'to_port': 1234, 'cidr': "255.255.255.255"}, + {'protocol': 'tcp', 'from_port': 1234, + 'to_port': 1234, 'cidr': "2001:db8::1:2"}, + {'protocol': 'tcp', 'from_port': 1234, + 'to_port': 1234, 'cidr': "::1"} + ] + engine.execute(provider_fw_rules.insert(), data) + return data + + def _check_149(self, engine, data): + provider_fw_rules = get_table(engine, 'provider_fw_rules') + result = provider_fw_rules.select().execute() + + iplist = map(lambda x: x['cidr'], data) + + for row in result: + self.assertIn(row['cidr'], iplist) + + # migration 152 - convert deleted from boolean to int + def _prerun_152(self, engine): host1 = 'compute-host1' host2 = 'compute-host2' - - def _151_check(services, volumes): - service = services.select(services.c.id == 1).execute().first() - self.assertEqual(False, service.deleted) - service = services.select(services.c.id == 2).execute().first() - self.assertEqual(True, service.deleted) - - volume = volumes.select(volumes.c.id == "first").execute().first() - self.assertEqual(False, volume.deleted) - volume = volumes.select(volumes.c.id == "second").execute().first() - self.assertEqual(True, volume.deleted) - - for key, engine in self.engines.items(): - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 151) - metadata = sqlalchemy.schema.MetaData() - metadata.bind = engine - - # NOTE(boris-42): It is enough to test one table with type of `id` - # column Integer and one with type String. 
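Stepping back from the test_migrations.py rewrite above: the one-off test_migration_NNN methods are replaced by a data-driven walk in which _migrate_up() looks up optional _prerun_<version> and _check_<version> hooks by name, seeds fixture rows before each upgrade, and verifies the migrated shape afterwards, with get_table() reflecting tables from the live schema instead of the (possibly stale) models. Adding coverage for a future migration then only means defining a matching pair of hooks; a hypothetical sketch for an imagined migration 999 (the table and columns are invented for illustration):

    def _prerun_999(self, engine):
        # Hypothetical: seed a row the migration is expected to transform.
        widgets = get_table(engine, 'widgets')
        data = [{'id': 1, 'deleted': False}]
        engine.execute(widgets.insert(), data)
        return data

    def _check_999(self, engine, data):
        # Hypothetical: reflect the table again post-upgrade and confirm
        # the boolean column was converted to an integer.
        widgets = get_table(engine, 'widgets')
        row = widgets.select(widgets.c.id == 1).execute().first()
        self.assertEqual(0, row.deleted)
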
- services = sqlalchemy.Table('services', metadata, autoload=True) - volumes = sqlalchemy.Table('volumes', metadata, autoload=True) - - engine.execute( - services.insert(), - [ - {'id': 1, 'host': host1, 'binary': 'nova-compute', - 'report_count': 0, 'topic': 'compute', 'deleted': False}, - {'id': 2, 'host': host1, 'binary': 'nova-compute', - 'report_count': 0, 'topic': 'compute', 'deleted': True} - ] - ) - - engine.execute( - volumes.insert(), - [ - {'id': 'first', 'host': host1, 'deleted': False}, - {'id': 'second', 'host': host2, 'deleted': True} - ] - ) - - _151_check(services, volumes) - - migration_api.upgrade(engine, TestMigrations.REPOSITORY, 152) - # NOTE(boris-42): One more time get from DB info about tables. - metadata2 = sqlalchemy.schema.MetaData() - metadata2.bind = engine - - services = sqlalchemy.Table('services', metadata2, autoload=True) - - service = services.select(services.c.id == 1).execute().first() - self.assertEqual(0, service.deleted) - service = services.select(services.c.id == 2).execute().first() - self.assertEqual(service.id, service.deleted) - - volumes = sqlalchemy.Table('volumes', metadata2, autoload=True) - volume = volumes.select(volumes.c.id == "first").execute().first() - self.assertEqual("", volume.deleted) - volume = volumes.select(volumes.c.id == "second").execute().first() - self.assertEqual(volume.id, volume.deleted) - - migration_api.downgrade(engine, TestMigrations.REPOSITORY, 151) - # NOTE(boris-42): One more time get from DB info about tables. - metadata = sqlalchemy.schema.MetaData() - metadata.bind = engine - services = sqlalchemy.Table('services', metadata, autoload=True) - volumes = sqlalchemy.Table('volumes', metadata, autoload=True) - - _151_check(services, volumes) + # NOTE(sdague): start at #4 because services data already in table + # from 147 + services_data = [ + {'id': 4, 'host': host1, 'binary': 'nova-compute', + 'report_count': 0, 'topic': 'compute', 'deleted': False}, + {'id': 5, 'host': host1, 'binary': 'nova-compute', + 'report_count': 0, 'topic': 'compute', 'deleted': True} + ] + volumes_data = [ + {'id': 'first', 'host': host1, 'deleted': False}, + {'id': 'second', 'host': host2, 'deleted': True} + ] + + services = get_table(engine, 'services') + engine.execute(services.insert(), services_data) + + volumes = get_table(engine, 'volumes') + engine.execute(volumes.insert(), volumes_data) + return dict(services=services_data, volumes=volumes_data) + + def _check_152(self, engine, data): + services = get_table(engine, 'services') + service = services.select(services.c.id == 4).execute().first() + self.assertEqual(0, service.deleted) + service = services.select(services.c.id == 5).execute().first() + self.assertEqual(service.id, service.deleted) + + volumes = get_table(engine, 'volumes') + volume = volumes.select(volumes.c.id == "first").execute().first() + self.assertEqual("", volume.deleted) + volume = volumes.select(volumes.c.id == "second").execute().first() + self.assertEqual(volume.id, volume.deleted) + + # migration 153, copy flavor information into system_metadata + def _prerun_153(self, engine): + fake_types = [ + dict(id=10, name='type1', memory_mb=128, vcpus=1, + root_gb=10, ephemeral_gb=0, flavorid="1", swap=0, + rxtx_factor=1.0, vcpu_weight=1, disabled=False, + is_public=True), + dict(id=11, name='type2', memory_mb=512, vcpus=1, + root_gb=10, ephemeral_gb=5, flavorid="2", swap=0, + rxtx_factor=1.5, vcpu_weight=2, disabled=False, + is_public=True), + dict(id=12, name='type3', memory_mb=128, vcpus=1, + root_gb=10, 
ephemeral_gb=0, flavorid="3", swap=0, + rxtx_factor=1.0, vcpu_weight=1, disabled=False, + is_public=False), + dict(id=13, name='type4', memory_mb=128, vcpus=1, + root_gb=10, ephemeral_gb=0, flavorid="4", swap=0, + rxtx_factor=1.0, vcpu_weight=1, disabled=True, + is_public=True), + dict(id=14, name='type5', memory_mb=128, vcpus=1, + root_gb=10, ephemeral_gb=0, flavorid="5", swap=0, + rxtx_factor=1.0, vcpu_weight=1, disabled=True, + is_public=False), + ] + + fake_instances = [ + dict(uuid='m153-uuid1', instance_type_id=10), + dict(uuid='m153-uuid2', instance_type_id=11), + dict(uuid='m153-uuid3', instance_type_id=12), + dict(uuid='m153-uuid4', instance_type_id=13), + # NOTE(danms): no use of type5 + ] + + instances = get_table(engine, 'instances') + instance_types = get_table(engine, 'instance_types') + engine.execute(instance_types.insert(), fake_types) + engine.execute(instances.insert(), fake_instances) + + return fake_types, fake_instances + + def _check_153(self, engine, data): + fake_types, fake_instances = data + # NOTE(danms): Fetch all the tables and data from scratch after change + instances = get_table(engine, 'instances') + instance_types = get_table(engine, 'instance_types') + sys_meta = get_table(engine, 'instance_system_metadata') + + # Collect all system metadata, indexed by instance_uuid + metadata = collections.defaultdict(dict) + for values in sys_meta.select().execute(): + metadata[values['instance_uuid']][values['key']] = values['value'] + + # Taken from nova/compute/api.py + instance_type_props = ['id', 'name', 'memory_mb', 'vcpus', + 'root_gb', 'ephemeral_gb', 'flavorid', + 'swap', 'rxtx_factor', 'vcpu_weight'] + + for instance in fake_instances: + inst_sys_meta = metadata[instance['uuid']] + inst_type = fake_types[instance['instance_type_id'] - 10] + for prop in instance_type_props: + prop_name = 'instance_type_%s' % prop + self.assertIn(prop_name, inst_sys_meta) + self.assertEqual(str(inst_sys_meta[prop_name]), + str(inst_type[prop])) diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py index a300028a0..aec6c8f67 100644 --- a/nova/tests/test_notifications.py +++ b/nova/tests/test_notifications.py @@ -187,8 +187,6 @@ class NotificationsTestCase(test.TestCase): params = {"task_state": task_states.SPAWNING} (old_ref, new_ref) = db.instance_update_and_get_original(self.context, self.instance['uuid'], params) - print old_ref["task_state"] - print new_ref["task_state"] notifications.send_update(self.context, old_ref, new_ref) self.assertEquals(1, len(test_notifier.NOTIFICATIONS)) diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py index 3c63f6d4a..621e86b3a 100644 --- a/nova/tests/test_periodic_tasks.py +++ b/nova/tests/test_periodic_tasks.py @@ -17,7 +17,6 @@ import time -import fixtures from testtools import matchers from nova import manager diff --git a/nova/tests/test_virt_disk.py b/nova/tests/test_virt_disk.py index e6a57e085..0c51e8267 100644 --- a/nova/tests/test_virt_disk.py +++ b/nova/tests/test_virt_disk.py @@ -67,7 +67,7 @@ class VirtDiskTest(test.TestCase): "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, - 'mode': 0700}) + 'mode': 0600}) vfs.teardown() @@ -101,7 +101,7 @@ class VirtDiskTest(test.TestCase): "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, - 'mode': 0700}) + 'mode': 0600}) vfs.teardown() diff --git a/nova/tests/test_virt_disk_vfs_localfs.py b/nova/tests/test_virt_disk_vfs_localfs.py index 806ed01d8..af4571dd2 100644 --- 
a/nova/tests/test_virt_disk_vfs_localfs.py +++ b/nova/tests/test_virt_disk_vfs_localfs.py @@ -104,7 +104,6 @@ def fake_execute(*args, **kwargs): else: path = args[1] append = False - print str(files) if not path in files: files[path] = { "content": "Hello World", diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py index 6ea2d0ef7..e8e7c329a 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -119,8 +119,6 @@ class _FakeDriverBackendTestCase(object): def _teardown_fakelibvirt(self): # Restore libvirt - import nova.virt.libvirt.driver - import nova.virt.libvirt.firewall if self.saved_libvirt: sys.modules['libvirt'] = self.saved_libvirt diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 8db4c80ad..34f03a555 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -1,5 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack LLC. # @@ -41,7 +42,9 @@ class VMwareAPIVMTestCase(test.TestCase): self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(vmwareapi_host_ip='test_url', vmwareapi_host_username='test_username', - vmwareapi_host_password='test_pass') + vmwareapi_host_password='test_pass', + vnc_enabled=False, + use_linked_clone=False) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) @@ -211,7 +214,7 @@ class VMwareAPIVMTestCase(test.TestCase): self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'name': 1}) - self._check_vm_info(info, power_state.PAUSED) + self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot, self.instance, self.network_info, 'SOFT') @@ -221,7 +224,7 @@ class VMwareAPIVMTestCase(test.TestCase): self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'name': 1}) - self._check_vm_info(info, power_state.PAUSED) + self._check_vm_info(info, power_state.SUSPENDED) def test_suspend_non_existent(self): self._create_instance_in_the_db() @@ -234,7 +237,7 @@ class VMwareAPIVMTestCase(test.TestCase): self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'name': 1}) - self._check_vm_info(info, power_state.PAUSED) + self._check_vm_info(info, power_state.SUSPENDED) self.conn.resume(self.instance, self.network_info) info = self.conn.get_info({'name': 1}) self._check_vm_info(info, power_state.RUNNING) @@ -251,6 +254,43 @@ class VMwareAPIVMTestCase(test.TestCase): self.assertRaises(exception.InstanceResumeFailure, self.conn.resume, self.instance, self.network_info) + def test_power_on(self): + self._create_vm() + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.RUNNING) + self.conn.power_off(self.instance) + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.SHUTDOWN) + self.conn.power_on(self.instance) + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.RUNNING) + + def test_power_on_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(exception.InstanceNotFound, self.conn.power_on, + self.instance) + + def test_power_off(self): + self._create_vm() + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.RUNNING) + 
self.conn.power_off(self.instance) + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.SHUTDOWN) + + def test_power_off_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(exception.InstanceNotFound, self.conn.power_off, + self.instance) + + def test_power_off_suspended(self): + self._create_vm() + self.conn.suspend(self.instance) + info = self.conn.get_info({'name': 1}) + self._check_vm_info(info, power_state.SUSPENDED) + self.assertRaises(exception.InstancePowerOffFailure, + self.conn.power_off, self.instance) + def test_get_info(self): self._create_vm() info = self.conn.get_info({'name': 1}) @@ -282,3 +322,48 @@ class VMwareAPIVMTestCase(test.TestCase): def test_get_console_output(self): pass + + +class VMwareAPIHostTestCase(test.TestCase): + """Unit tests for Vmware API host calls.""" + + def setUp(self): + super(VMwareAPIHostTestCase, self).setUp() + self.flags(vmwareapi_host_ip='test_url', + vmwareapi_host_username='test_username', + vmwareapi_host_password='test_pass') + vmwareapi_fake.reset() + stubs.set_stubs(self.stubs) + self.conn = driver.VMwareESXDriver(False) + + def tearDown(self): + super(VMwareAPIHostTestCase, self).tearDown() + vmwareapi_fake.cleanup() + + def test_host_state(self): + stats = self.conn.get_host_stats() + self.assertEquals(stats['vcpus'], 16) + self.assertEquals(stats['disk_total'], 1024) + self.assertEquals(stats['disk_available'], 500) + self.assertEquals(stats['disk_used'], 1024 - 500) + self.assertEquals(stats['host_memory_total'], 1024) + self.assertEquals(stats['host_memory_free'], 1024 - 500) + + def _test_host_action(self, method, action, expected=None): + result = method('host', action) + self.assertEqual(result, expected) + + def test_host_reboot(self): + self._test_host_action(self.conn.host_power_action, 'reboot') + + def test_host_shutdown(self): + self._test_host_action(self.conn.host_power_action, 'shutdown') + + def test_host_startup(self): + self._test_host_action(self.conn.host_power_action, 'startup') + + def test_host_maintenance_on(self): + self._test_host_action(self.conn.host_maintenance_mode, True) + + def test_host_maintenance_off(self): + self._test_host_action(self.conn.host_maintenance_mode, False) diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py index 275088af0..633e6f835 100644 --- a/nova/tests/virt/xenapi/test_vm_utils.py +++ b/nova/tests/virt/xenapi/test_vm_utils.py @@ -19,10 +19,8 @@ import contextlib import fixtures import mox -import uuid from nova import test -from nova.tests.xenapi import stubs from nova import utils from nova.virt.xenapi import vm_utils diff --git a/nova/utils.py b/nova/utils.py index 97091e42c..52d4868c9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -76,6 +76,9 @@ utils_opts = [ default="/etc/nova/rootwrap.conf", help='Path to the rootwrap configuration file to use for ' 'running commands as root'), + cfg.StrOpt('tempdir', + default=None, + help='Explicitly specify the temporary working directory'), ] CONF = cfg.CONF CONF.register_opts(monkey_patch_opts) @@ -1147,6 +1150,7 @@ def temporary_chown(path, owner_uid=None): @contextlib.contextmanager def tempdir(**kwargs): + tempfile.tempdir = CONF.tempdir tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py index 11af99d41..5a6f58655 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -27,11 +27,9 @@ from nova import exception from nova.openstack.common import 
cfg from nova.openstack.common import fileutils from nova.openstack.common import log as logging -from nova import utils from nova.virt.baremetal import base from nova.virt.baremetal import db from nova.virt.baremetal import utils as bm_utils -from nova.virt.disk import api as disk pxe_opts = [ diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index 3d7d0f516..304781a64 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -443,6 +443,7 @@ def _inject_key_into_fs(key, fs): ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) + fs.set_permissions(keyfile, 0600) _setup_selinux_for_keys(fs, sshdir) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 46b63d1c6..9ed7a054c 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -47,6 +47,7 @@ import os import shutil import sys import tempfile +import time import uuid from eventlet import greenthread @@ -140,7 +141,7 @@ libvirt_opts = [ 'raw, qcow2, vmdk, vdi). ' 'Defaults to same as source image'), cfg.StrOpt('libvirt_vif_driver', - default='nova.virt.libvirt.vif.LibvirtBridgeDriver', + default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver', help='The libvirt VIF driver to configure the VIFs.'), cfg.ListOpt('libvirt_volume_drivers', default=[ @@ -254,6 +255,10 @@ MIN_LIBVIRT_VERSION = (0, 9, 6) # When the above version matches/exceeds this version # delete it & corresponding code using it MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10) +# Live snapshot requirements +REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU" +MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0) +MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0) def _get_eph_disk(ephemeral): @@ -325,16 +330,29 @@ class LibvirtDriver(driver.ComputeDriver): self._host_state = HostState(self.virtapi, self.read_only) return self._host_state - def has_min_version(self, ver): - libvirt_version = self._conn.getLibVersion() - + def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None): def _munge_version(ver): return ver[0] * 1000000 + ver[1] * 1000 + ver[2] - if libvirt_version < _munge_version(ver): - return False + try: + if lv_ver is not None: + libvirt_version = self._conn.getLibVersion() + if libvirt_version < _munge_version(lv_ver): + return False - return True + if hv_ver is not None: + hypervisor_version = self._conn.getVersion() + if hypervisor_version < _munge_version(hv_ver): + return False + + if hv_type is not None: + hypervisor_type = self._conn.getType() + if hypervisor_type != hv_type: + return False + + return True + except Exception: + return False def init_host(self, host): if not self.has_min_version(MIN_LIBVIRT_VERSION): @@ -806,35 +824,64 @@ class LibvirtDriver(driver.ComputeDriver): (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() state = LIBVIRT_POWER_STATE[state] + # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0. + # These restrictions can be relaxed as other configurations + # can be validated. + if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION, + MIN_QEMU_LIVESNAPSHOT_VERSION, + REQ_HYPERVISOR_LIVESNAPSHOT): + live_snapshot = True + else: + live_snapshot = False + + # NOTE(rmk): We cannot perform live snapshots when a managedSave + # file is present, so we will use the cold/legacy method + # for instances which are shutdown. 
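The has_min_version() refactor above generalizes a single libvirt version check into a three-way capability gate (libvirt version, hypervisor version, hypervisor type), and the snapshot path uses it to decide between the live and cold paths. A condensed standalone sketch of the same gate, using the constants introduced above (libvirt 1.0.0, QEMU 1.3.0, hypervisor type "QEMU"):

def _munge_version(ver):
    # Pack (major, minor, micro) into the single-integer encoding used by
    # getLibVersion()/getVersion(), so tuples compare against them directly.
    return ver[0] * 1000000 + ver[1] * 1000 + ver[2]

def can_live_snapshot(conn):
    # All three checks must pass; any connection error reads as
    # "capability absent", mirroring the except clause above.
    try:
        return (conn.getLibVersion() >= _munge_version((1, 0, 0)) and
                conn.getVersion() >= _munge_version((1, 3, 0)) and
                conn.getType() == "QEMU")
    except Exception:
        return False
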
+ if state == power_state.SHUTDOWN: + live_snapshot = False + # NOTE(dkang): managedSave does not work for LXC - if CONF.libvirt_type != 'lxc': + if CONF.libvirt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING or state == power_state.PAUSED: virt_dom.managedSave(0) - # Make the snapshot - snapshot = self.image_backend.snapshot(disk_path, snapshot_name, - image_type=source_format) + if live_snapshot: + LOG.info(_("Beginning live snapshot process"), + instance=instance) + else: + LOG.info(_("Beginning cold snapshot process"), + instance=instance) + snapshot = self.image_backend.snapshot(disk_path, snapshot_name, + image_type=source_format) + snapshot.create() - snapshot.create() update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) - - # Export the snapshot to a raw image snapshot_directory = CONF.libvirt_snapshots_directory fileutils.ensure_tree(snapshot_directory) with utils.tempdir(dir=snapshot_directory) as tmpdir: try: out_path = os.path.join(tmpdir, snapshot_name) - snapshot.extract(out_path, image_format) + if live_snapshot: + # NOTE (rmk): libvirt needs to be able to write to the + # temp directory, which is owned nova. + utils.execute('chmod', '777', tmpdir, run_as_root=True) + self._live_snapshot(virt_dom, disk_path, out_path, + image_format) + else: + snapshot.extract(out_path, image_format) finally: - snapshot.delete() + if not live_snapshot: + snapshot.delete() # NOTE(dkang): because previous managedSave is not called # for LXC, _create_domain must not be called. - if CONF.libvirt_type != 'lxc': + if CONF.libvirt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING: self._create_domain(domain=virt_dom) elif state == power_state.PAUSED: self._create_domain(domain=virt_dom, launch_flags=libvirt.VIR_DOMAIN_START_PAUSED) + LOG.info(_("Snapshot extracted, beginning image upload"), + instance=instance) # Upload that image to the image service @@ -845,6 +892,72 @@ class LibvirtDriver(driver.ComputeDriver): image_href, metadata, image_file) + LOG.info(_("Snapshot image upload complete"), + instance=instance) + + def _live_snapshot(self, domain, disk_path, out_path, image_format): + """Snapshot an instance without downtime.""" + # Save a copy of the domain's running XML file + xml = domain.XMLDesc(0) + + # Abort is an idempotent operation, so make sure any block + # jobs which may have failed are ended. + try: + domain.blockJobAbort(disk_path, 0) + except Exception: + pass + + def _wait_for_block_job(domain, disk_path): + status = domain.blockJobInfo(disk_path, 0) + try: + cur = status.get('cur', 0) + end = status.get('end', 0) + except Exception: + return False + + if cur == end and cur != 0 and end != 0: + return False + else: + return True + + # NOTE (rmk): We are using shallow rebases as a workaround to a bug + # in QEMU 1.3. In order to do this, we need to create + # a destination image with the original backing file + # and matching size of the instance root disk. + src_disk_size = libvirt_utils.get_disk_size(disk_path) + src_back_path = libvirt_utils.get_disk_backing_file(disk_path, + basename=False) + disk_delta = out_path + '.delta' + libvirt_utils.create_cow_image(src_back_path, disk_delta, + src_disk_size) + + try: + # NOTE (rmk): blockRebase cannot be executed on persistent + # domains, so we need to temporarily undefine it. + # If any part of this block fails, the domain is + # re-defined regardless. 
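For orientation inside _live_snapshot() above: virDomain.blockJobInfo() reports 'cur' and 'end' progress counters while a block job runs (and a falsy value once none exists, which is what the try/except guards against), so a nonzero cur == end means the mirror holds a complete copy and the caller can abort the job to sever it. A standalone sketch of a similar polling loop, under those same assumptions:

import time

def wait_for_block_copy(domain, disk_path, poll_interval=0.5):
    # Poll blockJobInfo until the copy phase completes or the job
    # disappears; cur == end == 0 means the job has not started yet.
    while True:
        status = domain.blockJobInfo(disk_path, 0)
        if not status:
            return False    # job vanished (bindings may return 0 or {})
        cur = status.get('cur', 0)
        end = status.get('end', 0)
        if cur == end and cur != 0:
            return True     # full copy established
        time.sleep(poll_interval)
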
+ if domain.isPersistent(): + domain.undefine() + + # NOTE (rmk): Establish a temporary mirror of our root disk and + # issue an abort once we have a complete copy. + domain.blockRebase(disk_path, disk_delta, 0, + libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | + libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | + libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW) + + while _wait_for_block_job(domain, disk_path): + time.sleep(0.5) + + domain.blockJobAbort(disk_path, 0) + libvirt_utils.chown(disk_delta, os.getuid()) + finally: + self._conn.defineXML(xml) + + # Convert the delta (CoW) image with a backing file to a flat + # image with no backing file. + libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None, + out_path, image_format) def reboot(self, instance, network_info, reboot_type='SOFT', block_device_info=None): diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 4b3517da7..bd4ec685c 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -63,7 +63,7 @@ def create_image(disk_format, path, size): execute('qemu-img', 'create', '-f', disk_format, path, size) -def create_cow_image(backing_file, path): +def create_cow_image(backing_file, path, size=None): """Create COW image Creates a COW image with the given backing file @@ -89,6 +89,8 @@ def create_cow_image(backing_file, path): # cow_opts += ['preallocation=%s' % base_details['preallocation']] if base_details and base_details.encryption: cow_opts += ['encryption=%s' % base_details.encryption] + if size is not None: + cow_opts += ['size=%s' % size] if cow_opts: # Format as a comma separated list csv_opts = ",".join(cow_opts) @@ -292,14 +294,14 @@ def get_disk_size(path): return int(size) -def get_disk_backing_file(path): +def get_disk_backing_file(path, basename=True): """Get the backing file of a disk image :param path: Path to the disk image :returns: a path to the image's backing store """ backing_file = images.qemu_img_info(path).backing_file - if backing_file: + if backing_file and basename: backing_file = os.path.basename(backing_file) return backing_file @@ -403,16 +405,16 @@ def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt): # NOTE(markmc): ISO is just raw to qemu-img if dest_fmt == 'iso': dest_fmt = 'raw' - qemu_img_cmd = ('qemu-img', - 'convert', - '-f', - source_fmt, - '-O', - dest_fmt, - '-s', - snapshot_name, - disk_path, - out_path) + + qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', + dest_fmt, '-s', snapshot_name, disk_path, out_path) + + # When snapshot name is omitted we do a basic convert, which + # is used by live snapshots. 
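The extract_snapshot() change reduces to dropping '-s' when no snapshot name is supplied, which the conditional below implements. The same rule as a standalone command builder, with the argument order taken from the diff:

    # Builds the qemu-img argv extract_snapshot() runs: with a snapshot
    # name it extracts that snapshot; without one it is a plain convert,
    # which is what the live-snapshot path uses to flatten the delta.
    def build_convert_cmd(source_fmt, dest_fmt, disk_path, out_path,
                          snapshot_name=None):
        cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt]
        if snapshot_name is not None:
            cmd += ['-s', snapshot_name]
        return cmd + [disk_path, out_path]

    # build_convert_cmd('qcow2', 'raw', 'disk.delta', 'out') ->
    # ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', 'disk.delta', 'out']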
+ if snapshot_name is None: + qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', + dest_fmt, disk_path, out_path) + execute(*qemu_img_cmd) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index d90a5e295..45c299830 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -28,7 +28,7 @@ from nova import utils from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import designer -from nova.virt import netutils + LOG = logging.getLogger(__name__) libvirt_vif_opts = [ @@ -72,19 +72,22 @@ class LibvirtBaseVIFDriver(object): return conf + def plug(self, instance, vif): + pass + + def unplug(self, instance, vif): + pass -class LibvirtBridgeDriver(LibvirtBaseVIFDriver): - """VIF driver for Linux bridge.""" + +class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): + """Generic VIF driver for libvirt networking.""" def get_bridge_name(self, network): return network['bridge'] - def get_config(self, instance, network, mapping): + def get_config_bridge(self, instance, network, mapping): """Get VIF configurations for bridge type.""" - - mac_id = mapping['mac'].replace(':', '') - - conf = super(LibvirtBridgeDriver, + conf = super(LibvirtGenericVIFDriver, self).get_config(instance, network, mapping) @@ -93,6 +96,7 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver): conf, self.get_bridge_name(network), self.get_vif_devname(mapping)) + mac_id = mapping['mac'].replace(':', '') name = "nova-instance-" + instance['name'] + "-" + mac_id primary_addr = mapping['ips'][0]['ip'] dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None @@ -112,8 +116,29 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver): return conf - def plug(self, instance, vif): + def get_config(self, instance, network, mapping): + vif_type = mapping.get('vif_type') + + LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s " + "network=%(network)s mapping=%(mapping)s") + % locals()) + + if vif_type is None: + raise exception.NovaException( + _("vif_type parameter must be present " + "for this vif_driver implementation")) + + if vif_type == network_model.VIF_TYPE_BRIDGE: + return self.get_config_bridge(instance, network, mapping) + else: + raise exception.NovaException( + _("Unexpected vif_type=%s") % vif_type) + + def plug_bridge(self, instance, vif): """Ensure that the bridge exists, and add VIF to it.""" + super(LibvirtGenericVIFDriver, + self).plug(instance, vif) + network, mapping = vif if (not network.get('multi_host') and mapping.get('should_create_bridge')): @@ -135,9 +160,71 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver): self.get_bridge_name(network), iface) - def unplug(self, instance, vif): + def plug(self, instance, vif): + network, mapping = vif + vif_type = mapping.get('vif_type') + + LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s " + "network=%(network)s mapping=%(mapping)s") + % locals()) + + if vif_type is None: + raise exception.NovaException( + _("vif_type parameter must be present " + "for this vif_driver implementation")) + + if vif_type == network_model.VIF_TYPE_BRIDGE: + self.plug_bridge(instance, vif) + else: + raise exception.NovaException( + _("Unexpected vif_type=%s") % vif_type) + + def unplug_bridge(self, instance, vif): """No manual unplugging required.""" - pass + super(LibvirtGenericVIFDriver, + self).unplug(instance, vif) + + def unplug(self, instance, vif): + network, mapping = vif + vif_type = mapping.get('vif_type') + + LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s " + "network=%(network)s mapping=%(mapping)s") + % locals()) + + 
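get_config(), plug() and unplug() in the generic driver all share one dispatch shape, repeated below for unplug(). A minimal sketch of that shape; the handler table is illustrative and ValueError stands in for exception.NovaException:

    # Dispatch-on-vif_type pattern used by LibvirtGenericVIFDriver.
    def dispatch_vif(mapping, handlers):
        vif_type = mapping.get('vif_type')
        if vif_type is None:
            raise ValueError("vif_type parameter must be present "
                             "for this vif_driver implementation")
        if vif_type not in handlers:
            raise ValueError("Unexpected vif_type=%s" % vif_type)
        return handlers[vif_type]

In the real driver the table would map network_model.VIF_TYPE_BRIDGE to the *_bridge methods; other types can be slotted in as Quantum drivers start reporting 'vif_type' port bindings.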
if vif_type is None: + raise exception.NovaException( + _("vif_type parameter must be present " + "for this vif_driver implementation")) + + if vif_type == network_model.VIF_TYPE_BRIDGE: + self.unplug_bridge(instance, vif) + else: + raise exception.NovaException( + _("Unexpected vif_type=%s") % vif_type) + + +class LibvirtBridgeDriver(LibvirtGenericVIFDriver): + """Deprecated in favour of LibvirtGenericVIFDriver. + Retained in Grizzly for compatibility with Quantum + drivers which do not yet report 'vif_type' port binding. + To be removed in Hxxxx.""" + + def __init__(self): + LOG.deprecated( + _("LibvirtBridgeDriver is deprecated and " + "will be removed in the Hxxxx release. Please " + "update the 'libvirt_vif_driver' config parameter " + "to use the LibvirtGenericVIFDriver class instead")) + + def get_config(self, instance, network, mapping): + return self.get_config_bridge(instance, network, mapping) + + def plug(self, instance, vif): + self.plug_bridge(instance, vif) + + def unplug(self, instance, vif): + self.unplug_bridge(instance, vif) class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py index fb3a0210c..76caca1b9 100644 --- a/nova/virt/powervm/blockdev.py +++ b/nova/virt/powervm/blockdev.py @@ -18,16 +18,11 @@ import hashlib import os import re -from eventlet import greenthread - -from nova import utils - from nova.image import glance - from nova.openstack.common import cfg from nova.openstack.common import excutils from nova.openstack.common import log as logging - +from nova import utils from nova.virt import images from nova.virt.powervm import command from nova.virt.powervm import common diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py index ccba3cf73..0ce313535 100644 --- a/nova/virt/powervm/driver.py +++ b/nova/virt/powervm/driver.py @@ -14,19 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. -import os import time -from nova.compute import task_states -from nova.compute import vm_states - -from nova import context as nova_context - from nova.image import glance - from nova.openstack.common import cfg from nova.openstack.common import log as logging - from nova.virt import driver from nova.virt.powervm import operator diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index d5a7e5875..67822f2c9 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -1,5 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack LLC. # @@ -20,16 +21,19 @@ A connection to the VMware ESX platform. **Related Flags** -:vmwareapi_host_ip: IPAddress of VMware ESX server. -:vmwareapi_host_username: Username for connection to VMware ESX Server. -:vmwareapi_host_password: Password for connection to VMware ESX Server. -:vmwareapi_task_poll_interval: The interval (seconds) used for polling of - remote tasks - (default: 1.0). -:vmwareapi_api_retry_count: The API retry count in case of failure such as - network failures (socket errors etc.) - (default: 10). - +:vmwareapi_host_ip: IP address of VMware ESX server. +:vmwareapi_host_username: Username for connection to VMware ESX Server. +:vmwareapi_host_password: Password for connection to VMware ESX Server. +:vmwareapi_task_poll_interval: The interval (seconds) used for polling of + remote tasks + (default: 5.0). 
+:vmwareapi_api_retry_count: The API retry count in case of failure such as + network failures (socket errors etc.) + (default: 10). +:vnc_port: VNC starting port (default: 5900) +:vnc_port_total: Total number of VNC ports (default: 10000) +:vnc_password: VNC password +:use_linked_clone: Whether to use linked clone (default: True) """ import time @@ -38,10 +42,12 @@ from eventlet import event from nova import exception from nova.openstack.common import cfg +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils from nova.virt import driver from nova.virt.vmwareapi import error_util +from nova.virt.vmwareapi import host from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vmops @@ -76,6 +82,18 @@ vmwareapi_opts = [ 'socket error, etc. ' 'Used only if compute_driver is ' 'vmwareapi.VMwareESXDriver.'), + cfg.IntOpt('vnc_port', + default=5900, + help='VNC starting port'), + cfg.IntOpt('vnc_port_total', + default=10000, + help='Total number of VNC ports'), + cfg.StrOpt('vnc_password', + default=None, + help='VNC password'), + cfg.BoolOpt('use_linked_clone', + default=True, + help='Whether to use linked clone'), ] CONF = cfg.CONF @@ -100,20 +118,31 @@ class VMwareESXDriver(driver.ComputeDriver): def __init__(self, virtapi, read_only=False, scheme="https"): super(VMwareESXDriver, self).__init__(virtapi) - host_ip = CONF.vmwareapi_host_ip + self._host_ip = CONF.vmwareapi_host_ip host_username = CONF.vmwareapi_host_username host_password = CONF.vmwareapi_host_password api_retry_count = CONF.vmwareapi_api_retry_count - if not host_ip or host_username is None or host_password is None: + if not self._host_ip or host_username is None or host_password is None: raise Exception(_("Must specify vmwareapi_host_ip," "vmwareapi_host_username " "and vmwareapi_host_password to use" "compute_driver=vmwareapi.VMwareESXDriver")) - self._session = VMwareAPISession(host_ip, host_username, host_password, - api_retry_count, scheme=scheme) + self._session = VMwareAPISession(self._host_ip, + host_username, host_password, + api_retry_count, scheme=scheme) self._volumeops = volumeops.VMwareVolumeOps(self._session) - self._vmops = vmops.VMwareVMOps(self._session) + self._vmops = vmops.VMwareVMOps(self._session, self.virtapi, + self._volumeops) + self._host = host.Host(self._session) + self._host_state = None + + @property + def host_state(self): + if not self._host_state: + self._host_state = host.HostState(self._session, + self._host_ip) + return self._host_state def init_host(self, host): """Do the initialization that needs to be done.""" @@ -130,7 +159,8 @@ class VMwareESXDriver(driver.ComputeDriver): def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create VM instance.""" - self._vmops.spawn(context, instance, image_meta, network_info) + self._vmops.spawn(context, instance, image_meta, network_info, + block_device_info) def snapshot(self, context, instance, name, update_task_state): """Create snapshot from a running VM instance.""" @@ -162,6 +192,61 @@ class VMwareESXDriver(driver.ComputeDriver): """Resume the suspended VM instance.""" self._vmops.resume(instance) + def rescue(self, context, instance, network_info, image_meta, + rescue_password): + """Rescue the specified instance.""" + self._vmops.rescue(context, instance, network_info, image_meta) + + def unrescue(self, instance, network_info): + """Unrescue the specified 
instance.""" + self._vmops.unrescue(instance) + + def power_off(self, instance): + """Power off the specified instance.""" + self._vmops.power_off(instance) + + def power_on(self, instance): + """Power on the specified instance.""" + self._vmops.power_on(instance) + + def migrate_disk_and_power_off(self, context, instance, dest, + instance_type, network_info, + block_device_info=None): + """ + Transfers the disk of a running instance in multiple phases, turning + off the instance before the end. + """ + return self._vmops.migrate_disk_and_power_off(context, instance, + dest, instance_type) + + def confirm_migration(self, migration, instance, network_info): + """Confirms a resize, destroying the source VM.""" + self._vmops.confirm_migration(migration, instance, network_info) + + def finish_revert_migration(self, instance, network_info, + block_device_info=None): + """Finish reverting a resize, powering back on the instance.""" + self._vmops.finish_revert_migration(instance) + + def finish_migration(self, context, migration, instance, disk_info, + network_info, image_meta, resize_instance=False, + block_device_info=None): + """Completes a resize, turning on the migrated instance.""" + self._vmops.finish_migration(context, migration, instance, disk_info, + network_info, image_meta, resize_instance) + + def live_migration(self, context, instance_ref, dest, + post_method, recover_method, block_migration=False, + migrate_data=None): + """Live migration of an instance to another host.""" + self._vmops.live_migration(context, instance_ref, dest, + post_method, recover_method, + block_migration) + + def poll_rebooting_instances(self, timeout, instances): + """Poll for rebooting instances.""" + self._vmops.poll_rebooting_instances(timeout, instances) + def get_info(self, instance): """Return info about the VM instance.""" return self._vmops.get_info(instance) @@ -174,10 +259,18 @@ class VMwareESXDriver(driver.ComputeDriver): """Return snapshot of console.""" return self._vmops.get_console_output(instance) + def get_vnc_console(self, instance): + """Return link to instance's VNC console.""" + return self._vmops.get_vnc_console(instance) + def get_volume_connector(self, instance): """Return volume connector information.""" return self._volumeops.get_volume_connector(instance) + def get_host_ip_addr(self): + """Retrieves the IP address of the ESX host.""" + return self._host_ip + def attach_volume(self, connection_info, instance, mountpoint): """Attach volume storage to VM instance.""" return self._volumeops.attach_volume(connection_info, @@ -197,8 +290,53 @@ class VMwareESXDriver(driver.ComputeDriver): 'password': CONF.vmwareapi_host_password} def get_available_resource(self, nodename): - """This method is supported only by libvirt.""" - return + """Retrieve resource info. + + This method is called when nova-compute launches, and + as part of a periodic task. 
+ + :returns: dictionary describing resources + + """ + host_stats = self.get_host_stats(refresh=True) + + # Updating host information + dic = {'vcpus': host_stats["vcpus"], + 'memory_mb': host_stats['host_memory_total'], + 'local_gb': host_stats['disk_total'], + 'vcpus_used': 0, + 'memory_mb_used': host_stats['host_memory_total'] - + host_stats['host_memory_free'], + 'local_gb_used': host_stats['disk_used'], + 'hypervisor_type': host_stats['hypervisor_type'], + 'hypervisor_version': host_stats['hypervisor_version'], + 'hypervisor_hostname': host_stats['hypervisor_hostname'], + 'cpu_info': jsonutils.dumps(host_stats['cpu_info'])} + + return dic + + def update_host_status(self): + """Update the status info of the host, and return those values + to the calling program.""" + return self.host_state.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first.""" + return self.host_state.get_host_stats(refresh=refresh) + + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + return self._host.host_power_action(host, action) + + def host_maintenance_mode(self, host, mode): + """Start/Stop host maintenance window. On start, it triggers + guest VMs evacuation.""" + return self._host.host_maintenance_mode(host, mode) + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + return self._host.set_host_enabled(host, enabled) def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 7fb014075..692e5f253 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -255,6 +255,8 @@ class Datastore(ManagedObject): super(Datastore, self).__init__("Datastore") self.set("summary.type", "VMFS") self.set("summary.name", "fake-ds") + self.set("summary.capacity", 1024 * 1024 * 1024) + self.set("summary.freeSpace", 500 * 1024 * 1024) class HostNetworkSystem(ManagedObject): @@ -285,6 +287,29 @@ class HostSystem(ManagedObject): host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj self.set("configManager.networkSystem", host_net_sys) + summary = DataObject() + hardware = DataObject() + hardware.numCpuCores = 8 + hardware.numCpuPkgs = 2 + hardware.numCpuThreads = 16 + hardware.vendor = "Intel" + hardware.cpuModel = "Intel(R) Xeon(R)" + hardware.memorySize = 1024 * 1024 * 1024 + summary.hardware = hardware + + quickstats = DataObject() + quickstats.overallMemoryUsage = 500 + summary.quickStats = quickstats + + product = DataObject() + product.name = "VMware ESXi" + product.version = "5.0.0" + config = DataObject() + config.product = product + summary.config = config + + self.set("summary", summary) + if _db_content.get("Network", None) is None: create_network() net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj @@ -599,6 +624,11 @@ class FakeVim(object): """Fakes a return.""" return + def _just_return_task(self, method): + """Fakes a task return.""" + task_mdo = create_task(method, "success") + return task_mdo.obj + def _unregister_vm(self, method, *args, **kwargs): """Unregisters a VM from the Host System.""" vm_ref = args[0] @@ -627,7 +657,7 @@ class FakeVim(object): def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"): """Sets power state for the VM.""" if _db_content.get("VirtualMachine", None) is None: - raise exception.NotFound(_(" No Virtual 
Machine has been " + raise exception.NotFound(_("No Virtual Machine has been " "registered yet")) if vm_ref not in _db_content.get("VirtualMachine"): raise exception.NotFound(_("Virtual Machine with ref %s is not " @@ -722,6 +752,9 @@ class FakeVim(object): elif attr_name == "DeleteVirtualDisk_Task": return lambda *args, **kwargs: self._delete_disk(attr_name, *args, **kwargs) + elif attr_name == "Destroy_Task": + return lambda *args, **kwargs: self._unregister_vm(attr_name, + *args, **kwargs) elif attr_name == "UnregisterVM": return lambda *args, **kwargs: self._unregister_vm(attr_name, *args, **kwargs) @@ -739,3 +772,15 @@ class FakeVim(object): elif attr_name == "AddPortGroup": return lambda *args, **kwargs: self._add_port_group(attr_name, *args, **kwargs) + elif attr_name == "RebootHost_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) + elif attr_name == "ShutdownHost_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) + elif attr_name == "PowerDownHostToStandBy_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) + elif attr_name == "PowerUpHostFromStandBy_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) + elif attr_name == "EnterMaintenanceMode_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) + elif attr_name == "ExitMaintenanceMode_Task": + return lambda *args, **kwargs: self._just_return_task(attr_name) diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py new file mode 100644 index 000000000..09b8f1fe3 --- /dev/null +++ b/nova/virt/vmwareapi/host.py @@ -0,0 +1,140 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Management class for host-related functions (start, reboot, etc). +""" + +from nova import exception +from nova.openstack.common import log as logging +from nova.virt.vmwareapi import vim_util +from nova.virt.vmwareapi import vm_util + +LOG = logging.getLogger(__name__) + + +class Host(object): + """ + Implements host related operations. + """ + def __init__(self, session): + self._session = session + + def host_power_action(self, host, action): + """Reboots or shuts down the host.""" + host_mor = self._session._call_method(vim_util, "get_objects", + "HostSystem")[0].obj + LOG.debug(_("%(action)s %(host)s") % locals()) + if action == "reboot": + host_task = self._session._call_method( + self._session._get_vim(), + "RebootHost_Task", host_mor, + force=False) + elif action == "shutdown": + host_task = self._session._call_method( + self._session._get_vim(), + "ShutdownHost_Task", host_mor, + force=False) + elif action == "startup": + host_task = self._session._call_method( + self._session._get_vim(), + "PowerUpHostFromStandBy_Task", host_mor, + timeoutSec=60) + self._session._wait_for_task(host, host_task) + + def host_maintenance_mode(self, host, mode): + """Start/Stop host maintenance window. 
On start, it triggers + guest VMs evacuation.""" + host_mor = self._session._call_method(vim_util, "get_objects", + "HostSystem")[0].obj + LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s") % locals()) + if mode: + host_task = self._session._call_method( + self._session._get_vim(), + "EnterMaintenanceMode_Task", + host_mor, timeout=0, + evacuatePoweredOffVms=True) + else: + host_task = self._session._call_method( + self._session._get_vim(), + "ExitMaintenanceMode_Task", + host_mor, timeout=0) + self._session._wait_for_task(host, host_task) + + def set_host_enabled(self, _host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass + + +class HostState(object): + """Manages information about the ESX host this compute + node is running on. + """ + def __init__(self, session, host_name): + super(HostState, self).__init__() + self._session = session + self._host_name = host_name + self._stats = {} + self.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first. + """ + if refresh: + self.update_status() + return self._stats + + def update_status(self): + """Update the current state of the host. + """ + host_mor = self._session._call_method(vim_util, "get_objects", + "HostSystem")[0].obj + summary = self._session._call_method(vim_util, + "get_dynamic_property", + host_mor, + "HostSystem", + "summary") + + if summary is None: + return + + try: + ds = vm_util.get_datastore_ref_and_name(self._session) + except exception.DatastoreNotFound: + ds = (None, None, 0, 0) + + data = {} + data["vcpus"] = summary.hardware.numCpuThreads + data["cpu_info"] = \ + {"vendor": summary.hardware.vendor, + "model": summary.hardware.cpuModel, + "topology": {"cores": summary.hardware.numCpuCores, + "sockets": summary.hardware.numCpuPkgs, + "threads": summary.hardware.numCpuThreads} + } + data["disk_total"] = ds[2] / (1024 * 1024) + data["disk_available"] = ds[3] / (1024 * 1024) + data["disk_used"] = data["disk_total"] - data["disk_available"] + data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024) + data["host_memory_free"] = data["host_memory_total"] - \ + summary.quickStats.overallMemoryUsage + data["hypervisor_type"] = summary.config.product.name + data["hypervisor_version"] = summary.config.product.version + data["hypervisor_hostname"] = self._host_name + + self._stats = data + return data diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py index 39ea8e2e8..5dcdc6fdb 100644 --- a/nova/virt/vmwareapi/read_write_util.py +++ b/nova/virt/vmwareapi/read_write_util.py @@ -140,7 +140,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile): self.conn.getresponse() except Exception, excep: LOG.debug(_("Exception during HTTP connection close in " - "VMwareHTTpWrite. Exception is %s") % excep) + "VMwareHTTPWrite. Exception is %s") % excep) super(VMwareHTTPWriteFile, self).close() diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index c5b524186..5684e6aa6 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -45,7 +45,7 @@ def ensure_vlan_bridge(self, session, network): # Check if the vlan_interface physical network adapter exists on the # host. 
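One unit subtlety back in HostState.update_status() above: vSphere reports summary.hardware.memorySize in bytes but summary.quickStats.overallMemoryUsage in MB, so only the former is divided down. With the values wired into fake.py:

    # Memory math from HostState.update_status(); inputs match fake.py.
    memory_size = 1024 * 1024 * 1024      # summary.hardware.memorySize (bytes)
    overall_memory_usage = 500            # quickStats.overallMemoryUsage (MB)
    host_memory_total = memory_size / (1024 * 1024)                # 1024 MB
    host_memory_free = host_memory_total - overall_memory_usage    # 524 MB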
if not network_util.check_if_vlan_interface_exists(session, - vlan_interface): + vlan_interface): raise exception.NetworkAdapterNotFound(adapter=vlan_interface) # Get the vSwitch associated with the Physical Adapter diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 381c47193..af481b566 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -20,6 +20,7 @@ The VMware API VM utility module to build SOAP object specs. """ import copy +from nova import exception from nova.virt.vmwareapi import vim_util @@ -360,6 +361,27 @@ def delete_virtual_disk_spec(client_factory, device): return virtual_device_config +def clone_vm_spec(client_factory, location, + power_on=False, snapshot=None, template=False): + """Builds the VM clone spec.""" + clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec') + clone_spec.location = location + clone_spec.powerOn = power_on + clone_spec.snapshot = snapshot + clone_spec.template = template + return clone_spec + + +def relocate_vm_spec(client_factory, datastore=None, host=None, + disk_move_type="moveAllDiskBackingsAndAllowSharing"): + """Builds the VM relocation spec.""" + rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec') + rel_spec.datastore = datastore + rel_spec.diskMoveType = disk_move_type + rel_spec.host = host + return rel_spec + + def get_dummy_vm_create_spec(client_factory, name, data_store_name): """Builds the dummy VM create spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') @@ -423,6 +445,31 @@ def get_add_vswitch_port_group_spec(client_factory, vswitch_name, return vswitch_port_group_spec +def get_vnc_config_spec(client_factory, port, password): + """Builds the vnc config spec.""" + virtual_machine_config_spec = client_factory.create( + 'ns0:VirtualMachineConfigSpec') + + opt_enabled = client_factory.create('ns0:OptionValue') + opt_enabled.key = "RemoteDisplay.vnc.enabled" + opt_enabled.value = "true" + opt_port = client_factory.create('ns0:OptionValue') + opt_port.key = "RemoteDisplay.vnc.port" + opt_port.value = port + opt_pass = client_factory.create('ns0:OptionValue') + opt_pass.key = "RemoteDisplay.vnc.password" + opt_pass.value = password + virtual_machine_config_spec.extraConfig = [opt_enabled, opt_port, opt_pass] + return virtual_machine_config_spec + + +def search_datastore_spec(client_factory, file_name): + """Builds the datastore search spec.""" + search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec') + search_spec.matchPattern = [file_name] + return search_spec + + def get_vm_ref_from_name(session, vm_name): """Get reference to the VM with the name specified.""" vms = session._call_method(vim_util, "get_objects", @@ -431,3 +478,31 @@ def get_vm_ref_from_name(session, vm_name): if vm.propSet[0].val == vm_name: return vm.obj return None + + +def get_datastore_ref_and_name(session): + """Get the datastore list and choose the first local storage.""" + data_stores = session._call_method(vim_util, "get_objects", + "Datastore", ["summary.type", "summary.name", + "summary.capacity", "summary.freeSpace"]) + for elem in data_stores: + ds_name = None + ds_type = None + ds_cap = None + ds_free = None + for prop in elem.propSet: + if prop.name == "summary.type": + ds_type = prop.val + elif prop.name == "summary.name": + ds_name = prop.val + elif prop.name == "summary.capacity": + ds_cap = prop.val + elif prop.name == "summary.freeSpace": + ds_free = prop.val + # Local storage identifier + if ds_type == "VMFS" or ds_type == 
"NFS": + data_store_name = ds_name + return elem.obj, data_store_name, ds_cap, ds_free + + if data_store_name is None: + raise exception.DatastoreNotFound() diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 187fc449d..75f85454b 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -27,12 +27,16 @@ import urllib import urllib2 import uuid +from nova import block_device +from nova.compute import api as compute from nova.compute import power_state from nova.compute import task_states +from nova import context as nova_context from nova import exception from nova.openstack.common import cfg -from nova.openstack.common import importutils +from nova.openstack.common import excutils from nova.openstack.common import log as logging +from nova.virt import driver from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vif as vmwarevif from nova.virt.vmwareapi import vim_util @@ -52,21 +56,33 @@ vmware_group = cfg.OptGroup(name='vmware', CONF = cfg.CONF CONF.register_group(vmware_group) CONF.register_opts(vmware_vif_opts, vmware_group) +CONF.import_opt('vnc_enabled', 'nova.vnc') LOG = logging.getLogger(__name__) VMWARE_POWER_STATES = { 'poweredOff': power_state.SHUTDOWN, 'poweredOn': power_state.RUNNING, - 'suspended': power_state.PAUSED} + 'suspended': power_state.SUSPENDED} +VMWARE_PREFIX = 'vmware' + + +RESIZE_TOTAL_STEPS = 4 class VMwareVMOps(object): """Management class for VM-related tasks.""" - def __init__(self, session): + def __init__(self, session, virtapi, volumeops): """Initializer.""" + self.compute_api = compute.API() self._session = session + self._virtapi = virtapi + self._volumeops = volumeops + self._instance_path_base = VMWARE_PREFIX + CONF.base_dir_name + self._default_root_device = 'vda' + self._rescue_suffix = '-rescue' + self._poll_rescue_last_ran = None def list_instances(self): """Lists the VM instances that are registered with the ESX host.""" @@ -83,13 +99,14 @@ class VMwareVMOps(object): vm_name = prop.val elif prop.name == "runtime.connectionState": conn_state = prop.val - # Ignoring the oprhaned or inaccessible VMs + # Ignoring the orphaned or inaccessible VMs if conn_state not in ["orphaned", "inaccessible"]: lst_vm_names.append(vm_name) LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names))) return lst_vm_names - def spawn(self, context, instance, image_meta, network_info): + def spawn(self, context, instance, image_meta, network_info, + block_device_info=None): """ Creates a VM instance. @@ -97,44 +114,28 @@ class VMwareVMOps(object): 1. Create a VM with no disk and the specifics in the instance object like RAM size. - 2. Create a dummy vmdk of the size of the disk file that is to be - uploaded. This is required just to create the metadata file. - 3. Delete the -flat.vmdk file created in the above step and retain - the metadata .vmdk file. - 4. Upload the disk file. - 5. Attach the disk to the VM by reconfiguring the same. - 6. Power on the VM. + 2. For flat disk + 2.1. Create a dummy vmdk of the size of the disk file that is to be + uploaded. This is required just to create the metadata file. + 2.2. Delete the -flat.vmdk file created in the above step and retain + the metadata .vmdk file. + 2.3. Upload the disk file. + 3. For sparse disk + 3.1. Upload the disk file to a -sparse.vmdk file. + 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk. + 3.3. Delete the -sparse.vmdk file. + 4. Attach the disk to the VM by reconfiguring the same. + 5. Power on the VM. 
""" - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) if vm_ref: - raise exception.InstanceExists(name=instance.name) + raise exception.InstanceExists(name=instance['name']) client_factory = self._session._get_vim().client.factory service_content = self._session._get_vim().get_service_content() - - def _get_datastore_ref(): - """Get the datastore list and choose the first local storage.""" - data_stores = self._session._call_method(vim_util, "get_objects", - "Datastore", ["summary.type", "summary.name"]) - for elem in data_stores: - ds_name = None - ds_type = None - for prop in elem.propSet: - if prop.name == "summary.type": - ds_type = prop.val - elif prop.name == "summary.name": - ds_name = prop.val - # Local storage identifier - if ds_type == "VMFS": - data_store_name = ds_name - return data_store_name - - if data_store_name is None: - msg = _("Couldn't get a local Datastore reference") - LOG.error(msg, instance=instance) - raise exception.NovaException(msg) - - data_store_name = _get_datastore_ref() + ds = vm_util.get_datastore_ref_and_name(self._session) + data_store_ref = ds[0] + data_store_name = ds[1] def _get_image_properties(): """ @@ -142,31 +143,21 @@ class VMwareVMOps(object): repository. """ _image_info = vmware_images.get_vmdk_size_and_properties(context, - instance.image_ref, - instance) + instance['image_ref'], + instance) image_size, image_properties = _image_info vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") adapter_type = image_properties.get("vmware_adaptertype", "lsiLogic") - return vmdk_file_size_in_kb, os_type, adapter_type - - vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties() + disk_type = image_properties.get("vmware_disktype", + "preallocated") + return vmdk_file_size_in_kb, os_type, adapter_type, disk_type - def _get_vmfolder_and_res_pool_mors(): - """Get the Vm folder ref from the datacenter.""" - dc_objs = self._session._call_method(vim_util, "get_objects", - "Datacenter", ["vmFolder"]) - # There is only one default datacenter in a standalone ESX host - vm_folder_mor = dc_objs[0].propSet[0].val + (vmdk_file_size_in_kb, os_type, adapter_type, + disk_type) = _get_image_properties() - # Get the resource pool. Taking the first resource pool coming our - # way. Assuming that is the default resource pool. 
- res_pool_mor = self._session._call_method(vim_util, "get_objects", - "ResourcePool")[0].obj - return vm_folder_mor, res_pool_mor - - vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors() + vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs() def _check_if_network_bridge_exists(network_name): network_ref = network_util.get_network_with_the_name( @@ -177,6 +168,8 @@ class VMwareVMOps(object): def _get_vif_infos(): vif_infos = [] + if network_info is None: + return vif_infos for (network, mapping) in network_info: mac_address = mapping['mac'] network_name = network['bridge'] or \ @@ -201,33 +194,29 @@ class VMwareVMOps(object): def _execute_create_vm(): """Create VM on ESX host.""" - LOG.debug(_("Creating VM on the ESX host"), instance=instance) + LOG.debug(_("Creating VM on the ESX host"), instance=instance) # Create the VM on the ESX host vm_create_task = self._session._call_method( self._session._get_vim(), - "CreateVM_Task", vm_folder_mor, - config=config_spec, pool=res_pool_mor) + "CreateVM_Task", vm_folder_ref, + config=config_spec, pool=res_pool_ref) self._session._wait_for_task(instance['uuid'], vm_create_task) - LOG.debug(_("Created VM on the ESX host"), instance=instance) + LOG.debug(_("Created VM on the ESX host"), instance=instance) _execute_create_vm() + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) # Set the machine.id parameter of the instance to inject # the NIC configuration inside the VM if CONF.flat_injected: self._set_machine_id(client_factory, instance, network_info) - # Naming the VM files in correspondence with the VM instance name - # The flat vmdk file name - flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name, - instance.name) - # The vmdk meta-data file - uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name) - flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name, - flat_uploaded_vmdk_name) - uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name, - uploaded_vmdk_name) + # Set the vnc configuration of the instance, vnc port starts from 5900 + if CONF.vnc_enabled: + vnc_port = self._get_vnc_port(vm_ref) + vnc_pass = CONF.vnc_password or '' + self._set_vnc_config(client_factory, instance, vnc_port, vnc_pass) def _create_virtual_disk(): """Create a virtual disk of the size of flat vmdk file.""" @@ -238,103 +227,186 @@ class VMwareVMOps(object): # Here we assume thick provisioning and lsiLogic for the adapter # type LOG.debug(_("Creating Virtual Disk of size " - "%(vmdk_file_size_in_kb)s KB and adapter type " - "%(adapter_type)s on the ESX host local store" - " %(data_store_name)s") % + "%(vmdk_file_size_in_kb)s KB and adapter type " + "%(adapter_type)s on the ESX host local store " + "%(data_store_name)s") % {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, "adapter_type": adapter_type, "data_store_name": data_store_name}, instance=instance) vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory, - vmdk_file_size_in_kb, adapter_type) + vmdk_file_size_in_kb, adapter_type, + disk_type) vmdk_create_task = self._session._call_method( self._session._get_vim(), "CreateVirtualDisk_Task", service_content.virtualDiskManager, name=uploaded_vmdk_path, - datacenter=self._get_datacenter_name_and_ref()[0], + datacenter=dc_ref, spec=vmdk_create_spec) self._session._wait_for_task(instance['uuid'], vmdk_create_task) LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s" - " KB on the ESX host local store " - "%(data_store_name)s") % + " KB and type %(disk_type)s on " 
+ "the ESX host local store %(data_store_name)s") % {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, + "disk_type": disk_type, "data_store_name": data_store_name}, instance=instance) - _create_virtual_disk() - - def _delete_disk_file(): - LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s " + def _delete_disk_file(vmdk_path): + LOG.debug(_("Deleting the file %(vmdk_path)s " "on the ESX host local" "store %(data_store_name)s") % - {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path, + {"vmdk_path": vmdk_path, "data_store_name": data_store_name}, instance=instance) - # Delete the -flat.vmdk file created. .vmdk file is retained. + # Delete the vmdk file. vmdk_delete_task = self._session._call_method( self._session._get_vim(), "DeleteDatastoreFile_Task", service_content.fileManager, - name=flat_uploaded_vmdk_path) + name=vmdk_path, + datacenter=dc_ref) self._session._wait_for_task(instance['uuid'], vmdk_delete_task) - LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the " + LOG.debug(_("Deleted the file %(vmdk_path)s on the " "ESX host local store %(data_store_name)s") % - {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path, + {"vmdk_path": vmdk_path, "data_store_name": data_store_name}, instance=instance) - _delete_disk_file() - - cookies = self._session._get_vim().client.options.transport.cookiejar - def _fetch_image_on_esx_datastore(): """Fetch image from Glance to ESX datastore.""" LOG.debug(_("Downloading image file data %(image_ref)s to the ESX " "data store %(data_store_name)s") % - {'image_ref': instance.image_ref, + {'image_ref': instance['image_ref'], 'data_store_name': data_store_name}, instance=instance) - # Upload the -flat.vmdk file whose meta-data file we just created - # above + # For flat disk, upload the -flat.vmdk file whose meta-data file + # we just created above + # For sparse disk, upload the -sparse.vmdk file to be copied into + # a flat vmdk + upload_vmdk_name = sparse_uploaded_vmdk_name \ + if disk_type == "sparse" else flat_uploaded_vmdk_name vmware_images.fetch_image( context, - instance.image_ref, + instance['image_ref'], instance, host=self._session._host_ip, - data_center_name=self._get_datacenter_name_and_ref()[1], + data_center_name=self._get_datacenter_ref_and_name()[1], datastore_name=data_store_name, cookies=cookies, - file_path=flat_uploaded_vmdk_name) - LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX " - "data store %(data_store_name)s") % - {'image_ref': instance.image_ref, + file_path=upload_vmdk_name) + LOG.debug(_("Downloaded image file data %(image_ref)s to " + "%(upload_vmdk_name)s on the ESX data store " + "%(data_store_name)s") % + {'image_ref': instance['image_ref'], + 'upload_vmdk_name': upload_vmdk_name, 'data_store_name': data_store_name}, instance=instance) - _fetch_image_on_esx_datastore() - - vm_ref = self._get_vm_ref_from_the_name(instance.name) - def _attach_vmdk_to_the_vm(): - """ - Attach the vmdk uploaded to the VM. VM reconfigure is done - to do so. 
- """ - vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec( - client_factory, - adapter_type, "preallocated", - uploaded_vmdk_path, vmdk_file_size_in_kb) - LOG.debug(_("Reconfiguring VM instance to attach the image disk"), - instance=instance) - reconfig_task = self._session._call_method( - self._session._get_vim(), - "ReconfigVM_Task", vm_ref, - spec=vmdk_attach_config_spec) - self._session._wait_for_task(instance['uuid'], reconfig_task) - LOG.debug(_("Reconfigured VM instance to attach the image disk"), + def _copy_virtual_disk(): + """Copy a sparse virtual disk to a thin virtual disk.""" + # Copy a sparse virtual disk to a thin virtual disk. This is also + # done to generate the meta-data file whose specifics + # depend on the size of the disk, thin/thick provisioning and the + # storage adapter type. + LOG.debug(_("Copying Virtual Disk of size " + "%(vmdk_file_size_in_kb)s KB and adapter type " + "%(adapter_type)s on the ESX host local store " + "%(data_store_name)s to disk type %(disk_type)s") % + {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, + "adapter_type": adapter_type, + "data_store_name": data_store_name, + "disk_type": disk_type}, instance=instance) + vmdk_copy_spec = vm_util.get_vmdk_create_spec(client_factory, + vmdk_file_size_in_kb, adapter_type, + disk_type) + vmdk_copy_task = self._session._call_method( + self._session._get_vim(), + "CopyVirtualDisk_Task", + service_content.virtualDiskManager, + sourceName=sparse_uploaded_vmdk_path, + sourceDatacenter=self._get_datacenter_ref_and_name()[0], + destName=uploaded_vmdk_path, + destSpec=vmdk_copy_spec) + self._session._wait_for_task(instance['uuid'], vmdk_copy_task) + LOG.debug(_("Copied Virtual Disk of size %(vmdk_file_size_in_kb)s" + " KB and type %(disk_type)s on " + "the ESX host local store %(data_store_name)s") % + {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, + "disk_type": disk_type, + "data_store_name": data_store_name}, + instance=instance) + + ebs_root = self._volume_in_mapping(self._default_root_device, + block_device_info) - _attach_vmdk_to_the_vm() + if not ebs_root: + linked_clone = CONF.use_linked_clone + if linked_clone: + upload_folder = self._instance_path_base + upload_name = instance['image_ref'] + else: + upload_folder = instance['name'] + upload_name = instance['name'] + + # The vmdk meta-data file + uploaded_vmdk_name = "%s/%s.vmdk" % (upload_folder, upload_name) + uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name, + uploaded_vmdk_name) + + if not (linked_clone and self._check_if_folder_file_exists( + data_store_ref, data_store_name, + upload_folder, upload_name + ".vmdk")): + + # Naming the VM files in correspondence with the VM instance + # The flat vmdk file name + flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % ( + upload_folder, upload_name) + # The sparse vmdk file name for sparse disk image + sparse_uploaded_vmdk_name = "%s/%s-sparse.vmdk" % ( + upload_folder, upload_name) + + flat_uploaded_vmdk_path = vm_util.build_datastore_path( + data_store_name, + flat_uploaded_vmdk_name) + sparse_uploaded_vmdk_path = vm_util.build_datastore_path( + data_store_name, + sparse_uploaded_vmdk_name) + dc_ref = self._get_datacenter_ref_and_name()[0] + + if disk_type != "sparse": + # Create a flat virtual disk and retain the metadata file. 
+ _create_virtual_disk() + _delete_disk_file(flat_uploaded_vmdk_path) + + cookies = \ + self._session._get_vim().client.options.transport.cookiejar + _fetch_image_on_esx_datastore() + + if disk_type == "sparse": + # Copy the sparse virtual disk to a thin virtual disk. + disk_type = "thin" + _copy_virtual_disk() + _delete_disk_file(sparse_uploaded_vmdk_path) + else: + # linked clone base disk exists + if disk_type == "sparse": + disk_type = "thin" + + # Attach the vmdk uploaded to the VM. + self._volumeops.attach_disk_to_vm( + vm_ref, instance, + adapter_type, disk_type, uploaded_vmdk_path, + vmdk_file_size_in_kb, linked_clone) + else: + # Attach the root disk to the VM. + root_disk = driver.block_device_info_get_mapping( + block_device_info)[0] + connection_info = root_disk['connection_info'] + self._volumeops.attach_volume(connection_info, instance['name'], + self._default_root_device) def _power_on_vm(): """Power on the VM.""" @@ -362,9 +434,9 @@ class VMwareVMOps(object): 4. Now upload the -flat.vmdk file to the image store. 5. Delete the coalesced .vmdk and -flat.vmdk created. """ - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance.id) + raise exception.InstanceNotFound(instance_id=instance['uuid']) client_factory = self._session._get_vim().client.factory service_content = self._session._get_vim().get_service_content() @@ -378,7 +450,7 @@ class VMwareVMOps(object): disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type( hardware_devices) datastore_name = vm_util.split_datastore_path( - vmdk_file_path_before_snapshot)[0] + vmdk_file_path_before_snapshot)[0] os_type = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "summary.config.guestId") @@ -395,7 +467,7 @@ class VMwareVMOps(object): snapshot_task = self._session._call_method( self._session._get_vim(), "CreateSnapshot_Task", vm_ref, - name="%s-snapshot" % instance.name, + name="%s-snapshot" % instance['name'], description="Taking Snapshot of the VM", memory=False, quiesce=True) @@ -437,13 +509,14 @@ class VMwareVMOps(object): random_name = str(uuid.uuid4()) dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name, "vmware-tmp/%s.vmdk" % random_name) - dc_ref = self._get_datacenter_name_and_ref()[0] + dc_ref = self._get_datacenter_ref_and_name()[0] def _copy_vmdk_content(): # Copy the contents of the disk ( or disks, if there were snapshots # done earlier) to a temporary vmdk file. 
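The copy that follows targets the dest_vmdk_file_location built above. A sketch of that staging path, assuming build_datastore_path() renders the usual '[datastore] relative/path' form:

    # snapshot() stages the coalesced disk under vmware-tmp/ with a
    # generated name before uploading it to the image service.
    import uuid
    datastore_name = "fake-ds"            # illustrative
    random_name = str(uuid.uuid4())
    dest_vmdk_file_location = "[%s] vmware-tmp/%s.vmdk" % (datastore_name,
                                                           random_name)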
copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory, - adapter_type) + adapter_type, + disk_type) LOG.debug(_('Copying disk data before snapshot of the VM'), instance=instance) copy_disk_task = self._session._call_method( @@ -476,7 +549,7 @@ class VMwareVMOps(object): adapter_type=adapter_type, image_version=1, host=self._session._host_ip, - data_center_name=self._get_datacenter_name_and_ref()[1], + data_center_name=self._get_datacenter_ref_and_name()[1], datastore_name=datastore_name, cookies=cookies, file_path="vmware-tmp/%s-flat.vmdk" % random_name) @@ -509,9 +582,9 @@ class VMwareVMOps(object): def reboot(self, instance, network_info): """Reboot a VM instance.""" - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance.id) + raise exception.InstanceNotFound(instance_id=instance['uuid']) self.plug_vifs(instance, network_info) @@ -552,6 +625,38 @@ class VMwareVMOps(object): self._session._wait_for_task(instance['uuid'], reset_task) LOG.debug(_("Did hard reboot of VM"), instance=instance) + def _delete(self, instance, network_info): + """ + Destroy a VM instance. Steps followed are: + 1. Power off the VM, if it is in poweredOn state. + 2. Destroy the VM. + """ + try: + vm_ref = vm_util.get_vm_ref_from_name(self._session, + instance['name']) + if vm_ref is None: + LOG.debug(_("instance not present"), instance=instance) + return + + self.power_off(instance) + + try: + LOG.debug(_("Destroying the VM"), instance=instance) + destroy_task = self._session._call_method( + self._session._get_vim(), + "Destroy_Task", vm_ref) + self._session._wait_for_task(instance['uuid'], destroy_task) + LOG.debug(_("Destroyed the VM"), instance=instance) + except Exception, excep: + LOG.warn(_("In vmwareapi:vmops:delete, got this exception" + " while destroying the VM: %s") % str(excep), + instance=instance) + + if network_info: + self.unplug_vifs(instance, network_info) + except Exception, exc: + LOG.exception(exc, instance=instance) + def destroy(self, instance, network_info, destroy_disks=True): """ Destroy a VM instance. Steps followed are: @@ -560,7 +665,8 @@ class VMwareVMOps(object): 3. Delete the contents of the folder holding the VM related data. """ try: - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, + instance['name']) if vm_ref is None: LOG.debug(_("instance not present"), instance=instance) return @@ -592,14 +698,15 @@ class VMwareVMOps(object): try: LOG.debug(_("Unregistering the VM"), instance=instance) self._session._call_method(self._session._get_vim(), - "UnregisterVM", vm_ref) + "UnregisterVM", vm_ref) LOG.debug(_("Unregistered the VM"), instance=instance) except Exception, excep: LOG.warn(_("In vmwareapi:vmops:destroy, got this exception" " while un-registering the VM: %s") % str(excep), instance=instance) - self.unplug_vifs(instance, network_info) + if network_info: + self.unplug_vifs(instance, network_info) # Delete the folder holding the VM related content on # the datastore. 
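The new _delete() and the existing destroy() above differ mainly in the vSphere call they lean on: Destroy_Task removes the VM together with its files, while UnregisterVM only drops it from the inventory, which is why destroy() must still delete the instance directory itself. A hedged sketch of the _delete() core, with session and vim standing in for the diff's self._session and self._session._get_vim():

    # Hard delete: one task removes both the inventory entry and the
    # on-datastore files, so no DeleteDatastoreFile_Task follow-up.
    def hard_delete(session, vim, vm_ref, instance_uuid):
        destroy_task = session._call_method(vim, "Destroy_Task", vm_ref)
        session._wait_for_task(instance_uuid, destroy_task)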
@@ -617,7 +724,8 @@ class VMwareVMOps(object): vim, "DeleteDatastoreFile_Task", vim.get_service_content().fileManager, - name=dir_ds_compliant_path) + name=dir_ds_compliant_path, + datacenter=self._get_datacenter_ref_and_name()[0]) self._session._wait_for_task(instance['uuid'], delete_task) LOG.debug(_("Deleted contents of the VM from " "datastore %(datastore_name)s") % @@ -642,9 +750,9 @@ class VMwareVMOps(object): def suspend(self, instance): """Suspend the specified instance.""" - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance.id) + raise exception.InstanceNotFound(instance_id=instance['uuid']) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, @@ -658,17 +766,17 @@ class VMwareVMOps(object): LOG.debug(_("Suspended the VM"), instance=instance) # Raise Exception if VM is poweredOff elif pwr_state == "poweredOff": - reason = _("instance is powered off and can not be suspended.") + reason = _("instance is powered off and cannot be suspended.") raise exception.InstanceSuspendFailure(reason=reason) - - LOG.debug(_("VM was already in suspended state. So returning " - "without doing anything"), instance=instance) + else: + LOG.debug(_("VM was already in suspended state. So returning " + "without doing anything"), instance=instance) def resume(self, instance): """Resume the specified instance.""" - vm_ref = self._get_vm_ref_from_the_name(instance.name) + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance.id) + raise exception.InstanceNotFound(instance_id=instance['uuid']) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, @@ -684,9 +792,263 @@ class VMwareVMOps(object): reason = _("instance is not in a suspended state") raise exception.InstanceResumeFailure(reason=reason) + def rescue(self, context, instance, network_info, image_meta): + """Rescue the specified instance. + + - shutdown the instance VM. + - spawn a rescue VM (the vm name-label will be instance-N-rescue). 
+ + """ + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) + if vm_ref is None: + raise exception.InstanceNotFound(instance_id=instance['uuid']) + + self.power_off(instance) + instance['name'] = instance['name'] + self._rescue_suffix + self.spawn(context, instance, image_meta, network_info) + + # Attach vmdk to the rescue VM + hardware_devices = self._session._call_method(vim_util, + "get_dynamic_property", vm_ref, + "VirtualMachine", "config.hardware.device") + vmdk_path, controller_key, adapter_type, disk_type, unit_number \ + = vm_util.get_vmdk_path_and_adapter_type(hardware_devices) + # Figure out the correct unit number + unit_number = unit_number + 1 + rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session, + instance['name']) + self._volumeops.attach_disk_to_vm( + rescue_vm_ref, instance, + adapter_type, disk_type, vmdk_path, + controller_key=controller_key, + unit_number=unit_number) + + def unrescue(self, instance): + """Unrescue the specified instance.""" + instance_orig_name = instance['name'] + instance['name'] = instance['name'] + self._rescue_suffix + self.destroy(instance, None) + instance['name'] = instance_orig_name + self.power_on(instance) + + def power_off(self, instance): + """Power off the specified instance.""" + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) + if vm_ref is None: + raise exception.InstanceNotFound(instance_id=instance['uuid']) + + pwr_state = self._session._call_method(vim_util, + "get_dynamic_property", vm_ref, + "VirtualMachine", "runtime.powerState") + # Only PoweredOn VMs can be powered off. + if pwr_state == "poweredOn": + LOG.debug(_("Powering off the VM"), instance=instance) + poweroff_task = self._session._call_method( + self._session._get_vim(), + "PowerOffVM_Task", vm_ref) + self._session._wait_for_task(instance['uuid'], poweroff_task) + LOG.debug(_("Powered off the VM"), instance=instance) + # Raise Exception if VM is suspended + elif pwr_state == "suspended": + reason = _("instance is suspended and cannot be powered off.") + raise exception.InstancePowerOffFailure(reason=reason) + else: + LOG.debug(_("VM was already in powered off state. So returning " + "without doing anything"), instance=instance) + + def power_on(self, instance): + """Power on the specified instance.""" + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) + if vm_ref is None: + raise exception.InstanceNotFound(instance_id=instance['uuid']) + + pwr_state = self._session._call_method(vim_util, + "get_dynamic_property", vm_ref, + "VirtualMachine", "runtime.powerState") + if pwr_state == "poweredOn": + LOG.debug(_("VM was already in powered on state. So returning " + "without doing anything"), instance=instance) + # Only PoweredOff and Suspended VMs can be powered on. + else: + LOG.debug(_("Powering on the VM"), instance=instance) + poweron_task = self._session._call_method( + self._session._get_vim(), + "PowerOnVM_Task", vm_ref) + self._session._wait_for_task(instance['uuid'], poweron_task) + LOG.debug(_("Powered on the VM"), instance=instance) + + def _get_orig_vm_name_label(self, instance): + return instance['name'] + '-orig' + + def _update_instance_progress(self, context, instance, step, total_steps): + """Update instance progress percent to reflect current step number + """ + # Divide the action's workflow into discrete steps and "bump" the + # instance's progress field as each step is completed. 
+ # + # For a first cut this should be fine, however, for large VM images, + # the clone disk step begins to dominate the equation. A + # better approximation would use the percentage of the VM image that + # has been streamed to the destination host. + progress = round(float(step) / total_steps * 100) + instance_uuid = instance['uuid'] + LOG.debug(_("Updating instance '%(instance_uuid)s' progress to" + " %(progress)d") % locals(), instance=instance) + self._virtapi.instance_update(context, instance_uuid, + {'progress': progress}) + + def migrate_disk_and_power_off(self, context, instance, dest, + instance_type): + """ + Transfers the disk of a running instance in multiple phases, turning + off the instance before the end. + """ + # 0. Zero out the progress to begin + self._update_instance_progress(context, instance, + step=0, + total_steps=RESIZE_TOTAL_STEPS) + + vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name']) + if vm_ref is None: + raise exception.InstanceNotFound(instance_id=instance['name']) + host_ref = self._get_host_ref_from_name(dest) + if host_ref is None: + raise exception.HostNotFound(host=dest) + + # 1. Power off the instance + self.power_off(instance) + self._update_instance_progress(context, instance, + step=1, + total_steps=RESIZE_TOTAL_STEPS) + + # 2. Rename the original VM with suffix '-orig' + name_label = self._get_orig_vm_name_label(instance) + LOG.debug(_("Renaming the VM to %s") % name_label, + instance=instance) + rename_task = self._session._call_method( + self._session._get_vim(), + "Rename_Task", vm_ref, newName=name_label) + self._session._wait_for_task(instance['uuid'], rename_task) + LOG.debug(_("Renamed the VM to %s") % name_label, + instance=instance) + self._update_instance_progress(context, instance, + step=2, + total_steps=RESIZE_TOTAL_STEPS) + + # Get the clone vm spec + ds_ref = vm_util.get_datastore_ref_and_name(self._session)[0] + client_factory = self._session._get_vim().client.factory + rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref) + clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec) + vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs() + + # 3. Clone VM on ESX host + LOG.debug(_("Cloning VM to host %s") % dest, instance=instance) + vm_clone_task = self._session._call_method( + self._session._get_vim(), + "CloneVM_Task", vm_ref, + folder=vm_folder_ref, + name=instance['name'], + spec=clone_spec) + self._session._wait_for_task(instance['uuid'], vm_clone_task) + LOG.debug(_("Cloned VM to host %s") % dest, instance=instance) + self._update_instance_progress(context, instance, + step=3, + total_steps=RESIZE_TOTAL_STEPS) + + def confirm_migration(self, migration, instance, network_info): + """Confirms a resize, destroying the source VM.""" + instance_name = self._get_orig_vm_name_label(instance) + # Destroy the original VM. 
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+        if vm_ref is None:
+            LOG.debug(_("instance not present"), instance=instance)
+            return
+
+        try:
+            LOG.debug(_("Destroying the VM"), instance=instance)
+            destroy_task = self._session._call_method(
+                self._session._get_vim(),
+                "Destroy_Task", vm_ref)
+            self._session._wait_for_task(instance['uuid'], destroy_task)
+            LOG.debug(_("Destroyed the VM"), instance=instance)
+        except Exception, excep:
+            LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
+                       "exception while destroying the VM: %s") % str(excep))
+
+        if network_info:
+            self.unplug_vifs(instance, network_info)
+
+    def finish_revert_migration(self, instance):
+        """Finish reverting a resize, powering back on the instance."""
+        # The original vm was suffixed with '-orig'; find it using
+        # the old suffix, remove the suffix, then power it back on.
+        name_label = self._get_orig_vm_name_label(instance)
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, name_label)
+        if vm_ref is None:
+            raise exception.InstanceNotFound(instance_id=name_label)
+
+        LOG.debug(_("Renaming the VM from %s") % name_label,
+                  instance=instance)
+        rename_task = self._session._call_method(
+            self._session._get_vim(),
+            "Rename_Task", vm_ref, newName=instance['name'])
+        self._session._wait_for_task(instance['uuid'], rename_task)
+        LOG.debug(_("Renamed the VM from %s") % name_label,
+                  instance=instance)
+        self.power_on(instance)
+
+    def finish_migration(self, context, migration, instance, disk_info,
+                         network_info, image_meta, resize_instance=False):
+        """Completes a resize, turning on the migrated instance."""
+        # 4. Start VM
+        self.power_on(instance)
+        self._update_instance_progress(context, instance,
+                                       step=4,
+                                       total_steps=RESIZE_TOTAL_STEPS)
+
+    def live_migration(self, context, instance_ref, dest,
+                       post_method, recover_method, block_migration=False):
+        """Spawn a live_migration operation for distributing high load."""
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_ref.name)
+        if vm_ref is None:
+            raise exception.InstanceNotFound(instance_id=instance_ref.name)
+        host_ref = self._get_host_ref_from_name(dest)
+        if host_ref is None:
+            raise exception.HostNotFound(host=dest)
+
+        LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref)
+        try:
+            vm_migrate_task = self._session._call_method(
+                self._session._get_vim(),
+                "MigrateVM_Task", vm_ref,
+                host=host_ref,
+                priority="defaultPriority")
+            self._session._wait_for_task(instance_ref['uuid'], vm_migrate_task)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                recover_method(context, instance_ref, dest, block_migration)
+        post_method(context, instance_ref, dest, block_migration)
+        LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref)
+
+    def poll_rebooting_instances(self, timeout, instances):
+        """Poll for rebooting instances."""
+        ctxt = nova_context.get_admin_context()
+
+        instances_info = dict(instance_count=len(instances),
+                              timeout=timeout)
+
+        if instances_info["instance_count"] > 0:
+            LOG.info(_("Found %(instance_count)d hung reboots "
+                       "older than %(timeout)d seconds") % instances_info)
+
+        for instance in instances:
+            LOG.info(_("Automatically hard rebooting %s") % instance['uuid'])
+            self.compute_api.reboot(ctxt, instance, "HARD")
+
     def get_info(self, instance):
         """Return data about the VM instance."""
-        vm_ref = self._get_vm_ref_from_the_name(instance['name'])
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
         if vm_ref is None:
             raise exception.InstanceNotFound(instance_id=instance['name'])
@@ -722,9 +1084,9 @@ class VMwareVMOps(object):
 
     def get_console_output(self, instance):
         """Return snapshot of console."""
-        vm_ref = self._get_vm_ref_from_the_name(instance.name)
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
         if vm_ref is None:
-            raise exception.InstanceNotFound(instance_id=instance.id)
+            raise exception.InstanceNotFound(instance_id=instance['uuid'])
         param_list = {"id": str(vm_ref)}
         base_url = "%s://%s/screen?%s" % (self._session._scheme,
                                           self._session._host_ip,
@@ -741,6 +1103,24 @@ class VMwareVMOps(object):
         else:
             return ""
 
+    def get_vnc_console(self, instance):
+        """Return connection info for a vnc console."""
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+        if vm_ref is None:
+            raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+        return {'host': CONF.vmwareapi_host_ip,
+                'port': self._get_vnc_port(vm_ref),
+                'internal_access_path': None}
+
+    @staticmethod
+    def _get_vnc_port(vm_ref):
+        """Return the VNC port for a VM."""
+        vm_id = int(vm_ref.value.replace('vm-', ''))
+        port = CONF.vnc_port + vm_id % CONF.vnc_port_total
+
+        return port
+
     def _set_machine_id(self, client_factory, instance, network_info):
         """
         Set the machine id of the VM for guest tools to pick up and reconfigure
@@ -786,12 +1166,56 @@ class VMwareVMOps(object):
                    "with ip - %(ip_addr)s") %
                   {'ip_addr': ip_v4['ip']},
                   instance=instance)
 
-    def _get_datacenter_name_and_ref(self):
+    def _set_vnc_config(self, client_factory, instance, port, password):
+        """
+        Set the vnc configuration of the VM.
+        """
+        vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+        if vm_ref is None:
+            raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+        vnc_config_spec = vm_util.get_vnc_config_spec(
+            client_factory, port, password)
+
+        LOG.debug(_("Reconfiguring VM instance to enable vnc on "
+                    "port - %(port)s") % {'port': port},
+                  instance=instance)
+        reconfig_task = self._session._call_method(self._session._get_vim(),
+                                                   "ReconfigVM_Task", vm_ref,
+                                                   spec=vnc_config_spec)
+        self._session._wait_for_task(instance['uuid'], reconfig_task)
+        LOG.debug(_("Reconfigured VM instance to enable vnc on "
+                    "port - %(port)s") % {'port': port},
+                  instance=instance)
+
+    def _get_datacenter_ref_and_name(self):
         """Get the datacenter name and the reference."""
         dc_obj = self._session._call_method(vim_util, "get_objects",
                                             "Datacenter", ["name"])
         return dc_obj[0].obj, dc_obj[0].propSet[0].val
 
+    def _get_host_ref_from_name(self, host_name):
+        """Get reference to the host with the name specified."""
+        host_objs = self._session._call_method(vim_util, "get_objects",
+                                               "HostSystem", ["name"])
+        for host in host_objs:
+            if host.propSet[0].val == host_name:
+                return host.obj
+        return None
+
+    def _get_vmfolder_and_res_pool_refs(self):
+        """Get the VM folder and resource pool refs from the datacenter."""
+        dc_objs = self._session._call_method(vim_util, "get_objects",
+                                             "Datacenter", ["vmFolder"])
+        # There is only one default datacenter in a standalone ESX host
+        vm_folder_ref = dc_objs[0].propSet[0].val
+
+        # Get the resource pool. Take the first resource pool available and
+        # assume it is the default resource pool.
+ res_pool_ref = self._session._call_method(vim_util, "get_objects", + "ResourcePool")[0].obj + return vm_folder_ref, res_pool_ref + def _path_exists(self, ds_browser, ds_path): """Check if the path exists on the datastore.""" search_task = self._session._call_method(self._session._get_vim(), @@ -812,6 +1236,32 @@ class VMwareVMOps(object): return False return True + def _path_file_exists(self, ds_browser, ds_path, file_name): + """Check if the path and file exists on the datastore.""" + client_factory = self._session._get_vim().client.factory + search_spec = vm_util.search_datastore_spec(client_factory, file_name) + search_task = self._session._call_method(self._session._get_vim(), + "SearchDatastore_Task", + ds_browser, + datastorePath=ds_path, + searchSpec=search_spec) + # Wait till the state changes from queued or running. + # If an error state is returned, it means that the path doesn't exist. + while True: + task_info = self._session._call_method(vim_util, + "get_dynamic_property", + search_task, "Task", "info") + if task_info.state in ['queued', 'running']: + time.sleep(2) + continue + break + if task_info.state == "error": + return False, False + + file_exists = (getattr(task_info.result, 'file', False) and + task_info.result.file[0].path == file_name) + return True, file_exists + def _mkdir(self, ds_path): """ Creates a directory at the path specified. If it is just "NAME", @@ -824,14 +1274,23 @@ class VMwareVMOps(object): name=ds_path, createParentDirectories=False) LOG.debug(_("Created directory with path %s") % ds_path) - def _get_vm_ref_from_the_name(self, vm_name): - """Get reference to the VM with the name specified.""" - vms = self._session._call_method(vim_util, "get_objects", - "VirtualMachine", ["name"]) - for vm in vms: - if vm.propSet[0].val == vm_name: - return vm.obj - return None + def _check_if_folder_file_exists(self, ds_ref, ds_name, + folder_name, file_name): + ds_browser = vim_util.get_dynamic_property( + self._session._get_vim(), + ds_ref, + "Datastore", + "browser") + # Check if the folder exists or not. If not, create one + # Check if the file exists or not. 
+ folder_path = vm_util.build_datastore_path(ds_name, folder_name) + folder_exists, file_exists = self._path_file_exists(ds_browser, + folder_path, + file_name) + if not folder_exists: + self._mkdir(vm_util.build_datastore_path(ds_name, folder_name)) + + return file_exists def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" @@ -872,3 +1331,21 @@ class VMwareVMOps(object): interfaces.append(device.key) return interfaces + + @staticmethod + def _volume_in_mapping(mount_device, block_device_info): + block_device_list = [block_device.strip_dev(vol['mount_device']) + for vol in + driver.block_device_info_get_mapping( + block_device_info)] + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + block_device_list.append( + block_device.strip_dev(swap['device_name'])) + block_device_list += [block_device.strip_dev(ephemeral['device_name']) + for ephemeral in + driver.block_device_info_get_ephemerals( + block_device_info)] + + LOG.debug(_("block_device_list %s"), block_device_list) + return block_device.strip_dev(mount_device) in block_device_list diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 7c4480ea0..e8510b36e 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -1,5 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack LLC. # @@ -17,7 +18,6 @@ """ Utility functions for Image transfer. """ -import StringIO from nova import exception from nova.image import glance @@ -56,7 +56,7 @@ def start_transfer(context, read_file_handle, data_size, write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle) # In case of VMware - Glance transfer, we relinquish VMware HTTP file read # handle to Glance Client instance, but to be sure of the transfer we need - # to be sure of the status of the image on glnace changing to active. + # to be sure of the status of the image on glance changing to active. # The GlanceWriteThread handles the same for us. elif image_service and image_id: write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe, @@ -93,9 +93,8 @@ def fetch_image(context, image, instance, **kwargs): (image_service, image_id) = glance.get_remote_image_service(context, image) metadata = image_service.show(context, image_id) file_size = int(metadata['size']) - f = StringIO.StringIO() - image_service.download(context, image_id, f) - read_file_handle = read_write_util.GlanceFileRead(f) + read_iter = image_service.download(context, image_id) + read_file_handle = read_write_util.GlanceFileRead(read_iter) write_file_handle = read_write_util.VMwareHTTPWriteFile( kwargs.get("host"), kwargs.get("data_center_name"), @@ -122,10 +121,9 @@ def upload_image(context, image, instance, **kwargs): file_size = read_file_handle.get_size() (image_service, image_id) = glance.get_remote_image_service(context, image) # The properties and other fields that we need to set for the image. 
- image_metadata = {"is_public": True, - "disk_format": "vmdk", + image_metadata = {"disk_format": "vmdk", "container_format": "bare", - "type": "vmdk", + "size": file_size, "properties": {"vmware_adaptertype": kwargs.get("adapter_type"), "vmware_ostype": kwargs.get("os_type"), diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py index 9d556cd26..2af3381a4 100644 --- a/nova/virt/vmwareapi/volume_util.py +++ b/nova/virt/vmwareapi/volume_util.py @@ -22,7 +22,6 @@ and storage repositories import re import string -from nova import exception from nova.openstack.common import log as logging from nova.virt.vmwareapi import vim_util diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py index 5ec389f80..922d2135b 100644 --- a/nova/virt/vmwareapi/volumeops.py +++ b/nova/virt/vmwareapi/volumeops.py @@ -18,7 +18,6 @@ Management class for Storage-related functions (attach, detach, etc). """ -from nova import context from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -110,7 +109,8 @@ class VMwareVolumeOps(object): iqn = volume_util.get_host_iqn(self._session) return { 'ip': CONF.vmwareapi_host_ip, - 'initiator': iqn + 'initiator': iqn, + 'host': CONF.vmwareapi_host_ip } def attach_volume(self, connection_info, instance, mountpoint): diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index daca69854..05918f83d 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -63,8 +63,10 @@ def cinderclient(context): # FIXME: the cinderclient ServiceCatalog object is mis-named. # It actually contains the entire access blob. + # Only needed parts of the service catalog are passed in, see + # nova/context.py. compat_catalog = { - 'access': {'serviceCatalog': context.service_catalog or {}} + 'access': {'serviceCatalog': context.service_catalog or []} } sc = service_catalog.ServiceCatalog(compat_catalog) if CONF.cinder_endpoint_template: diff --git a/run_tests.sh b/run_tests.sh index 11bc8b518..5bb2842b2 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -142,6 +142,7 @@ function run_pep8 { echo "Running pep8" ${wrapper} python tools/hacking.py ${ignore} ${srcfiles} + ${wrapper} bash tools/unused_imports.sh # NOTE(sdague): as of grizzly-2 these are passing however leaving the comment # in here in case we need to break it out when we get more of our hacking working # again. 
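
The _get_vnc_port helper introduced in the nova/virt/vmwareapi/vmops.py hunk above derives a VNC port deterministically from the VM's managed object reference, so no port-allocation state has to be kept. Below is a minimal sketch of that arithmetic; the stand-in constants for CONF.vnc_port and CONF.vnc_port_total (5900 and 10000) are assumed values for illustration, not taken from this change:

    # Stand-ins for CONF.vnc_port and CONF.vnc_port_total; the literal
    # defaults here are assumptions for illustration.
    VNC_PORT = 5900
    VNC_PORT_TOTAL = 10000

    def get_vnc_port(moref_value):
        # Mirrors _get_vnc_port: 'vm-123' -> 123, wrapped into the
        # configured range ('%' binds tighter than '+').
        vm_id = int(moref_value.replace('vm-', ''))
        return VNC_PORT + vm_id % VNC_PORT_TOTAL

    assert get_vnc_port('vm-123') == 5900 + 123  # == 6023
    # Morefs whose numeric ids differ by VNC_PORT_TOTAL collide:
    assert get_vnc_port('vm-123') == get_vnc_port('vm-10123')

The modulo keeps ports inside a fixed window but means two VMs can map to the same port on very large inventories; _set_vnc_config then applies the chosen port to the VM through ReconfigVM_Task.
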
diff --git a/smoketests/base.py b/smoketests/base.py
index 7c7d19838..c90da102c 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -169,7 +169,6 @@ class SmokeTestCase(unittest.TestCase):
         cmd += ' --kernel true'
         status, output = commands.getstatusoutput(cmd)
         if status != 0:
-            print '%s -> \n %s' % (cmd, output)
             raise Exception(output)
         return True
 
@@ -178,7 +177,6 @@ class SmokeTestCase(unittest.TestCase):
         cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
         status, output = commands.getstatusoutput(cmd)
         if status != 0:
-            print '%s -> \n %s' % (cmd, output)
             raise Exception(output)
         return True
 
@@ -186,7 +184,6 @@ class SmokeTestCase(unittest.TestCase):
         cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
         status, output = commands.getstatusoutput(cmd)
         if status != 0:
-            print '%s -> \n%s' % (cmd, output)
             raise Exception(output)
         return True
diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py
index 4fb843e0f..f20b0923e 100644
--- a/smoketests/public_network_smoketests.py
+++ b/smoketests/public_network_smoketests.py
@@ -97,7 +97,6 @@ class InstanceTestsFromPublic(base.UserSmokeTestCase):
                                       self.data['ip_v6'], TEST_KEY)
             conn.close()
         except Exception as ex:
-            print ex
             time.sleep(1)
         else:
             break
diff --git a/tools/flakes.py b/tools/flakes.py
index 4b93abc21..f805fd156 100644
--- a/tools/flakes.py
+++ b/tools/flakes.py
@@ -8,7 +8,7 @@ import __builtin__
 import os
 import sys
 
-from pyflakes.scripts.pyflakes import main
+from pyflakes.scripts import pyflakes
 
 if __name__ == "__main__":
     names = os.environ.get('PYFLAKES_BUILTINS', '_')
@@ -19,4 +19,4 @@ if __name__ == "__main__":
 
     del names, os, __builtin__
 
-    sys.exit(main())
+    sys.exit(pyflakes.main())
diff --git a/tools/pip-requires b/tools/pip-requires
index 231d5cfe5..126f0125c 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -20,7 +20,7 @@ Babel>=0.9.6
 iso8601>=0.1.4
 httplib2
 setuptools_git>=0.4
-python-cinderclient
+python-cinderclient>=1.0.1
 python-quantumclient>=2.1
 python-glanceclient>=0.5.0,<2
 python-keystoneclient>=0.2.0
diff --git a/tools/unused_imports.sh b/tools/unused_imports.sh
new file mode 100755
index 000000000..0e0294517
--- /dev/null
+++ b/tools/unused_imports.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# snakefood's sfood-checker detects even more unused imports
+! pyflakes nova/ | grep "imported but unused"
diff --git a/tox.ini b/tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -15,13 +15,16 @@
 sitepackages = True
 downloadcache = ~/cache/pip
 
 [testenv:pep8]
-deps=pep8==1.3.3
+deps=
+    pep8==1.3.3
+    pyflakes
 commands =
   python tools/hacking.py --doctest
   python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
             --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
   python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
             --filename=nova* bin
+  bash tools/unused_imports.sh
 
 [testenv:pylint]
 setenv = VIRTUAL_ENV={envdir}
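
The _volume_in_mapping helper added at the end of the nova/virt/vmwareapi/vmops.py diff decides whether a device name belongs to a mapped volume, swap, or an ephemeral disk rather than to the instance's root disk. The following is a self-contained sketch of the same membership test: strip_dev is stubbed in place of nova.block_device.strip_dev, the swap_is_usable check is reduced to a truthiness test, and the block_device_info literal is a hypothetical example of the shape the nova.virt.driver accessors read.

    def strip_dev(device_name):
        # Stand-in for nova.block_device.strip_dev: drop a '/dev/' prefix.
        if device_name.startswith('/dev/'):
            return device_name[len('/dev/'):]
        return device_name

    def volume_in_mapping(mount_device, block_device_info):
        # Collect volume, swap and ephemeral device names, normalized
        # with strip_dev, then test membership.
        devices = [strip_dev(vol['mount_device'])
                   for vol in block_device_info.get('block_device_mapping', [])]
        swap = block_device_info.get('swap')
        if swap:  # simplified; the real code calls driver.swap_is_usable
            devices.append(strip_dev(swap['device_name']))
        devices += [strip_dev(eph['device_name'])
                    for eph in block_device_info.get('ephemerals', [])]
        return strip_dev(mount_device) in devices

    info = {'block_device_mapping': [{'mount_device': '/dev/sdb'}],
            'swap': {'device_name': '/dev/sdc'},
            'ephemerals': []}
    assert volume_in_mapping('/dev/sdb', info)       # attached volume
    assert volume_in_mapping('sdc', info)            # swap, name normalized
    assert not volume_in_mapping('/dev/sda', info)   # root disk

Normalizing both sides with strip_dev is what lets callers pass either 'sdb' or '/dev/sdb' and get the same answer.
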