summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--nova/compute/api.py52
-rw-r--r--nova/compute/manager.py11
-rw-r--r--nova/virt/libvirt_conn.py67
3 files changed, 45 insertions, 85 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 020d3b06d..5ec88adbd 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -105,32 +105,6 @@ class API(base.Base):
if len(content) > content_limit:
raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
- def _check_metadata_quota(self, context, metadata):
- num_metadata = len(metadata)
- quota_metadata = quota.allowed_metadata_items(context, num_metadata)
- if quota_metadata < num_metadata:
- pid = context.project_id
- msg = (_("Quota exceeeded for %(pid)s,"
- " tried to set %(num_metadata)s metadata properties")
- % locals())
- LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
- def _check_metadata_item_length(self, context, metadata):
- # Because metadata is stored in the DB, we hard-code the size limits
- # In future, we may support more variable length strings, so we act
- # as if this is quota-controlled for forwards compatibility
- for metadata_item in metadata:
- k = metadata_item['key']
- v = metadata_item['value']
- if len(k) > 255 or len(v) > 255:
- pid = context.project_id
- msg = (_("Quota exceeeded for %(pid)s,"
- " metadata property key or value too long")
- % locals())
- LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
def create(self, context, instance_type,
image_id, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
@@ -267,6 +241,32 @@ class API(base.Base):
return [dict(x.iteritems()) for x in instances]
+ def _check_metadata_quota(self, context, metadata):
+ num_metadata = len(metadata)
+ quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+ if quota_metadata < num_metadata:
+ pid = context.project_id
+ msg = (_("Quota exceeded for %(pid)s,"
+ " tried to set %(num_metadata)s metadata properties")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ def _check_metadata_item_length(self, context, metadata):
+ # Because metadata is stored in the DB, we hard-code the size limits
+ # In future, we may support more variable length strings, so we act
+ # as if this is quota-controlled for forwards compatibility
+ for metadata_item in metadata:
+ k = metadata_item['key']
+ v = metadata_item['value']
+ if len(k) > 255 or len(v) > 255:
+ pid = context.project_id
+ msg = (_("Quota exceeded for %(pid)s,"
+ " metadata property key or value too long")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
def has_finished_migration(self, context, instance_id):
"""Retrieves whether or not a finished migration exists for
an instance"""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 51f5322cd..04f2abc49 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -150,10 +150,11 @@ class ComputeManager(manager.SchedulerDependentManager):
info = self.driver.get_info(instance_ref['name'])
except exception.NotFound:
info = None
- state = power_state.FAILED
if info is not None:
state = info['state']
+ else:
+ state = power_state.FAILED
self.db.instance_set_state(context, instance_id, state)
@@ -246,10 +247,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.spawn(instance_ref)
self._update_launched_at(context, instance_id)
except Exception as ex: # pylint: disable=W0702
- LOG.debug(ex)
- LOG.exception(_("Instance '%s' failed to spawn. Is virtualization"
- " enabled in the BIOS?"), instance_id,
- context=context)
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
self._update_state(context, instance_id)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 9714773b2..53382a315 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -309,27 +309,6 @@ class LibvirtConnection(driver.ComputeDriver):
return infos
def destroy(self, instance, cleanup=True):
-<<<<<<< TREE
- """Delete the VM instance from the hypervisor.
-
- :param instance: Object representing the instance to destroy
- :param cleanup: Should we erase all of the VM's associated files?
- """
- name = instance['name']
-
- try:
- virt_dom = self._conn.lookupByName(name)
- except libvirt.libvirtError as ex:
- msg = _("Instance %s not found.") % name
- raise exception.NotFound(msg)
-
- try:
- virt_dom.destroy()
- except libvirt.libvirtError as ex:
- # If the instance is already terminated, we're still happy
- msg = _("Error encountered during `libvirt.destroy`: %s") % ex
- LOG.debug(msg)
-=======
instance_name = instance['name']
# TODO(justinsb): Refactor all lookupByName calls for error-handling
@@ -624,38 +603,16 @@ class LibvirtConnection(driver.ComputeDriver):
# for xenapi(tr3buchet)
@exception.wrap_exception
def spawn(self, instance, network_info=None):
- """Create the given VM instance using the libvirt connection.
-
- :param instance: Object representing the instance to create
- :param network_info: Associated network information
- """
- _id = instance['id']
- name = instance['name']
xml = self.to_xml(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info)
-<<<<<<< TREE
-
- try:
- self._conn.createXML(xml, 0)
- except libvirt.libvirtError as ex:
- msg = _("Error encountered creating VM '%(name)s': %(ex)s")
- LOG.error(msg % locals())
- return False
-
- LOG.debug(_("VM %s successfully created.") % name)
-
-=======
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
->>>>>>> MERGE-SOURCE
self.firewall_driver.apply_instance_filter(instance)
-<<<<<<< TREE
-=======
if FLAGS.start_guests_on_host_boot:
LOG.debug(_("instance %s: setting autostart ON") %
instance['name'])
@@ -663,21 +620,23 @@ class LibvirtConnection(driver.ComputeDriver):
timer = utils.LoopingCall(f=None)
->>>>>>> MERGE-SOURCE
def _wait_for_boot():
- """Check to see if the VM is running."""
try:
- state = self.get_info(name)['state']
- except (exception.NotFound, libvirt.libvirtError) as ex:
- msg = _("Error while waiting for VM '%(_id)s' to run: %(ex)s")
- LOG.debug(msg % locals())
- timer.stop()
-
- if state == power_state.RUNNING:
- LOG.debug(_('VM %s is now running.') % name)
+ state = self.get_info(instance['name'])['state']
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
+ if state == power_state.RUNNING:
+ LOG.debug(_('instance %s: booted'), instance['name'])
+ timer.stop()
+ except:
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
timer.stop()
- timer = utils.LoopingCall(f=_wait_for_boot)
+ timer.f = _wait_for_boot
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):