Diffstat (limited to 'nova')
-rwxr-xr-x  nova/compute/manager.py              | 101
-rw-r--r--  nova/db/sqlalchemy/api.py            | 142
-rw-r--r--  nova/exception.py                    |   4
-rw-r--r--  nova/network/quantumv2/api.py        |   5
-rw-r--r--  nova/tests/compute/test_compute.py   |  22
-rw-r--r--  nova/tests/network/test_quantumv2.py |  13
-rw-r--r--  nova/virt/xenapi/vm_utils.py         | 114
7 files changed, 177 insertions(+), 224 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dbef94596..6d18952bf 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -995,15 +995,19 @@ class ComputeManager(manager.SchedulerDependentManager):
set_access_ip=set_access_ip)
except exception.InstanceNotFound:
# the instance got deleted during the spawn
- with excutils.save_and_reraise_exception():
- # Make sure the async call finishes
- if network_info is not None:
- network_info.wait(do_raise=False)
- try:
- self._deallocate_network(context, instance)
- except Exception:
- LOG.exception(_('Failed to dealloc network for '
- 'deleted instance'), instance=instance)
+ # Make sure the async call finishes
+ msg = _("Instance disappeared during build")
+ if network_info is not None:
+ network_info.wait(do_raise=False)
+ try:
+ self._deallocate_network(context, instance)
+ except Exception:
+ msg = _('Failed to dealloc network '
+ 'for deleted instance')
+ LOG.exception(msg, instance=instance)
+ raise exception.BuildAbortException(
+ instance_uuid=instance['uuid'],
+ reason=msg)
except exception.UnexpectedTaskStateError as e:
exc_info = sys.exc_info()
# Make sure the async call finishes
@@ -1950,53 +1954,70 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context, instance['uuid'],
- power_state=current_power_state)
-
- LOG.audit(_('instance snapshotting'), context=context,
+ try:
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state)
+ LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
- if instance['power_state'] != power_state.RUNNING:
- state = instance['power_state']
- running = power_state.RUNNING
- LOG.warn(_('trying to snapshot a non-running instance: '
+ if instance['power_state'] != power_state.RUNNING:
+ state = instance['power_state']
+ running = power_state.RUNNING
+ LOG.warn(_('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
- self._notify_about_instance_usage(
+ self._notify_about_instance_usage(
context, instance, "snapshot.start")
- if image_type == 'snapshot':
- expected_task_state = task_states.IMAGE_SNAPSHOT
+ if image_type == 'snapshot':
+ expected_task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == 'backup':
- expected_task_state = task_states.IMAGE_BACKUP
+ elif image_type == 'backup':
+ expected_task_state = task_states.IMAGE_BACKUP
- def update_task_state(task_state, expected_state=expected_task_state):
- return self._instance_update(context, instance['uuid'],
- task_state=task_state,
- expected_task_state=expected_state)
+ def update_task_state(task_state,
+ expected_state=expected_task_state):
+ return self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state
+ )
- self.driver.snapshot(context, instance, image_id, update_task_state)
- # The instance could have changed from the driver. But since
- # we're doing a fresh update here, we'll grab the changes.
+ self.driver.snapshot(context, instance, image_id,
+ update_task_state)
+ # The instance could have changed from the driver. But since
+ # we're doing a fresh update here, we'll grab the changes.
- instance = self._instance_update(context, instance['uuid'],
- task_state=None,
- expected_task_state=task_states.IMAGE_UPLOADING)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=None,
+ expected_task_state=
+ task_states.IMAGE_UPLOADING)
- if image_type == 'snapshot' and rotation:
- raise exception.ImageRotationNotAllowed()
+ if image_type == 'snapshot' and rotation:
+ raise exception.ImageRotationNotAllowed()
- elif image_type == 'backup' and rotation >= 0:
- self._rotate_backups(context, instance, backup_type, rotation)
+ elif image_type == 'backup' and rotation >= 0:
+ self._rotate_backups(context, instance, backup_type, rotation)
- elif image_type == 'backup':
- raise exception.RotationRequiredForBackup()
+ elif image_type == 'backup':
+ raise exception.RotationRequiredForBackup()
- self._notify_about_instance_usage(
- context, instance, "snapshot.end")
+ self._notify_about_instance_usage(context, instance,
+ "snapshot.end")
+
+ except exception.InstanceNotFound:
+ # the instance got deleted during the snapshot
+ # Quickly bail out of here
+ msg = _("Instance disappeared during snapshot")
+ LOG.debug(msg, instance=instance)
+ except exception.UnexpectedTaskStateError as e:
+ actual_task_state = e.kwargs.get('actual', None)
+ if actual_task_state == 'deleting':
+ msg = _('Instance was deleted during snapshot.')
+ LOG.debug(msg, instance=instance)
+ else:
+ raise
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
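[Note] The manager.py part of this patch handles two delete races: run_instance now waits for the async network allocation, deallocates it, and raises BuildAbortException when the instance disappears during spawn, and snapshot_instance wraps its whole body so an instance deleted mid-snapshot just logs and returns instead of raising. A minimal standalone sketch of the snapshot pattern follows; the class and function names below are local stand-ins for the nova.exception classes, not Nova code.

# Sketch only: stand-ins showing the "bail out quietly if the instance
# vanished mid-operation" shape introduced in snapshot_instance above.
class InstanceNotFound(Exception):
    pass

class UnexpectedTaskStateError(Exception):
    def __init__(self, actual=None):
        super(UnexpectedTaskStateError, self).__init__(actual)
        self.actual = actual

def snapshot_instance(instance, take_snapshot, log):
    try:
        take_snapshot(instance)            # may race with a delete
    except InstanceNotFound:
        # The instance was deleted during the snapshot: nothing left to do.
        log("Instance disappeared during snapshot")
    except UnexpectedTaskStateError as e:
        if e.actual == 'deleting':
            # Deletion raced with the snapshot; treat it as a no-op.
            log("Instance was deleted during snapshot.")
        else:
            raise                          # any other state mismatch is real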
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3b491ca6e..03dd43946 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1458,8 +1458,7 @@ def instance_create(context, values):
def _get_sec_group_models(session, security_groups):
models = []
- default_group = security_group_ensure_default(context,
- session=session)
+ default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
@@ -3208,6 +3207,16 @@ def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
###################
+def _security_group_create(context, values, session=None):
+ security_group_ref = models.SecurityGroup()
+ # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
+ # once save() is called. This will get cleaned up in next orm pass.
+ security_group_ref.rules
+ security_group_ref.update(values)
+ security_group_ref.save(session=session)
+ return security_group_ref
+
+
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
@@ -3223,7 +3232,7 @@ def _security_group_get_by_names(context, session, project_id, group_names):
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
- read_deleted="no", join_rules=False).\
+ read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
@@ -3244,11 +3253,10 @@ def security_group_get_all(context):
@require_context
-def security_group_get(context, security_group_id, columns_to_join=None,
- session=None):
- query = _security_group_get_query(context, session=session,
- project_only=True).\
- filter_by(id=security_group_id)
+def security_group_get(context, security_group_id, columns_to_join=None):
+ query = _security_group_get_query(context, project_only=True).\
+ filter_by(id=security_group_id)
+
if columns_to_join is None:
columns_to_join = []
if 'instances' in columns_to_join:
@@ -3264,12 +3272,9 @@ def security_group_get(context, security_group_id, columns_to_join=None,
@require_context
def security_group_get_by_name(context, project_id, group_name,
- columns_to_join=None, session=None):
- if session is None:
- session = get_session()
-
- query = _security_group_get_query(context, session=session,
- read_deleted="no", join_rules=False).\
+ columns_to_join=None):
+ query = _security_group_get_query(context,
+ read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
@@ -3334,70 +3339,77 @@ def security_group_in_use(context, group_id):
@require_context
-def security_group_create(context, values, session=None):
- security_group_ref = models.SecurityGroup()
- # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
- # once save() is called. This will get cleaned up in next orm pass.
- security_group_ref.rules
- security_group_ref.update(values)
- if session is None:
- session = get_session()
- security_group_ref.save(session=session)
- return security_group_ref
+def security_group_create(context, values):
+ return _security_group_create(context, values)
@require_context
-def security_group_update(context, security_group_id, values, session=None):
- security_group_ref = model_query(context, models.SecurityGroup,
- session=session).filter_by(id=security_group_id).first()
+def security_group_update(context, security_group_id, values):
+ session = get_session()
+ with session.begin():
+ security_group_ref = model_query(context, models.SecurityGroup,
+ session=session).\
+ filter_by(id=security_group_id).\
+ first()
- if not security_group_ref:
- raise exception.SecurityGroupNotFound(
- security_group_id=security_group_id)
- security_group_ref.update(values)
- return security_group_ref
+ if not security_group_ref:
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group_id)
+ security_group_ref.update(values)
+ return security_group_ref
-def security_group_ensure_default(context, session=None):
+def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
- try:
- default_group = security_group_get_by_name(context,
- context.project_id, 'default',
- columns_to_join=[], session=session)
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user_id,
- 'project_id': context.project_id}
- default_group = security_group_create(context, values,
- session=session)
- for default_rule in security_group_default_rule_list(context):
- # This is suboptimal, it should be programmatic to know
- # the values of the default_rule
- rule_values = {'protocol': default_rule.protocol,
- 'from_port': default_rule.from_port,
- 'to_port': default_rule.to_port,
- 'cidr': default_rule.cidr,
- 'parent_group_id': default_group.id,
- }
- security_group_rule_create(context, rule_values)
- return default_group
+ session = get_session()
+ with session.begin():
+ try:
+ default_group = _security_group_get_by_names(context,
+ session,
+ context.project_id,
+ ['default'])[0]
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ default_group = _security_group_create(context, values,
+ session=session)
+ default_rules = _security_group_rule_get_default_query(context,
+ session=session).all()
+ for default_rule in default_rules:
+ # This is suboptimal, it should be programmatic to know
+ # the values of the default_rule
+ rule_values = {'protocol': default_rule.protocol,
+ 'from_port': default_rule.from_port,
+ 'to_port': default_rule.to_port,
+ 'cidr': default_rule.cidr,
+ 'parent_group_id': default_group.id,
+ }
+ _security_group_rule_create(context,
+ rule_values,
+ session=session)
+ return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- session.query(models.SecurityGroup).\
+ model_query(context, models.SecurityGroup,
+ session=session).\
filter_by(id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupInstanceAssociation).\
+ model_query(context, models.SecurityGroupInstanceAssociation,
+ session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupIngressRule).\
+ model_query(context, models.SecurityGroupIngressRule,
+ session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupIngressRule).\
+ model_query(context, models.SecurityGroupIngressRule,
+ session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
@@ -3413,6 +3425,13 @@ def security_group_count_by_project(context, project_id, session=None):
###################
+def _security_group_rule_create(context, values, session=None):
+ security_group_rule_ref = models.SecurityGroupIngressRule()
+ security_group_rule_ref.update(values)
+ security_group_rule_ref.save(session=session)
+ return security_group_rule_ref
+
+
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@@ -3451,10 +3470,7 @@ def security_group_rule_get_by_security_group_grantee(context,
@require_context
def security_group_rule_create(context, values):
- security_group_rule_ref = models.SecurityGroupIngressRule()
- security_group_rule_ref.update(values)
- security_group_rule_ref.save()
- return security_group_rule_ref
+ return _security_group_rule_create(context, values)
@require_context
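[Note] The sqlalchemy/api.py changes follow one pattern: public DB API functions no longer accept a session argument, while private helpers such as _security_group_create and _security_group_rule_create do, so a compound operation like security_group_ensure_default can run its lookup, the group create, and the default-rule creation inside a single "with session.begin()" transaction. A rough self-contained sketch of that shape using plain SQLAlchemy follows; the model, table, and helper names are illustrative, not Nova's.

# Sketch of the public-wrapper / private-helper session pattern (assumed names).
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class SecurityGroup(Base):
    __tablename__ = 'security_group'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
get_session = sessionmaker(bind=engine)

def _security_group_create(values, session):
    # Private helper: writes into the caller's open transaction.
    ref = SecurityGroup(**values)
    session.add(ref)
    session.flush()
    return ref

def security_group_create(values):
    # Public wrapper: owns its own session and transaction.
    session = get_session()
    with session.begin():
        return _security_group_create(values, session)

def security_group_ensure_default():
    # Compound operation: one transaction covers both the lookup and the create.
    session = get_session()
    with session.begin():
        ref = session.query(SecurityGroup).filter_by(name='default').first()
        if ref is None:
            ref = _security_group_create({'name': 'default'}, session)
        return ref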
diff --git a/nova/exception.py b/nova/exception.py
index 350fb2f77..b64abd4db 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -982,10 +982,6 @@ class InsufficientFreeMemory(NovaException):
message = _("Insufficient free memory on compute node to start %(uuid)s.")
-class CouldNotFetchMetrics(NovaException):
- message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
-
-
class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index c7f4ffd58..814b66ee0 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -182,6 +182,11 @@ class API(base.Base):
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
+
+ if not nets:
+ LOG.warn(_("No network configured!"), instance=instance)
+ return []
+
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 0e1766077..b93559831 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1084,9 +1084,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
- self.assertRaises(exception.InstanceNotFound,
- self.compute.run_instance,
- self.context, instance=instance)
+ self.compute.run_instance(self.context, instance=instance)
def test_run_instance_bails_on_missing_instance(self):
# Make sure that run_instance() will quickly ignore a deleted instance
@@ -1939,6 +1937,24 @@ class ComputeTestCase(BaseTestCase):
self._assert_state({'task_state': None})
self.compute.terminate_instance(self.context, instance=instance)
+ def test_snapshot_handles_cases_when_instance_is_deleted(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': 'deleting'}))
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.DELETING})
+ self.compute.snapshot_instance(self.context, "failing_snapshot",
+ instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
+ def test_snapshot_handles_cases_when_instance_is_not_found(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': 'deleting'}))
+ instance["uuid"] = str(uuid.uuid4())
+ self.compute.snapshot_instance(self.context, "failing_snapshot",
+ instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 2ddeb72bf..a2e7efcc0 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -594,6 +594,19 @@ class TestQuantumv2(test.TestCase):
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
+ def test_allocate_for_instance_no_networks(self):
+ """verify the exception thrown when there are no networks defined."""
+ api = quantumapi.API()
+ self.moxed_client.list_networks(
+ tenant_id=self.instance['project_id'],
+ shared=False).AndReturn(
+ {'networks': []})
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': []})
+ self.mox.ReplayAll()
+ nwinfo = api.allocate_for_instance(self.context, self.instance)
+ self.assertEqual(len(nwinfo), 0)
+
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6e9f09184..01c640e37 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,7 +22,6 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
-import decimal
import os
import re
import time
@@ -1553,22 +1552,6 @@ def fetch_bandwidth(session):
return bw
-def compile_metrics(start_time, stop_time=None):
- """Compile bandwidth usage, cpu, and disk metrics for all VMs on
- this host.
- Note that some stats, like bandwidth, do not seem to be very
- accurate in some of the data from XenServer (mdragon).
- """
- start_time = int(start_time)
-
- xml = _get_rrd_updates(_get_rrd_server(), start_time)
- if xml:
- doc = minidom.parseString(xml)
- return _parse_rrd_update(doc, start_time, stop_time)
-
- raise exception.CouldNotFetchMetrics()
-
-
def _scan_sr(session, sr_ref=None):
"""Scans the SR specified by sr_ref."""
if sr_ref:
@@ -1690,103 +1673,6 @@ def _get_rrd(server, vm_uuid):
return None
-def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string."""
- try:
- xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
- server[0],
- CONF.xenapi_connection_username,
- CONF.xenapi_connection_password,
- server[1],
- start_time))
- return xml.read()
- except IOError:
- LOG.exception(_('Unable to obtain RRD XML updates with '
- 'server details: %s.'), server)
- return None
-
-
-def _parse_rrd_meta(doc):
- data = {}
- meta = doc.getElementsByTagName('meta')[0]
- for tag in ('start', 'end', 'step'):
- data[tag] = int(meta.getElementsByTagName(tag)[0].firstChild.data)
- legend = meta.getElementsByTagName('legend')[0]
- data['legend'] = [child.firstChild.data for child in legend.childNodes]
- return data
-
-
-def _parse_rrd_data(doc):
- dnode = doc.getElementsByTagName('data')[0]
- return [dict(
- time=int(child.getElementsByTagName('t')[0].firstChild.data),
- values=[decimal.Decimal(valnode.firstChild.data)
- for valnode in child.getElementsByTagName('v')])
- for child in dnode.childNodes]
-
-
-def _parse_rrd_update(doc, start, until=None):
- sum_data = {}
- meta = _parse_rrd_meta(doc)
- data = _parse_rrd_data(doc)
- for col, collabel in enumerate(meta['legend']):
- _datatype, _objtype, uuid, name = collabel.split(':')
- vm_data = sum_data.get(uuid, dict())
- if name.startswith('vif'):
- vm_data[name] = _integrate_series(data, col, start, until)
- else:
- vm_data[name] = _average_series(data, col, until)
- sum_data[uuid] = vm_data
- return sum_data
-
-
-def _average_series(data, col, until=None):
- vals = [row['values'][col] for row in data
- if (not until or (row['time'] <= until)) and
- row['values'][col].is_finite()]
- if vals:
- try:
- return (sum(vals) / len(vals)).quantize(decimal.Decimal('1.0000'))
- except decimal.InvalidOperation:
- # (mdragon) Xenserver occasionally returns odd values in
- # data that will throw an error on averaging (see bug 918490)
- # These are hard to find, since, whatever those values are,
- # Decimal seems to think they are a valid number, sortof.
- # We *think* we've got the the cases covered, but just in
- # case, log and return NaN, so we don't break reporting of
- # other statistics.
- LOG.error(_("Invalid statistics data from Xenserver: %s")
- % str(vals))
- return decimal.Decimal('NaN')
- else:
- return decimal.Decimal('0.0000')
-
-
-def _integrate_series(data, col, start, until=None):
- total = decimal.Decimal('0.0000')
- prev_time = int(start)
- prev_val = None
- for row in reversed(data):
- if not until or (row['time'] <= until):
- time = row['time']
- val = row['values'][col]
- if val.is_nan():
- val = decimal.Decimal('0.0000')
- if prev_val is None:
- prev_val = val
- if prev_val >= val:
- total += ((val * (time - prev_time)) +
- (decimal.Decimal('0.5000') * (prev_val - val) *
- (time - prev_time)))
- else:
- total += ((prev_val * (time - prev_time)) +
- (decimal.Decimal('0.5000') * (val - prev_val) *
- (time - prev_time)))
- prev_time = time
- prev_val = val
- return total.quantize(decimal.Decimal('1.0000'))
-
-
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
try:
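[Note] The RRD code removed from vm_utils.py computed per-VM bandwidth by integrating the sampled series: both branches of the deleted _integrate_series reduce to the trapezoid area dt * (val + prev_val) / 2 between consecutive samples, and _average_series took a quantized mean of the finite values. A simplified standalone sketch of the integration idea follows; it skips the original's start-time boundary and NaN handling.

# Illustrative only: trapezoidal integration of (time, value) samples,
# newest first like RRD rows, as the removed _integrate_series did.
from decimal import Decimal

def integrate(samples):
    total = Decimal('0')
    prev_time, prev_val = None, None
    for time, val in reversed(samples):    # walk oldest -> newest
        if prev_val is not None:
            dt = time - prev_time
            # Trapezoid area between consecutive samples.
            total += (val + prev_val) / 2 * dt
        prev_time, prev_val = time, val
    return total.quantize(Decimal('1.0000'))

print(integrate([(20, Decimal('3')), (10, Decimal('1')), (0, Decimal('1'))]))
# prints 30.0000  (flat segment area 10 + ramp segment area 20)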