-rw-r--r--   HACKING.rst                            306
-rwxr-xr-x   nova/compute/manager.py                101
-rw-r--r--   nova/db/sqlalchemy/api.py              142
-rw-r--r--   nova/exception.py                        4
-rw-r--r--   nova/network/quantumv2/api.py            5
-rw-r--r--   nova/tests/compute/test_compute.py      22
-rw-r--r--   nova/tests/network/test_quantumv2.py    13
-rw-r--r--   nova/virt/xenapi/vm_utils.py           114
8 files changed, 193 insertions, 514 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 082beb87d..8ec767b7e 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,233 +1,14 @@
Nova Style Commandments
=======================
-- Step 1: Read http://www.python.org/dev/peps/pep-0008/
-- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
-- Step 3: Read on
+- Step 1: Read the OpenStack Style Commandments
+ https://github.com/openstack-dev/hacking/blob/master/HACKING.rst
+- Step 2: Read on
+Nova Specific Commandments
+---------------------------
-General
--------
-- Put two newlines between top-level code (funcs, classes, etc)
-- Use only UNIX style newlines ("\n"), not Windows style ("\r\n")
-- Put one newline between methods in classes and anywhere else
-- Long lines should be wrapped in parentheses
- in preference to using a backslash for line continuation.
-- Do not write "except:", use "except Exception:" at the very least
-- Include your name with TODOs as in "#TODO(termie)"
-- Do not shadow a built-in or reserved word. Example::
-
- def list():
- return [1, 2, 3]
-
- mylist = list() # BAD, shadows `list` built-in
-
- class Foo(object):
- def list(self):
- return [1, 2, 3]
-
- mylist = Foo().list() # OKAY, does not shadow built-in
-
-- Use the "is not" operator when testing for unequal identities. Example::
-
- if not X is Y: # BAD, intended behavior is ambiguous
- pass
-
- if X is not Y: # OKAY, intuitive
- pass
-
-- Use the "not in" operator for evaluating membership in a collection. Example::
-
- if not X in Y: # BAD, intended behavior is ambiguous
- pass
-
- if X not in Y: # OKAY, intuitive
- pass
-
- if not (X in Y or X in Z): # OKAY, still better than all those 'not's
- pass
-
-
-Imports
--------
-- Do not import objects, only modules (*)
-- Do not import more than one module per line (*)
-- Do not use wildcard ``*`` import (*)
-- Do not make relative imports
-- Do not make new nova.db imports in nova/virt/*
-- Order your imports by the full module path
-- Organize your imports according to the following template
-
-(*) exceptions are:
-
-- imports from ``migrate`` package
-- imports from ``sqlalchemy`` package
-- imports from ``nova.db.sqlalchemy.session`` module
-- imports from ``nova.db.sqlalchemy.migration.versioning_api`` package
-
-Example::
-
- # vim: tabstop=4 shiftwidth=4 softtabstop=4
- {{stdlib imports in human alphabetical order}}
- \n
- {{third-party lib imports in human alphabetical order}}
- \n
- {{nova imports in human alphabetical order}}
- \n
- \n
- {{begin your code}}
-
-
-Human Alphabetical Order Examples
----------------------------------
-Example::
-
- import httplib
- import logging
- import random
- import StringIO
- import time
- import unittest
-
- import eventlet
- import webob.exc
-
- import nova.api.ec2
- from nova.api import openstack
- from nova.auth import users
- from nova.endpoint import cloud
- import nova.flags
- from nova import test
-
-
-Docstrings
-----------
-Example::
-
- """A one line docstring looks like this and ends in a period."""
-
-
- """A multi line docstring has a one-line summary, less than 80 characters.
-
- Then a new paragraph after a newline that explains in more detail any
- general information about the function, class or method. Example usages
- are also great to have here if it is a complex class for function.
-
- When writing the docstring for a class, an extra line should be placed
- after the closing quotations. For more in-depth explanations for these
- decisions see http://www.python.org/dev/peps/pep-0257/
-
- If you are going to describe parameters and return values, use Sphinx, the
- appropriate syntax is as follows.
-
- :param foo: the foo parameter
- :param bar: the bar parameter
- :returns: return_type -- description of the return value
- :returns: description of the return value
- :raises: AttributeError, KeyError
- """
-
-
-Dictionaries/Lists
-------------------
-If a dictionary (dict) or list object is longer than 80 characters, its items
-should be split with newlines. Embedded iterables should have their items
-indented. Additionally, the last item in the dictionary should have a trailing
-comma. This increases readability and simplifies future diffs.
-
-Example::
-
- my_dictionary = {
- "image": {
- "name": "Just a Snapshot",
- "size": 2749573,
- "properties": {
- "user_id": 12,
- "arch": "x86_64",
- },
- "things": [
- "thing_one",
- "thing_two",
- ],
- "status": "ACTIVE",
- },
- }
-
-
-Calling Methods
----------------
-Calls to methods 80 characters or longer should format each argument with
-newlines. This is not a requirement, but a guideline::
-
- unnecessarily_long_function_name('string one',
- 'string two',
- kwarg1=constants.ACTIVE,
- kwarg2=['a', 'b', 'c'])
-
-
-Rather than constructing parameters inline, it is better to break things up::
-
- list_of_strings = [
- 'what_a_long_string',
- 'not as long',
- ]
-
- dict_of_numbers = {
- 'one': 1,
- 'two': 2,
- 'twenty four': 24,
- }
-
- object_one.call_a_method('string three',
- 'string four',
- kwarg1=list_of_strings,
- kwarg2=dict_of_numbers)
-
-
-Internationalization (i18n) Strings
------------------------------------
-In order to support multiple languages, we have a mechanism to support
-automatic translations of exception and log strings.
-
-Example::
-
- msg = _("An error occurred")
- raise HTTPBadRequest(explanation=msg)
-
-If you have a variable to place within the string, first internationalize the
-template string then do the replacement.
-
-Example::
-
- msg = _("Missing parameter: %s") % ("flavor",)
- LOG.error(msg)
-
-If you have multiple variables to place in the string, use keyword parameters.
-This helps our translators reorder parameters when needed.
-
-Example::
-
- msg = _("The server with id %(s_id)s has no key %(m_key)s")
- LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
-
-
-Python 3.x compatibility
-------------------------
-Nova code should stay Python 3.x compatible. That means all Python 2.x-only
-constructs should be avoided. An example is
-
- except x,y:
-
-Use
-
- except x as y:
-
-instead. Other Python 3.x compatility issues, like e.g. print operator
-can be avoided in new code by using
-
- from __future__ import print_function
-
-at the top of your module.
+- ``nova.db`` imports are not allowed in ``nova/virt/*``
Creating Unit Tests
@@ -239,32 +20,32 @@ submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
For more information on creating unit tests and utilizing the testing
-infrastructure in OpenStack Nova, please read nova/tests/README.rst.
+infrastructure in OpenStack Nova, please read ``nova/tests/README.rst``.
Running Tests
-------------
The testing system is based on a combination of tox and testr. The canonical
-approach to running tests is to simply run the command `tox`. This will
-create virtual environments, populate them with depenedencies and run all of
+approach to running tests is to simply run the command ``tox``. This will
+create virtual environments, populate them with dependencies and run all of
the tests that OpenStack CI systems run. Behind the scenes, tox is running
-`testr run --parallel`, but is set up such that you can supply any additional
+``testr run --parallel``, but is set up such that you can supply any additional
testr arguments that are needed to tox. For example, you can run:
-`tox -- --analyze-isolation` to cause tox to tell testr to add
+``tox -- --analyze-isolation`` to cause tox to tell testr to add
--analyze-isolation to its argument list.
It is also possible to run the tests inside of a virtual environment
you have created, or it is possible that you have all of the dependencies
installed locally already. In this case, you can interact with the testr
-command directly. Running `testr run` will run the entire test suite. `testr
-run --parallel` will run it in parallel (this is the default incantation tox
+command directly. Running ``testr run`` will run the entire test suite. ``testr
+run --parallel`` will run it in parallel (this is the default incantation tox
uses.) More information about testr can be found at:
http://wiki.openstack.org/testr
Building Docs
-------------
-Normal Sphinx docs can be built via the setuptools `build_sphinx` command. To
-do this via `tox`, simply run `tox -evenv -- python setup.py build_sphinx`,
+Normal Sphinx docs can be built via the setuptools ``build_sphinx`` command. To
+do this via ``tox``, simply run ``tox -evenv -- python setup.py build_sphinx``,
which will cause a virtualenv with all of the needed dependencies to be
created and then inside of the virtualenv, the docs will be created and
put into doc/build/html.
@@ -274,7 +55,7 @@ additionally some fonts. On Ubuntu systems, you can get what you need with::
apt-get install texlive-latex-recommended texlive-latex-extra texlive-fonts-recommended
-Then run `build_sphinx_latex`, change to the build dir and run `make`.
+Then run ``build_sphinx_latex``, change to the build dir and run ``make``.
Like so::
tox -evenv -- python setup.py build_sphinx_latex
@@ -282,58 +63,3 @@ Like so::
make
You should wind up with a PDF - Nova.pdf.
-
-oslo-incubator
-----------------
-
-A number of modules from oslo-incubator are imported into the project.
-
-These modules are "incubating" in oslo-incubator and are kept in sync
-with the help of oslo's update.py script. See:
-
- https://wiki.openstack.org/wiki/Oslo#Incubation
-
-The copy of the code should never be directly modified here. Please
-always update oslo-incubator first and then run the script to copy
-the changes across.
-
-OpenStack Trademark
--------------------
-
-OpenStack is a registered trademark of the OpenStack Foundation, and uses the
-following capitalization:
-
- OpenStack
-
-
-Commit Messages
----------------
-Using a common format for commit messages will help keep our git history
-readable. Follow these guidelines:
-
- First, provide a brief summary of 50 characters or less. Summaries
- of greater then 72 characters will be rejected by the gate.
-
- The first line of the commit message should provide an accurate
- description of the change, not just a reference to a bug or
- blueprint. It must be followed by a single blank line.
-
- If the change relates to a specific driver (libvirt, xenapi, qpid, etc...),
- begin the first line of the commit message with the driver name, lowercased,
- followed by a colon.
-
- Following your brief summary, provide a more detailed description of
- the patch, manually wrapping the text at 72 characters. This
- description should provide enough detail that one does not have to
- refer to external resources to determine its high-level functionality.
-
- Once you use 'git review', two lines will be appended to the commit
- message: a blank line followed by a 'Change-Id'. This is important
- to correlate this commit with a specific review in Gerrit, and it
- should not be modified.
-
-For further information on constructing high quality commit messages,
-and how to split up commits into a series of changes, consult the
-project wiki:
-
- http://wiki.openstack.org/GitCommitMessages
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dbef94596..6d18952bf 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -995,15 +995,19 @@ class ComputeManager(manager.SchedulerDependentManager):
set_access_ip=set_access_ip)
except exception.InstanceNotFound:
# the instance got deleted during the spawn
- with excutils.save_and_reraise_exception():
- # Make sure the async call finishes
- if network_info is not None:
- network_info.wait(do_raise=False)
- try:
- self._deallocate_network(context, instance)
- except Exception:
- LOG.exception(_('Failed to dealloc network for '
- 'deleted instance'), instance=instance)
+ # Make sure the async call finishes
+ msg = _("Instance disappeared during build")
+ if network_info is not None:
+ network_info.wait(do_raise=False)
+ try:
+ self._deallocate_network(context, instance)
+ except Exception:
+ msg = _('Failed to dealloc network '
+ 'for deleted instance')
+ LOG.exception(msg, instance=instance)
+ raise exception.BuildAbortException(
+ instance_uuid=instance['uuid'],
+ reason=msg)
except exception.UnexpectedTaskStateError as e:
exc_info = sys.exc_info()
# Make sure the async call finishes
@@ -1950,53 +1954,70 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context, instance['uuid'],
- power_state=current_power_state)
-
- LOG.audit(_('instance snapshotting'), context=context,
+ try:
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state)
+ LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
- if instance['power_state'] != power_state.RUNNING:
- state = instance['power_state']
- running = power_state.RUNNING
- LOG.warn(_('trying to snapshot a non-running instance: '
+ if instance['power_state'] != power_state.RUNNING:
+ state = instance['power_state']
+ running = power_state.RUNNING
+ LOG.warn(_('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
- self._notify_about_instance_usage(
+ self._notify_about_instance_usage(
context, instance, "snapshot.start")
- if image_type == 'snapshot':
- expected_task_state = task_states.IMAGE_SNAPSHOT
+ if image_type == 'snapshot':
+ expected_task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == 'backup':
- expected_task_state = task_states.IMAGE_BACKUP
+ elif image_type == 'backup':
+ expected_task_state = task_states.IMAGE_BACKUP
- def update_task_state(task_state, expected_state=expected_task_state):
- return self._instance_update(context, instance['uuid'],
- task_state=task_state,
- expected_task_state=expected_state)
+ def update_task_state(task_state,
+ expected_state=expected_task_state):
+ return self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state
+ )
- self.driver.snapshot(context, instance, image_id, update_task_state)
- # The instance could have changed from the driver. But since
- # we're doing a fresh update here, we'll grab the changes.
+ self.driver.snapshot(context, instance, image_id,
+ update_task_state)
+ # The instance could have changed from the driver. But since
+ # we're doing a fresh update here, we'll grab the changes.
- instance = self._instance_update(context, instance['uuid'],
- task_state=None,
- expected_task_state=task_states.IMAGE_UPLOADING)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=None,
+ expected_task_state=
+ task_states.IMAGE_UPLOADING)
- if image_type == 'snapshot' and rotation:
- raise exception.ImageRotationNotAllowed()
+ if image_type == 'snapshot' and rotation:
+ raise exception.ImageRotationNotAllowed()
- elif image_type == 'backup' and rotation >= 0:
- self._rotate_backups(context, instance, backup_type, rotation)
+ elif image_type == 'backup' and rotation >= 0:
+ self._rotate_backups(context, instance, backup_type, rotation)
- elif image_type == 'backup':
- raise exception.RotationRequiredForBackup()
+ elif image_type == 'backup':
+ raise exception.RotationRequiredForBackup()
- self._notify_about_instance_usage(
- context, instance, "snapshot.end")
+ self._notify_about_instance_usage(context, instance,
+ "snapshot.end")
+
+ except exception.InstanceNotFound:
+ # the instance got deleted during the snapshot
+ # Quickly bail out of here
+ msg = _("Instance disappeared during snapshot")
+ LOG.debug(msg, instance=instance)
+ except exception.UnexpectedTaskStateError as e:
+ actual_task_state = e.kwargs.get('actual', None)
+ if actual_task_state == 'deleting':
+ msg = _('Instance was deleted during snapshot.')
+ LOG.debug(msg, instance=instance)
+ else:
+ raise
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
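
Note on the ``nova/compute/manager.py`` hunks above: ``snapshot_instance`` now
treats an instance that disappears mid-operation as a benign race instead of
letting the exception escape. A minimal sketch of the resulting control flow,
using the exception types from the hunk (``do_snapshot`` is a hypothetical
stand-in for the instance updates and driver call)::

    try:
        do_snapshot()
    except exception.InstanceNotFound:
        # the instance got deleted during the snapshot; quietly bail out
        LOG.debug(_("Instance disappeared during snapshot"), instance=instance)
    except exception.UnexpectedTaskStateError as e:
        if e.kwargs.get('actual') == 'deleting':
            LOG.debug(_('Instance was deleted during snapshot.'),
                      instance=instance)
        else:
            raise
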
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3b491ca6e..03dd43946 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1458,8 +1458,7 @@ def instance_create(context, values):
def _get_sec_group_models(session, security_groups):
models = []
- default_group = security_group_ensure_default(context,
- session=session)
+ default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
@@ -3208,6 +3207,16 @@ def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
###################
+def _security_group_create(context, values, session=None):
+ security_group_ref = models.SecurityGroup()
+ # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
+ # once save() is called. This will get cleaned up in next orm pass.
+ security_group_ref.rules
+ security_group_ref.update(values)
+ security_group_ref.save(session=session)
+ return security_group_ref
+
+
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
@@ -3223,7 +3232,7 @@ def _security_group_get_by_names(context, session, project_id, group_names):
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
- read_deleted="no", join_rules=False).\
+ read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
@@ -3244,11 +3253,10 @@ def security_group_get_all(context):
@require_context
-def security_group_get(context, security_group_id, columns_to_join=None,
- session=None):
- query = _security_group_get_query(context, session=session,
- project_only=True).\
- filter_by(id=security_group_id)
+def security_group_get(context, security_group_id, columns_to_join=None):
+ query = _security_group_get_query(context, project_only=True).\
+ filter_by(id=security_group_id)
+
if columns_to_join is None:
columns_to_join = []
if 'instances' in columns_to_join:
@@ -3264,12 +3272,9 @@ def security_group_get(context, security_group_id, columns_to_join=None,
@require_context
def security_group_get_by_name(context, project_id, group_name,
- columns_to_join=None, session=None):
- if session is None:
- session = get_session()
-
- query = _security_group_get_query(context, session=session,
- read_deleted="no", join_rules=False).\
+ columns_to_join=None):
+ query = _security_group_get_query(context,
+ read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
@@ -3334,70 +3339,77 @@ def security_group_in_use(context, group_id):
@require_context
-def security_group_create(context, values, session=None):
- security_group_ref = models.SecurityGroup()
- # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
- # once save() is called. This will get cleaned up in next orm pass.
- security_group_ref.rules
- security_group_ref.update(values)
- if session is None:
- session = get_session()
- security_group_ref.save(session=session)
- return security_group_ref
+def security_group_create(context, values):
+ return _security_group_create(context, values)
@require_context
-def security_group_update(context, security_group_id, values, session=None):
- security_group_ref = model_query(context, models.SecurityGroup,
- session=session).filter_by(id=security_group_id).first()
+def security_group_update(context, security_group_id, values):
+ session = get_session()
+ with session.begin():
+ security_group_ref = model_query(context, models.SecurityGroup,
+ session=session).\
+ filter_by(id=security_group_id).\
+ first()
- if not security_group_ref:
- raise exception.SecurityGroupNotFound(
- security_group_id=security_group_id)
- security_group_ref.update(values)
- return security_group_ref
+ if not security_group_ref:
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group_id)
+ security_group_ref.update(values)
+ return security_group_ref
-def security_group_ensure_default(context, session=None):
+def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
- try:
- default_group = security_group_get_by_name(context,
- context.project_id, 'default',
- columns_to_join=[], session=session)
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user_id,
- 'project_id': context.project_id}
- default_group = security_group_create(context, values,
- session=session)
- for default_rule in security_group_default_rule_list(context):
- # This is suboptimal, it should be programmatic to know
- # the values of the default_rule
- rule_values = {'protocol': default_rule.protocol,
- 'from_port': default_rule.from_port,
- 'to_port': default_rule.to_port,
- 'cidr': default_rule.cidr,
- 'parent_group_id': default_group.id,
- }
- security_group_rule_create(context, rule_values)
- return default_group
+ session = get_session()
+ with session.begin():
+ try:
+ default_group = _security_group_get_by_names(context,
+ session,
+ context.project_id,
+ ['default'])[0]
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ default_group = _security_group_create(context, values,
+ session=session)
+ default_rules = _security_group_rule_get_default_query(context,
+ session=session).all()
+ for default_rule in default_rules:
+ # This is suboptimal, it should be programmatic to know
+ # the values of the default_rule
+ rule_values = {'protocol': default_rule.protocol,
+ 'from_port': default_rule.from_port,
+ 'to_port': default_rule.to_port,
+ 'cidr': default_rule.cidr,
+ 'parent_group_id': default_group.id,
+ }
+ _security_group_rule_create(context,
+ rule_values,
+ session=session)
+ return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- session.query(models.SecurityGroup).\
+ model_query(context, models.SecurityGroup,
+ session=session).\
filter_by(id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupInstanceAssociation).\
+ model_query(context, models.SecurityGroupInstanceAssociation,
+ session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupIngressRule).\
+ model_query(context, models.SecurityGroupIngressRule,
+ session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
- session.query(models.SecurityGroupIngressRule).\
+ model_query(context, models.SecurityGroupIngressRule,
+ session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
@@ -3413,6 +3425,13 @@ def security_group_count_by_project(context, project_id, session=None):
###################
+def _security_group_rule_create(context, values, session=None):
+ security_group_rule_ref = models.SecurityGroupIngressRule()
+ security_group_rule_ref.update(values)
+ security_group_rule_ref.save(session=session)
+ return security_group_rule_ref
+
+
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@@ -3451,10 +3470,7 @@ def security_group_rule_get_by_security_group_grantee(context,
@require_context
def security_group_rule_create(context, values):
- security_group_rule_ref = models.SecurityGroupIngressRule()
- security_group_rule_ref.update(values)
- security_group_rule_ref.save()
- return security_group_rule_ref
+ return _security_group_rule_create(context, values)
@require_context
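
Note on the ``nova/db/sqlalchemy/api.py`` hunks above: the public security
group API no longer takes a ``session`` argument; the session-aware logic
moves into private ``_``-prefixed helpers, so callers such as
``security_group_ensure_default`` can compose several writes inside a single
transaction. A minimal sketch of the pattern, assuming ``get_session`` and
``models`` from this module (``thing_ensure`` and ``_thing_create`` are
hypothetical names)::

    def _thing_create(context, values, session=None):
        ref = models.SecurityGroup()
        ref.update(values)
        ref.save(session=session)  # joins the caller's transaction if given
        return ref

    def thing_ensure(context, values, rule_values):
        session = get_session()
        with session.begin():  # one transaction around both writes
            ref = _thing_create(context, values, session=session)
            rule_values['parent_group_id'] = ref.id
            _security_group_rule_create(context, rule_values, session=session)
            return ref
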
diff --git a/nova/exception.py b/nova/exception.py
index 350fb2f77..b64abd4db 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -982,10 +982,6 @@ class InsufficientFreeMemory(NovaException):
message = _("Insufficient free memory on compute node to start %(uuid)s.")
-class CouldNotFetchMetrics(NovaException):
- message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
-
-
class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index c7f4ffd58..814b66ee0 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -182,6 +182,11 @@ class API(base.Base):
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
+
+ if not nets:
+ LOG.warn(_("No network configured!"), instance=instance)
+ return []
+
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
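
Note on the ``nova/network/quantumv2/api.py`` hunk above: with the new guard,
``allocate_for_instance`` logs a warning and returns an empty list when the
tenant has no networks, rather than continuing with nothing to allocate. A
hypothetical caller-side sketch::

    network_info = api.allocate_for_instance(context, instance)
    if not network_info:
        # no networks configured for this tenant; boot without NICs
        LOG.debug(_("Booting %s with no network interfaces"),
                  instance['uuid'])
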
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 73e260990..11c393279 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1182,9 +1182,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
- self.assertRaises(exception.InstanceNotFound,
- self.compute.run_instance,
- self.context, instance=instance)
+ self.compute.run_instance(self.context, instance=instance)
def test_run_instance_bails_on_missing_instance(self):
# Make sure that run_instance() will quickly ignore a deleted instance
@@ -2037,6 +2035,24 @@ class ComputeTestCase(BaseTestCase):
self._assert_state({'task_state': None})
self.compute.terminate_instance(self.context, instance=instance)
+ def test_snapshot_handles_cases_when_instance_is_deleted(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': 'deleting'}))
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.DELETING})
+ self.compute.snapshot_instance(self.context, "failing_snapshot",
+ instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
+ def test_snapshot_handles_cases_when_instance_is_not_found(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': 'deleting'}))
+ instance["uuid"] = str(uuid.uuid4())
+ self.compute.snapshot_instance(self.context, "failing_snapshot",
+ instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 2ddeb72bf..a2e7efcc0 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -594,6 +594,19 @@ class TestQuantumv2(test.TestCase):
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
+ def test_allocate_for_instance_no_networks(self):
+        """Verify an empty list is returned when no networks are defined."""
+ api = quantumapi.API()
+ self.moxed_client.list_networks(
+ tenant_id=self.instance['project_id'],
+ shared=False).AndReturn(
+ {'networks': []})
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': []})
+ self.mox.ReplayAll()
+ nwinfo = api.allocate_for_instance(self.context, self.instance)
+ self.assertEqual(len(nwinfo), 0)
+
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6e9f09184..01c640e37 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,7 +22,6 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
-import decimal
import os
import re
import time
@@ -1553,22 +1552,6 @@ def fetch_bandwidth(session):
return bw
-def compile_metrics(start_time, stop_time=None):
- """Compile bandwidth usage, cpu, and disk metrics for all VMs on
- this host.
- Note that some stats, like bandwidth, do not seem to be very
- accurate in some of the data from XenServer (mdragon).
- """
- start_time = int(start_time)
-
- xml = _get_rrd_updates(_get_rrd_server(), start_time)
- if xml:
- doc = minidom.parseString(xml)
- return _parse_rrd_update(doc, start_time, stop_time)
-
- raise exception.CouldNotFetchMetrics()
-
-
def _scan_sr(session, sr_ref=None):
"""Scans the SR specified by sr_ref."""
if sr_ref:
@@ -1690,103 +1673,6 @@ def _get_rrd(server, vm_uuid):
return None
-def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string."""
- try:
- xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
- server[0],
- CONF.xenapi_connection_username,
- CONF.xenapi_connection_password,
- server[1],
- start_time))
- return xml.read()
- except IOError:
- LOG.exception(_('Unable to obtain RRD XML updates with '
- 'server details: %s.'), server)
- return None
-
-
-def _parse_rrd_meta(doc):
- data = {}
- meta = doc.getElementsByTagName('meta')[0]
- for tag in ('start', 'end', 'step'):
- data[tag] = int(meta.getElementsByTagName(tag)[0].firstChild.data)
- legend = meta.getElementsByTagName('legend')[0]
- data['legend'] = [child.firstChild.data for child in legend.childNodes]
- return data
-
-
-def _parse_rrd_data(doc):
- dnode = doc.getElementsByTagName('data')[0]
- return [dict(
- time=int(child.getElementsByTagName('t')[0].firstChild.data),
- values=[decimal.Decimal(valnode.firstChild.data)
- for valnode in child.getElementsByTagName('v')])
- for child in dnode.childNodes]
-
-
-def _parse_rrd_update(doc, start, until=None):
- sum_data = {}
- meta = _parse_rrd_meta(doc)
- data = _parse_rrd_data(doc)
- for col, collabel in enumerate(meta['legend']):
- _datatype, _objtype, uuid, name = collabel.split(':')
- vm_data = sum_data.get(uuid, dict())
- if name.startswith('vif'):
- vm_data[name] = _integrate_series(data, col, start, until)
- else:
- vm_data[name] = _average_series(data, col, until)
- sum_data[uuid] = vm_data
- return sum_data
-
-
-def _average_series(data, col, until=None):
- vals = [row['values'][col] for row in data
- if (not until or (row['time'] <= until)) and
- row['values'][col].is_finite()]
- if vals:
- try:
- return (sum(vals) / len(vals)).quantize(decimal.Decimal('1.0000'))
- except decimal.InvalidOperation:
- # (mdragon) Xenserver occasionally returns odd values in
- # data that will throw an error on averaging (see bug 918490)
- # These are hard to find, since, whatever those values are,
- # Decimal seems to think they are a valid number, sortof.
- # We *think* we've got the the cases covered, but just in
- # case, log and return NaN, so we don't break reporting of
- # other statistics.
- LOG.error(_("Invalid statistics data from Xenserver: %s")
- % str(vals))
- return decimal.Decimal('NaN')
- else:
- return decimal.Decimal('0.0000')
-
-
-def _integrate_series(data, col, start, until=None):
- total = decimal.Decimal('0.0000')
- prev_time = int(start)
- prev_val = None
- for row in reversed(data):
- if not until or (row['time'] <= until):
- time = row['time']
- val = row['values'][col]
- if val.is_nan():
- val = decimal.Decimal('0.0000')
- if prev_val is None:
- prev_val = val
- if prev_val >= val:
- total += ((val * (time - prev_time)) +
- (decimal.Decimal('0.5000') * (prev_val - val) *
- (time - prev_time)))
- else:
- total += ((prev_val * (time - prev_time)) +
- (decimal.Decimal('0.5000') * (val - prev_val) *
- (time - prev_time)))
- prev_time = time
- prev_val = val
- return total.quantize(decimal.Decimal('1.0000'))
-
-
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
try: