summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-baremetal-manage234
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json8
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml3
-rw-r--r--doc/api_samples/os-aggregates/aggregate-update-post-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregate-update-post-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-get-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-get-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-list-get-resp.json6
-rw-r--r--doc/api_samples/os-aggregates/aggregates-list-get-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json3
-rw-r--r--doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml3
-rw-r--r--doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json6
-rw-r--r--doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml6
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json20
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml5
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json94
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml23
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json10
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml9
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json20
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml5
-rw-r--r--doc/api_samples/os-server-password/get-password-resp.json3
-rw-r--r--doc/api_samples/os-server-password/get-password-resp.xml2
-rw-r--r--doc/api_samples/os-server-password/server-post-req.json16
-rw-r--r--doc/api_samples/os-server-password/server-post-req.xml19
-rw-r--r--doc/api_samples/os-server-password/server-post-resp.json16
-rw-r--r--doc/api_samples/os-server-password/server-post-resp.xml6
-rw-r--r--doc/source/man/nova-baremetal-manage.rst67
-rw-r--r--etc/nova/nova.conf.sample5
-rw-r--r--etc/nova/policy.json1
-rw-r--r--nova/api/ec2/cloud.py19
-rw-r--r--nova/api/openstack/compute/contrib/flavorextraspecs.py10
-rw-r--r--nova/api/openstack/compute/contrib/keypairs.py4
-rw-r--r--nova/api/openstack/compute/contrib/server_password.py87
-rw-r--r--nova/api/openstack/compute/server_metadata.py8
-rw-r--r--nova/api/openstack/compute/servers.py3
-rw-r--r--nova/api/openstack/compute/views/images.py4
-rw-r--r--nova/block_device.py2
-rw-r--r--nova/compute/api.py32
-rw-r--r--nova/compute/manager.py71
-rw-r--r--nova/compute/resource_tracker.py7
-rw-r--r--nova/compute/utils.py60
-rw-r--r--nova/conductor/api.py14
-rw-r--r--nova/conductor/manager.py11
-rw-r--r--nova/conductor/rpcapi.py16
-rw-r--r--nova/config.py13
-rw-r--r--nova/db/api.py8
-rw-r--r--nova/db/sqlalchemy/api.py82
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py57
-rw-r--r--nova/db/sqlalchemy/models.py9
-rw-r--r--nova/db/sqlalchemy/session.py6
-rw-r--r--nova/image/glance.py22
-rw-r--r--nova/network/dns_driver.py2
-rw-r--r--nova/network/linux_net.py6
-rw-r--r--nova/network/minidns.py18
-rw-r--r--nova/network/noop_dns_driver.py2
-rw-r--r--nova/notifications.py3
-rw-r--r--nova/openstack/common/notifier/api.py7
-rw-r--r--nova/openstack/common/notifier/rpc_notifier.py2
-rw-r--r--nova/openstack/common/notifier/rpc_notifier2.py51
-rw-r--r--nova/openstack/common/rpc/__init__.py5
-rw-r--r--nova/openstack/common/rpc/amqp.py23
-rw-r--r--nova/openstack/common/rpc/common.py144
-rw-r--r--nova/openstack/common/rpc/dispatcher.py18
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py28
-rw-r--r--nova/openstack/common/rpc/impl_qpid.py25
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py25
-rw-r--r--nova/scheduler/filters/availability_zone_filter.py17
-rw-r--r--nova/servicegroup/api.py2
-rw-r--r--nova/tests/api/ec2/test_cloud.py10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_aggregates.py16
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_password.py86
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py1
-rw-r--r--nova/tests/api/openstack/compute/test_images.py26
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py25
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py9
-rw-r--r--nova/tests/api/openstack/test_common.py38
-rw-r--r--nova/tests/baremetal/test_nova_baremetal_manage.py49
-rw-r--r--nova/tests/cells/fakes.py9
-rw-r--r--nova/tests/compute/test_compute.py224
-rw-r--r--nova/tests/compute/test_compute_utils.py104
-rw-r--r--nova/tests/conductor/test_conductor.py7
-rw-r--r--nova/tests/fake_policy.py1
-rw-r--r--nova/tests/image/test_glance.py17
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl94
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl23
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/test_api_samples.py86
-rw-r--r--nova/tests/monkey_patch_example/__init__.py2
-rw-r--r--nova/tests/scheduler/test_host_filters.py7
-rw-r--r--nova/tests/test_api.py2
-rw-r--r--nova/tests/test_cinder.py17
-rw-r--r--nova/tests/test_db_api.py133
-rw-r--r--nova/tests/test_imagecache.py2
-rw-r--r--nova/tests/test_libvirt.py78
-rw-r--r--nova/tests/test_migrations.py34
-rw-r--r--nova/tests/test_powervm.py29
-rw-r--r--nova/tests/test_utils.py17
-rw-r--r--nova/tests/test_xenapi.py21
-rw-r--r--nova/tests/xenapi/stubs.py10
-rw-r--r--nova/utils.py16
-rw-r--r--nova/virt/baremetal/vif_driver.py3
-rw-r--r--nova/virt/driver.py25
-rw-r--r--nova/virt/fake.py12
-rw-r--r--nova/virt/firewall.py2
-rw-r--r--nova/virt/hyperv/driver.py5
-rw-r--r--nova/virt/hyperv/hostops.py4
-rw-r--r--nova/virt/hyperv/vmops.py32
-rw-r--r--nova/virt/hyperv/volumeutils.py4
-rw-r--r--nova/virt/hyperv/volumeutilsV2.py2
-rw-r--r--nova/virt/libvirt/driver.py60
-rw-r--r--nova/virt/libvirt/vif.py2
-rw-r--r--nova/virt/libvirt/volume_nfs.py4
-rw-r--r--nova/virt/powervm/blockdev.py145
-rw-r--r--nova/virt/powervm/common.py48
-rw-r--r--nova/virt/powervm/driver.py57
-rw-r--r--nova/virt/powervm/exception.py6
-rw-r--r--nova/virt/powervm/operator.py38
-rw-r--r--nova/virt/vmwareapi/driver.py5
-rw-r--r--nova/virt/vmwareapi/vmops.py52
-rw-r--r--nova/virt/xenapi/driver.py32
-rw-r--r--nova/virt/xenapi/firewall.py5
-rw-r--r--nova/virt/xenapi/vmops.py27
-rw-r--r--nova/volume/cinder.py6
-rwxr-xr-xrun_tests.sh2
-rw-r--r--setup.py1
-rwxr-xr-xtools/hacking.py17
-rw-r--r--tox.ini4
155 files changed, 2978 insertions, 568 deletions
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
new file mode 100755
index 000000000..34a98caf2
--- /dev/null
+++ b/bin/nova-baremetal-manage
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Interactive shell based on Django:
+#
+# Copyright (c) 2005, the Lawrence Journal-World
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of Django nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+ CLI interface for nova bare-metal management.
+"""
+
+import ast
+import errno
+import gettext
+import math
+import netaddr
+import optparse
+import os
+import sys
+
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('nova', unicode=1)
+
+from nova import config
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import cliutils
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+from nova.openstack.common import timeutils
+from nova import utils
+from nova import version
+from nova.virt.baremetal import db as bmdb
+from nova.virt.baremetal.db import migration as bmdb_migration
+
+CONF = cfg.CONF
+
+
+# Decorators for actions
+def args(*args, **kwargs):
+ def _decorator(func):
+ func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
+ return func
+ return _decorator
+
+
+class BareMetalDbCommands(object):
+ """Class for managing the bare-metal database."""
+
+ def __init__(self):
+ pass
+
+ @args('--version', dest='version', metavar='<version>',
+ help='Bare-metal Database version')
+ def sync(self, version=None):
+ """Sync the database up to the most recent version."""
+ bmdb_migration.db_sync(version)
+
+ def version(self):
+ """Print the current database version."""
+ v = bmdb_migration.db_version()
+ print(v)
+ # return for unittest
+ return v
+
+
+CATEGORIES = {
+ 'db': BareMetalDbCommands,
+}
+
+
+def methods_of(obj):
+ """Get all callable methods of an object that don't start with underscore
+ returns a list of tuples of the form (method_name, method)"""
+ result = []
+ for i in dir(obj):
+ if callable(getattr(obj, i)) and not i.startswith('_'):
+ result.append((i, getattr(obj, i)))
+ return result
+
+
+def add_command_parsers(subparsers):
+ parser = subparsers.add_parser('bash-completion')
+ parser.add_argument('query_category', nargs='?')
+
+ for category in CATEGORIES:
+ command_object = CATEGORIES[category]()
+
+ parser = subparsers.add_parser(category)
+ parser.set_defaults(command_object=command_object)
+
+ category_subparsers = parser.add_subparsers(dest='action')
+
+ for (action, action_fn) in methods_of(command_object):
+ parser = category_subparsers.add_parser(action)
+
+ action_kwargs = []
+ for args, kwargs in getattr(action_fn, 'args', []):
+ action_kwargs.append(kwargs['dest'])
+ kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
+ parser.add_argument(*args, **kwargs)
+
+ parser.set_defaults(action_fn=action_fn)
+ parser.set_defaults(action_kwargs=action_kwargs)
+
+ parser.add_argument('action_args', nargs='*')
+
+
+category_opt = cfg.SubCommandOpt('category',
+ title='Command categories',
+ help='Available categories',
+ handler=add_command_parsers)
+
+
+def main():
+ """Parse options and call the appropriate class/method."""
+ CONF.register_cli_opt(category_opt)
+ try:
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ except cfg.ConfigFilesNotFoundError:
+ cfgfile = CONF.config_file[-1] if CONF.config_file else None
+ if cfgfile and not os.access(cfgfile, os.R_OK):
+ st = os.stat(cfgfile)
+ print(_("Could not read %s. Re-running with sudo") % cfgfile)
+ try:
+ os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
+ except Exception:
+ print(_('sudo failed, continuing as if nothing happened'))
+
+ print(_('Please re-run nova-manage as root.'))
+ sys.exit(2)
+
+ if CONF.category.name == "version":
+ print(_("%(version)s (%(vcs)s)") %
+ {'version': version.version_string(),
+ 'vcs': version.version_string_with_vcs()})
+ sys.exit(0)
+
+ if CONF.category.name == "bash-completion":
+ if not CONF.category.query_category:
+ print(" ".join(CATEGORIES.keys()))
+ elif CONF.category.query_category in CATEGORIES:
+ fn = CATEGORIES[CONF.category.query_category]
+ command_object = fn()
+ actions = methods_of(command_object)
+ print(" ".join([k for (k, v) in actions]))
+ sys.exit(0)
+
+ fn = CONF.category.action_fn
+ fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
+ fn_kwargs = {}
+ for k in CONF.category.action_kwargs:
+ v = getattr(CONF.category, 'action_kwarg_' + k)
+ if v is None:
+ continue
+ if isinstance(v, basestring):
+ v = v.decode('utf-8')
+ fn_kwargs[k] = v
+
+ # call the action with the remaining arguments
+ # check arguments
+ try:
+ cliutils.validate_args(fn, *fn_args, **fn_kwargs)
+ except cliutils.MissingArgs as e:
+ print(fn.__doc__)
+ parser.print_help()
+ print(e)
+ sys.exit(1)
+ try:
+ fn(*fn_args, **fn_kwargs)
+ sys.exit(0)
+ except Exception:
+ print(_("Command failed, please check log for more info"))
+ raise
+
+
+if __name__ == '__main__':
+ main()
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index f2ed617bf..f59ffc546 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -353,6 +353,14 @@
"updated": "2011-12-21T00:00:00+00:00"
},
{
+ "alias": "os-server-password",
+ "description": "Server password support",
+ "links": [],
+ "name": "ServerPassword",
+ "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
+ "updated": "2012-11-29T00:00:00+00:00"
+ },
+ {
"alias": "os-server-start-stop",
"description": "Start/Stop instance compute API support",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index a17f415f5..8e22e5ec9 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -146,6 +146,9 @@
<extension alias="os-server-diagnostics" updated="2011-12-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1" name="ServerDiagnostics">
<description>Allow Admins to view server diagnostics through server action</description>
</extension>
+ <extension alias="os-server-password" updated="2012-11-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
+ <description>Server password support</description>
+ </extension>
<extension alias="os-server-start-stop" updated="2012-01-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
<description>Start/Stop instance compute API support</description>
</extension>
diff --git a/doc/api_samples/os-aggregates/aggregate-update-post-resp.json b/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
index 81869e730..6636f0a17 100644
--- a/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
@@ -1,13 +1,15 @@
{
"aggregate": {
"availability_zone": "nova2",
- "created_at": "2012-10-01T18:50:27.781065",
+ "created_at": "2012-12-04T12:04:27.075065",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova2"
+ },
"name": "newname",
- "updated_at": "2012-10-01T18:50:27.791392"
+ "updated_at": "2012-12-04T12:04:27.242597"
}
} \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml b/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
index ad9498aa0..25227669b 100644
--- a/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
@@ -3,10 +3,12 @@
<name>newname</name>
<availability_zone>nova2</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.506667</created_at>
- <updated_at>2012-10-01 18:50:35.517397</updated_at>
+ <created_at>2012-12-04 12:04:30.245284</created_at>
+ <updated_at>2012-12-04 12:04:30.357795</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova2</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
index 518f4176a..1f7918ba8 100644
--- a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
@@ -1,14 +1,16 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.511586",
+ "created_at": "2012-12-04T12:04:24.399784",
"deleted": false,
"deleted_at": null,
"hosts": [
- "581d29b9e3504d8a895caddb13839b15"
+ "0438c6a4e8d841ad823b801d681f4680"
],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
index a4c9de5fd..ad11f3859 100644
--- a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
@@ -3,12 +3,14 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.236556</created_at>
+ <created_at>2012-12-04 12:04:27.574038</created_at>
<updated_at>None</updated_at>
<hosts>
- <host>7c9e00dbca5e4fb88538b021c0f933a5</host>
+ <host>392adba19dd449179804eaff16ff4a97</host>
</hosts>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-get-resp.json b/doc/api_samples/os-aggregates/aggregates-get-resp.json
index cde446e51..101a6584d 100644
--- a/doc/api_samples/os-aggregates/aggregates-get-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-get-resp.json
@@ -1,13 +1,15 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.048605",
+ "created_at": "2012-11-16T06:22:23.032493",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/os-aggregates/aggregates-get-resp.xml b/doc/api_samples/os-aggregates/aggregates-get-resp.xml
index be1349bd2..431e59cf4 100644
--- a/doc/api_samples/os-aggregates/aggregates-get-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-get-resp.xml
@@ -3,10 +3,12 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.764838</created_at>
+ <created_at>2012-11-16 06:22:25.587739</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
-</aggregate> \ No newline at end of file
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/doc/api_samples/os-aggregates/aggregates-list-get-resp.json b/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
index 75b412b53..53d278c63 100644
--- a/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
@@ -2,12 +2,14 @@
"aggregates": [
{
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.252869",
+ "created_at": "2012-11-16T06:22:23.361359",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml b/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
index c5590855b..8d92e1466 100644
--- a/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
@@ -4,11 +4,13 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.970677</created_at>
+ <created_at>2012-11-16 06:22:25.935099</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
-</aggregates> \ No newline at end of file
+</aggregates>
diff --git a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
index dc4806a4f..33b4702ef 100644
--- a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
@@ -1,12 +1,13 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:26.604176",
+ "created_at": "2012-11-16T06:22:22.342791",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
+ "availability_zone": "nova",
"key": "value"
},
"name": "name",
diff --git a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
index 7eeefb8b7..5e2193d06 100644
--- a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
@@ -3,12 +3,13 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.313003</created_at>
+ <created_at>2012-11-16 06:22:24.864471</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
<metadata>
<key>value</key>
+ <availability_zone>nova</availability_zone>
</metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
index 497fcb7fb..ba9d4e00a 100644
--- a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
@@ -1,12 +1,14 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.511586",
+ "created_at": "2012-12-04T12:04:26.557909",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
index dc8a55330..33dce2838 100644
--- a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
@@ -3,10 +3,12 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.236556</created_at>
+ <created_at>2012-12-04 12:04:29.722109</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json
new file mode 100644
index 000000000..63eaddeb6
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml
new file mode 100644
index 000000000..78b430eca
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json
new file mode 100644
index 000000000..81e9c993c
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "vcpus": 8
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml
new file mode 100644
index 000000000..05192e3f9
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json
new file mode 100644
index 000000000..b86a63df6
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "flavortest",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "100",
+ "rxtx_factor": 2.0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml
new file mode 100644
index 000000000..7038e1b88
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="flavortest"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="100"
+ rxtx_factor="2.0" /> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json
new file mode 100644
index 000000000..5129dbeb8
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "100",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/100",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/100",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "flavortest",
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml
new file mode 100644
index 000000000..8fb4f46be
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="100" rxtx_factor="2.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/100" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/100" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/get-password-resp.json b/doc/api_samples/os-server-password/get-password-resp.json
new file mode 100644
index 000000000..4becaf292
--- /dev/null
+++ b/doc/api_samples/os-server-password/get-password-resp.json
@@ -0,0 +1,3 @@
+{
+ "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg=="
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/get-password-resp.xml b/doc/api_samples/os-server-password/get-password-resp.xml
new file mode 100644
index 000000000..64b46a571
--- /dev/null
+++ b/doc/api_samples/os-server-password/get-password-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<password>xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==</password> \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-req.json b/doc/api_samples/os-server-password/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-req.xml b/doc/api_samples/os-server-password/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-resp.json b/doc/api_samples/os-server-password/server-post-resp.json
new file mode 100644
index 000000000..0f477be49
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "78AtBtuxTqZV",
+ "id": "66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-resp.xml b/doc/api_samples/os-server-password/server-post-resp.xml
new file mode 100644
index 000000000..cac50bc9b
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="b68e3354-0b1a-4e92-a664-8b332cff27f5" adminPass="sLV7uLzmgoHu">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/b68e3354-0b1a-4e92-a664-8b332cff27f5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/b68e3354-0b1a-4e92-a664-8b332cff27f5" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/source/man/nova-baremetal-manage.rst b/doc/source/man/nova-baremetal-manage.rst
new file mode 100644
index 000000000..1fab368e5
--- /dev/null
+++ b/doc/source/man/nova-baremetal-manage.rst
@@ -0,0 +1,67 @@
+=====================
+nova-baremetal-manage
+=====================
+
+------------------------------------------------------
+Manage bare-metal DB in OpenStack Nova
+------------------------------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-10-17
+:Copyright: OpenStack LLC
+:Version: 2013.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-baremetal-manage <category> <action> [<args>]
+
+DESCRIPTION
+===========
+
+nova-baremetal-manage manages bare-metal DB schema.
+
+OPTIONS
+=======
+
+The standard pattern for executing a nova-baremetal-manage command is:
+``nova-baremetal-manage <category> <command> [<args>]``
+
+Run without arguments to see a list of available command categories:
+``nova-baremetal-manage``
+
+Categories are db. Detailed descriptions are below.
+
+You can also run with a category argument such as "db" to see a list of all commands in that category:
+``nova-baremetal-manage db``
+
+These sections describe the available categories and arguments for nova-baremetal-manage.
+
+Bare-Metal DB
+~~~~~~~~~~~~~
+
+``nova-baremetal-manage db version``
+
+ Print the current database version.
+
+``nova-baremetal-manage db sync``
+
+ Sync the database up to the most recent version. This is the standard way to create the db as well.
+
+FILES
+========
+
+/etc/nova/nova.conf: get location of bare-metal DB
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+
+BUGS
+====
+
+* Nova is maintained in Launchpad so you can view current bugs at `OpenStack Nova <https://bugs.launchpad.net/nova>`__
+
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index c3c10239c..bee408412 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1806,5 +1806,8 @@
#### (StrOpt) Override service catalog lookup with template for cinder
#### endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# cinder_http_retries=3
+#### (IntOpt) Number of cinderclient retries on failed http calls
-# Total option count: 462
+
+# Total option count: 463
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index d7596deab..04766371e 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -73,6 +73,7 @@
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "rule:admin_api",
+ "compute_extension:server_password": "",
"compute_extension:services": "rule:admin_api",
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index d40f25c4d..eb8139ad0 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -201,7 +201,7 @@ def _format_mappings(properties, result):
class CloudController(object):
- """ CloudController provides the critical dispatch between
+ """CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
@@ -257,12 +257,18 @@ class CloudController(object):
if not zone in available_zones:
available_zones.append(zone)
+ # aggregate based availability_zones
+ metadata = db.aggregate_host_get_by_metadata_key(context,
+ key='availability_zone')
+ for zone_set in metadata.values():
+ for zone in zone_set:
+ if zone not in available_zones:
+ available_zones.append(zone)
not_available_zones = []
for zone in [service.availability_zone for service in disabled_services
if not service['availability_zone'] in available_zones]:
if not zone in not_available_zones:
not_available_zones.append(zone)
-
return (available_zones, not_available_zones)
def _describe_availability_zones(self, context, **kwargs):
@@ -294,6 +300,15 @@ class CloudController(object):
host_services.setdefault(service['host'], [])
host_services[service['host']].append(service)
+ # aggregate based available_zones
+ metadata = db.aggregate_host_get_by_metadata_key(context,
+ key='availability_zone')
+        # metadata: {machine: set(az1, az2)}
+ for host, zones in metadata.items():
+ for zone in zones:
+ zone_hosts.setdefault(zone, [])
+ if host not in zone_hosts[zone]:
+ zone_hosts[zone].append(host)
result = []
for zone in available_zones:
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index 77af25c9f..1abb525ad 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" The instance type extra specs extension"""
+"""The instance type extra specs extension"""
from webob import exc
@@ -35,7 +35,7 @@ class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
class FlavorExtraSpecsController(object):
- """ The flavor extra specs API controller for the OpenStack API """
+ """The flavor extra specs API controller for the OpenStack API """
def _get_extra_specs(self, context, flavor_id):
extra_specs = db.instance_type_extra_specs_get(context, flavor_id)
@@ -51,7 +51,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def index(self, req, flavor_id):
- """ Returns the list of extra specs for a givenflavor """
+        """Returns the list of extra specs for a given flavor """
context = req.environ['nova.context']
authorize(context)
return self._get_extra_specs(context, flavor_id)
@@ -92,7 +92,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def show(self, req, flavor_id, id):
- """ Return a single extra spec item """
+ """Return a single extra spec item """
context = req.environ['nova.context']
authorize(context)
specs = self._get_extra_specs(context, flavor_id)
@@ -102,7 +102,7 @@ class FlavorExtraSpecsController(object):
raise exc.HTTPNotFound()
def delete(self, req, flavor_id, id):
- """ Deletes an existing extra spec """
+ """Deletes an existing extra spec """
context = req.environ['nova.context']
authorize(context)
db.instance_type_extra_specs_delete(context, flavor_id, id)
diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py
index e5e1e37fd..9b3b39384 100644
--- a/nova/api/openstack/compute/contrib/keypairs.py
+++ b/nova/api/openstack/compute/contrib/keypairs.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" Keypair management extension"""
+"""Keypair management extension"""
import webob
import webob.exc
@@ -49,7 +49,7 @@ class KeypairsTemplate(xmlutil.TemplateBuilder):
class KeypairController(object):
- """ Keypair API controller for the OpenStack API """
+ """Keypair API controller for the OpenStack API """
def __init__(self):
self.api = compute_api.KeypairAPI()
diff --git a/nova/api/openstack/compute/contrib/server_password.py b/nova/api/openstack/compute/contrib/server_password.py
new file mode 100644
index 000000000..b4b2e04a5
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/server_password.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The server password extension."""
+
+import webob
+
+from nova.api.metadata import password
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import exception
+
+
+authorize = extensions.extension_authorizer('compute', 'server_password')
+
+
+class ServerPasswordTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('password', selector='password')
+ root.text = unicode
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServerPasswordController(object):
+    """The server password API controller for the OpenStack API."""
+ def __init__(self):
+ self.compute_api = compute.API()
+
+ def _get_instance(self, context, server_id):
+ try:
+ return self.compute_api.get(context, server_id)
+ except exception.InstanceNotFound as exp:
+ raise webob.exc.HTTPNotFound(explanation=unicode(exp))
+
+ @wsgi.serializers(xml=ServerPasswordTemplate)
+ def index(self, req, server_id):
+ context = req.environ['nova.context']
+ authorize(context)
+ instance = self._get_instance(context, server_id)
+
+ passw = password.extract_password(instance)
+ return {'password': passw or ''}
+
+ @wsgi.response(204)
+ def delete(self, req, server_id):
+ context = req.environ['nova.context']
+ authorize(context)
+ instance = self._get_instance(context, server_id)
+ password.set_password(context, instance['uuid'], None)
+
+
+class Server_password(extensions.ExtensionDescriptor):
+    """Server password support."""
+
+ name = "ServerPassword"
+ alias = "os-server-password"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "server-password/api/v2")
+ updated = "2012-11-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension(
+ 'os-server-password',
+ controller=ServerPasswordController(),
+ collection_actions={'delete': 'DELETE'},
+ parent=dict(member_name='server', collection_name='servers'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index 4e5a3ee02..910d88c30 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -24,7 +24,7 @@ from nova import exception
class Controller(object):
- """ The server metadata API controller for the OpenStack API """
+ """The server metadata API controller for the OpenStack API """
def __init__(self):
self.compute_api = compute.API()
@@ -45,7 +45,7 @@ class Controller(object):
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, server_id):
- """ Returns the list of metadata for a given instance """
+ """Returns the list of metadata for a given instance """
context = req.environ['nova.context']
return {'metadata': self._get_metadata(context, server_id)}
@@ -138,7 +138,7 @@ class Controller(object):
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, server_id, id):
- """ Return a single metadata item """
+ """Return a single metadata item """
context = req.environ['nova.context']
data = self._get_metadata(context, server_id)
@@ -150,7 +150,7 @@ class Controller(object):
@wsgi.response(204)
def delete(self, req, server_id, id):
- """ Deletes an existing metadata """
+ """Deletes an existing metadata """
context = req.environ['nova.context']
metadata = self._get_metadata(context, server_id)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index f992dc445..ff616a2e8 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -1028,6 +1028,9 @@ class Controller(wsgi.Controller):
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.InstanceTypeNotFound:
+ msg = _("Flavor used by the instance could not be found.")
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py
index 13662c546..6438f0aa9 100644
--- a/nova/api/openstack/compute/views/images.py
+++ b/nova/api/openstack/compute/views/images.py
@@ -18,7 +18,7 @@
import os.path
from nova.api.openstack import common
-from nova import utils
+from nova.image import glance
class ViewBuilder(common.ViewBuilder):
@@ -117,7 +117,7 @@ class ViewBuilder(common.ViewBuilder):
def _get_alternate_link(self, request, identifier):
"""Create an alternate link for a specific image id."""
- glance_url = utils.generate_glance_url()
+ glance_url = glance.generate_glance_url()
glance_url = self._update_glance_link_prefix(glance_url)
return os.path.join(glance_url,
request.environ["nova.context"].project_id,
diff --git a/nova/block_device.py b/nova/block_device.py
index ea4ff9c3b..7e1e5374a 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -83,7 +83,7 @@ _pref = re.compile('^((x?v|s)d)')
def strip_prefix(device_name):
- """ remove both leading /dev/ and xvd or sd or vd """
+ """remove both leading /dev/ and xvd or sd or vd """
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index abbc0bd92..b840dbc6b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -313,9 +313,10 @@ class API(base.Base):
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_networks(self, context, requested_networks):
- """ Check if the networks requested belongs to the project
- and the fixed IP address for each network provided is within
- same the network block
+        """
+        Check if the networks requested belong to the project
+        and the fixed IP address for each network provided is within
+        the same network block
"""
if not requested_networks:
return
@@ -2186,22 +2187,15 @@ class AggregateAPI(base.Base):
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
- zones = [s['availability_zone'] for s in
- self.db.service_get_all_by_topic(context,
- CONF.compute_topic)]
- if availability_zone in zones:
- values = {"name": aggregate_name,
- "availability_zone": availability_zone}
- aggregate = self.db.aggregate_create(context, values)
- aggregate = self._get_aggregate_info(context, aggregate)
- # To maintain the same API result as before.
- del aggregate['hosts']
- del aggregate['metadata']
- return aggregate
- else:
- raise exception.InvalidAggregateAction(action='create_aggregate',
- aggregate_id="'N/A'",
- reason='invalid zone')
+
+ values = {"name": aggregate_name}
+ aggregate = self.db.aggregate_create(context, values,
+ metadata={'availability_zone': availability_zone})
+ aggregate = self._get_aggregate_info(context, aggregate)
+ # To maintain the same API result as before.
+ del aggregate['hosts']
+ del aggregate['metadata']
+ return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b161f504c..f542fedf2 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -386,6 +386,71 @@ class ComputeManager(manager.SchedulerDependentManager):
return self.conductor_api.instance_get_all_by_host(context, self.host)
+ def _destroy_evacuated_instances(self, context):
+ """Destroys evacuated instances.
+
+ While the compute was down the instances running on it could be
+ evacuated to another host. Checking that instance host identical to
+ current host. Otherwise destroying it
+ """
+
+ # getting all vms on this host
+ local_instances = []
+ try:
+ # try to find all local instances by uuid
+ for uuid in self.driver.list_instance_uuids():
+ try:
+ local_instances.append(self.conductor_api.
+ instance_get_by_uuid(context, uuid))
+ except exception.InstanceNotFound as e:
+ LOG.error(_('Instance %(uuid)s found in the '
+ 'hypervisor, but not in the database'),
+ locals())
+ continue
+ except NotImplementedError:
+            # the driver doesn't support listing by uuid; fall back to an ugly way
+ for instance_name in self.driver.list_instances():
+ try:
+                    # couldn't find a better way to look up an instance in
+                    # the db by its name; iterating over the list of this
+                    # host's instances would make it hard to ignore instances
+                    # created outside openstack. returns -1 if the instance
+                    # name doesn't match the template
+ instance_id = compute_utils.parse_decimal_id(CONF
+ .instance_name_template, instance_name)
+
+ if instance_id == -1:
+ continue
+
+ local_instances.append(self.conductor_api.
+ instance_get(context, instance_id))
+ except exception.InstanceNotFound as e:
+ LOG.error(_('Instance %(instance_name)s found in the '
+ 'hypervisor, but not in the database'),
+ locals())
+ continue
+
+ for instance in local_instances:
+ instance_host = instance['host']
+ host = self.host
+ instance_name = instance['name']
+ if instance['host'] != host:
+ LOG.info(_('instance host %(instance_host)s is not equal to '
+ 'current host %(host)s. '
+ 'Deleting zombie instance %(instance_name)s'),
+ locals())
+
+ network_info = self._get_instance_nw_info(context, instance)
+ bdi = self._get_instance_volume_block_device_info(context,
+ instance['uuid'])
+
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info),
+ bdi,
+ False)
+
+ LOG.info(_('zombie vm destroyed'))
+
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
db_state = instance['power_state']
@@ -450,6 +515,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.filter_defer_apply_on()
try:
+ # checking that instance was not already evacuated to other host
+ self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
@@ -1259,7 +1326,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
- orig_image_ref_url = utils.generate_image_url(orig_image_ref)
+ orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(context, instance,
current_period=True, system_metadata=orig_sys_metadata,
@@ -2809,7 +2876,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref, dest)
def rollback_live_migration_at_destination(self, context, instance):
- """ Cleaning up image directory that is created pre_live_migration.
+ """Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: an Instance dict sent over rpc
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index c784fd83d..ba1915f42 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -453,7 +453,12 @@ class ResourceTracker(object):
filtered[uuid] = migration
for migration in filtered.values():
- self._update_usage_from_migration(resources, migration)
+ try:
+ self._update_usage_from_migration(resources, migration)
+ except exception.InstanceTypeNotFound:
+ LOG.warn(_("InstanceType could not be found, skipping "
+ "migration."), instance_uuid=uuid)
+ continue
def _update_usage_from_instance(self, resources, instance):
"""Update usage for a single instance."""
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 8852cb820..6d6b7cac9 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -253,3 +253,63 @@ def usage_volume_info(vol_usage):
vol_usage['curr_write_bytes'])
return usage_info
+
+
+def parse_decimal_id(template, instance_name):
+ """Finds instance decimal id from instance name
+
+ :param template: template e.g. instance-%03x-james
+ :param instance_name: instance name like instance-007-james
+
+ :returns: parsed decimal id, e.g. 7 from the input above
+ """
+
+ # find pattern like %05x, %d..etc.
+ reg = re.search('(%\d*)([ioxds])', template)
+ format = reg.group(0)
+
+ # split template to get prefix and suffix
+ tokens = template.split(format)
+
+ if tokens[0]:
+ if not instance_name.startswith(tokens[0]):
+ # template prefix not match
+ return -1
+ instance_name = instance_name[len(tokens[0]):]
+
+ if tokens[1]:
+ if not instance_name.endswith(tokens[1]):
+ # template suffix not match
+ return -1
+ instance_name = instance_name[:-len(tokens[1])]
+
+ # validate that instance_id length matches
+ expected_length = format[1:-1]
+
+ # if expected length is empty it means instance_id can be of any length
+ if expected_length:
+ if len(instance_name) < int(expected_length):
+ return -1
+        # if instance_id has preceding zeroes it must be of expected length
+ if (instance_name[:1] == '0' and
+ len(instance_name) != int(expected_length)):
+ return -1
+
+    # if the minimal expected length is empty, there should be no preceding zeros
+ elif instance_name[0] == '0':
+ return -1
+
+ # finding base of the template to convert to decimal
+ base_fmt = format[-1:]
+ base = 10
+ if base_fmt == 'x':
+ base = 16
+ elif base_fmt == 'o':
+ base = 8
+
+ try:
+ res = int(instance_name, base)
+ except ValueError:
+ res = -1
+
+ return res
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 66badb756..a95332f08 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -75,6 +75,9 @@ class LocalAPI(object):
"""Perform an instance update in the database"""
return self._manager.instance_update(context, instance_uuid, updates)
+ def instance_get(self, context, instance_id):
+ return self._manager.instance_get(context, instance_id)
+
def instance_get_by_uuid(self, context, instance_uuid):
return self._manager.instance_get_by_uuid(context, instance_uuid)
@@ -82,10 +85,10 @@ class LocalAPI(object):
return self._manager.instance_destroy(context, instance)
def instance_get_all(self, context):
- return self.instance_get_all_by_filters(context, {})
+ return self._manager.instance_get_all(context)
def instance_get_all_by_host(self, context, host):
- return self.instance_get_all_by_filters(context, {'host': host})
+ return self._manager.instance_get_all_by_host(context, host)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
@@ -252,15 +255,18 @@ class API(object):
def instance_destroy(self, context, instance):
return self.conductor_rpcapi.instance_destroy(context, instance)
+ def instance_get(self, context, instance_id):
+ return self.conductor_rpcapi.instance_get(context, instance_id)
+
def instance_get_by_uuid(self, context, instance_uuid):
return self.conductor_rpcapi.instance_get_by_uuid(context,
instance_uuid)
def instance_get_all(self, context):
- return self.instance_get_all_by_filters(context, {})
+ return self.conductor_rpcapi.instance_get_all(context)
def instance_get_all_by_host(self, context, host):
- return self.instance_get_all_by_filters(context, {'host': host})
+ return self.conductor_rpcapi.instance_get_all_by_host(context, host)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 123e7e13f..9a1a62712 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD"""
- RPC_API_VERSION = '1.22'
+ RPC_API_VERSION = '1.24'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -71,11 +71,18 @@ class ConductorManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(instance_ref)
@rpc_common.client_exceptions(exception.InstanceNotFound)
+ def instance_get(self, context, instance_id):
+ return jsonutils.to_primitive(
+ self.db.instance_get(context, instance_id))
+
+ @rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid))
- # NOTE(danms): This should go away in RPC version 2
+ def instance_get_all(self, context):
+ return jsonutils.to_primitive(self.db.instance_get_all(context))
+
def instance_get_all_by_host(self, context, host):
return jsonutils.to_primitive(
self.db.instance_get_all_by_host(context.elevated(), host))
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 0f2fe1f0c..c7143ade9 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -54,6 +54,9 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
+ 1.23 - Added instance_get_all
+ Un-Deprecate instance_get_all_by_host
+ 1.24 - Added instance_get
"""
BASE_RPC_API_VERSION = '1.0'
@@ -75,6 +78,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance_uuid=instance_uuid,
updates=updates_p))
+ def instance_get(self, context, instance_id):
+ msg = self.make_msg('instance_get',
+ instance_id=instance_id)
+ return self.call(context, msg, version='1.24')
+
def instance_get_by_uuid(self, context, instance_uuid):
msg = self.make_msg('instance_get_by_uuid',
instance_uuid=instance_uuid)
@@ -245,3 +253,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def service_get_all_by(self, context, topic=None, host=None):
msg = self.make_msg('service_get_all_by', topic=topic, host=host)
return self.call(context, msg, version='1.21')
+
+ def instance_get_all(self, context):
+ msg = self.make_msg('instance_get_all')
+ return self.call(context, msg, version='1.23')
+
+ def instance_get_all_by_host(self, context, host):
+ msg = self.make_msg('instance_get_all_by_host', host=host)
+ return self.call(context, msg, version='1.23')
diff --git a/nova/config.py b/nova/config.py
index c84725b84..f87dbfdd3 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -47,16 +47,6 @@ global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
- cfg.StrOpt('glance_host',
- default='$my_ip',
- help='default glance hostname or ip'),
- cfg.IntOpt('glance_port',
- default=9292,
- help='default glance port'),
- cfg.StrOpt('glance_protocol',
- default='http',
- help='Default protocol to use when connecting to glance. '
- 'Set to https for SSL.'),
cfg.IntOpt('s3_port',
default=3333,
help='port used when accessing the s3 api'),
@@ -87,9 +77,6 @@ global_opts = [
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
- cfg.StrOpt('osapi_path',
- default='/v1.1/',
- help='the path prefix used to call the openstack api server'),
cfg.StrOpt('default_instance_type',
default='m1.small',
help='default instance type to use, testing only'),
diff --git a/nova/db/api.py b/nova/db/api.py
index 1322c29e9..8d93701c8 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1543,6 +1543,14 @@ def aggregate_metadata_get_by_host(context, host, key=None):
return IMPL.aggregate_metadata_get_by_host(context, host, key)
+def aggregate_host_get_by_metadata_key(context, key):
+ """Get hosts with a specific metadata key, for all aggregates.
+
+ Returns a dictionary where each key is a hostname and each value is the
+ set of values stored under that metadata key."""
+ return IMPL.aggregate_host_get_by_metadata_key(context, key)
+
+
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates. If values contains a metadata
key, it updates the aggregate metadata too."""
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d70522d73..f89ebfaa3 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1833,6 +1833,30 @@ def instance_update_and_get_original(context, instance_uuid, values):
copy_old_instance=True)
+# NOTE(danms): This updates the instance's metadata list in-place and in
+# the database to avoid stale data and refresh issues. It assumes the
+# delete=True behavior of instance_metadata_update(...)
+def _instance_metadata_update_in_place(context, instance, metadata, session):
+ to_delete = []
+ for keyvalue in instance['metadata']:
+ key = keyvalue['key']
+ if key in metadata:
+ keyvalue['value'] = metadata.pop(key)
+ elif key not in metadata:
+ to_delete.append(keyvalue)
+
+ for condemned in to_delete:
+ instance['metadata'].remove(condemned)
+ condemned.soft_delete(session=session)
+
+ for key, value in metadata.iteritems():
+ newitem = models.InstanceMetadata()
+ newitem.update({'key': key, 'value': value,
+ 'instance_uuid': instance['uuid']})
+ session.add(newitem)
+ instance['metadata'].append(newitem)
+
+
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
session = get_session()
@@ -1877,9 +1901,9 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
metadata = values.get('metadata')
if metadata is not None:
- instance_metadata_update(context, instance_ref['uuid'],
- values.pop('metadata'), True,
- session=session)
+ _instance_metadata_update_in_place(context, instance_ref,
+ values.pop('metadata'),
+ session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
@@ -4232,12 +4256,13 @@ def _aggregate_get_query(context, model_class, id_field=None, id=None,
@require_admin_context
def aggregate_create(context, values, metadata=None):
session = get_session()
- aggregate = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.name,
- values['name'],
- session=session,
- read_deleted='no').first()
+ query = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.name,
+ values['name'],
+ session=session,
+ read_deleted='no')
+ aggregate = query.options(joinedload('_metadata')).first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
@@ -4250,15 +4275,16 @@ def aggregate_create(context, values, metadata=None):
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
- return aggregate
+ return aggregate_get(context, aggregate.id)
@require_admin_context
def aggregate_get(context, aggregate_id):
- aggregate = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.id,
- aggregate_id).first()
+ query = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.id,
+ aggregate_id)
+ aggregate = query.options(joinedload('_metadata')).first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@@ -4290,18 +4316,38 @@ def aggregate_metadata_get_by_host(context, host, key=None):
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
- return metadata
+ return dict(metadata)
+
+
+@require_admin_context
+def aggregate_host_get_by_metadata_key(context, key):
+ query = model_query(context, models.Aggregate).join(
+ "_metadata").filter(models.AggregateMetadata.key == key)
+ rows = query.all()
+ metadata = collections.defaultdict(set)
+ for agg in rows:
+ for agghost in agg._hosts:
+ metadata[agghost.host].add(agg._metadata[0]['value'])
+ return dict(metadata)
@require_admin_context
def aggregate_update(context, aggregate_id, values):
session = get_session()
- aggregate = _aggregate_get_query(context,
+ aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
- session=session).first()
+ session=session).
+ options(joinedload('_metadata')).first())
+
if aggregate:
+ if "availability_zone" in values:
+ az = values.pop('availability_zone')
+ if 'metadata' not in values:
+ values['metadata'] = {'availability_zone': az}
+ else:
+ values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
@@ -4312,7 +4358,7 @@ def aggregate_update(context, aggregate_id, values):
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
- return aggregate
+ return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
new file mode 100644
index 000000000..04f31ce5f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
@@ -0,0 +1,57 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import String, Column, MetaData, Table, delete, select
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ aggregates = Table('aggregates', meta, autoload=True)
+ aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
+ # migrate data
+ record_list = list(aggregates.select().execute())
+ for rec in record_list:
+ row = aggregate_metadata.insert()
+ row.execute({'created_at': rec['created_at'],
+ 'updated_at': rec['updated_at'],
+ 'deleted_at': rec['deleted_at'],
+ 'deleted': rec['deleted'],
+ 'key': 'availability_zone',
+ 'value': rec['availability_zone'],
+ 'aggregate_id': rec['id'],
+ })
+ aggregates.drop_column('availability_zone')
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ aggregates = Table('aggregates', meta, autoload=True)
+ aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
+ availability_zone = Column('availability_zone', String(255))
+ aggregates.create_column(availability_zone)
+ # migrate data
+ aggregates.update().values(availability_zone=select(
+ [aggregate_metadata.c.value]).where(aggregates.c.id ==
+ aggregate_metadata.c.aggregate_id).where(aggregate_metadata.c.key ==
+ 'availability_zone')).execute()
+ delete(aggregate_metadata, aggregate_metadata.c.key == 'availability_zone').execute()
+ aggregates.c.availability_zone.alter(nullable=False)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index cdd140b6e..8a161efdf 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -866,7 +866,6 @@ class Aggregate(BASE, NovaBase):
__tablename__ = 'aggregates'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
- availability_zone = Column(String(255), nullable=False)
_hosts = relationship(AggregateHost,
lazy="joined",
secondary="aggregate_hosts",
@@ -893,7 +892,7 @@ class Aggregate(BASE, NovaBase):
backref='aggregates')
def _extra_keys(self):
- return ['hosts', 'metadetails']
+ return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
@@ -903,6 +902,12 @@ class Aggregate(BASE, NovaBase):
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
+ @property
+ def availability_zone(self):
+ if 'availability_zone' not in self.metadetails:
+ return None
+ return self.metadetails['availability_zone']
+
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 5263b9674..7c52cd36c 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -335,8 +335,10 @@ _RE_DB = {
def raise_if_duplicate_entry_error(integrity_error, engine_name):
- """ In this function will be raised DBDuplicateEntry exception if integrity
- error wrap unique constraint violation. """
+ """
+ In this function will be raised DBDuplicateEntry exception if integrity
+ error wrap unique constraint violation.
+ """
def get_columns_from_uniq_cons_or_name(columns):
# note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 9a93df2ab..6a5406d9e 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -36,6 +36,16 @@ from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
glance_opts = [
+ cfg.StrOpt('glance_host',
+ default='$my_ip',
+ help='default glance hostname or ip'),
+ cfg.IntOpt('glance_port',
+ default=9292,
+ help='default glance port'),
+ cfg.StrOpt('glance_protocol',
+ default='http',
+ help='Default protocol to use when connecting to glance. '
+ 'Set to https for SSL.'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance api servers available to nova. '
@@ -54,6 +64,18 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.import_opt('auth_strategy', 'nova.api.auth')
+CONF.import_opt('my_ip', 'nova.config')
+
+
+def generate_glance_url():
+ """Generate the URL to glance."""
+ return "%s://%s:%d" % (CONF.glance_protocol, CONF.glance_host,
+ CONF.glance_port)
+
+
+def generate_image_url(image_ref):
+ """Generate an image URL from an image_ref."""
+ return "%s/images/%s" % (generate_glance_url(), image_ref)
def _parse_image_ref(image_href):
diff --git a/nova/network/dns_driver.py b/nova/network/dns_driver.py
index 09335b7cc..6e7cbf556 100644
--- a/nova/network/dns_driver.py
+++ b/nova/network/dns_driver.py
@@ -14,7 +14,7 @@
class DNSDriver(object):
- """ Defines the DNS manager interface. Does nothing. """
+ """Defines the DNS manager interface. Does nothing."""
def __init__(self):
pass
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 77a1014a3..215dd0092 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1134,8 +1134,10 @@ def get_dev(network):
class LinuxNetInterfaceDriver(object):
- """Abstract class that defines generic network host API"""
- """ for for all Linux interface drivers."""
+ """
+ Abstract class that defines generic network host API
+ for all Linux interface drivers.
+ """
def plug(self, network, mac_address):
"""Create Linux device, return device name"""
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index 11c59dcc9..c565f368e 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -27,14 +27,16 @@ LOG = logging.getLogger(__name__)
class MiniDNS(dns_driver.DNSDriver):
- """ Trivial DNS driver. This will read/write to a local, flat file
- and have no effect on your actual DNS system. This class is
- strictly for testing purposes, and should keep you out of dependency
- hell.
-
- Note that there is almost certainly a race condition here that
- will manifest anytime instances are rapidly created and deleted.
- A proper implementation will need some manner of locking."""
+ """
+ Trivial DNS driver. This will read/write to a local, flat file
+ and have no effect on your actual DNS system. This class is
+ strictly for testing purposes, and should keep you out of dependency
+ hell.
+
+ Note that there is almost certainly a race condition here that
+ will manifest anytime instances are rapidly created and deleted.
+ A proper implementation will need some manner of locking.
+ """
def __init__(self):
if CONF.log_dir:
diff --git a/nova/network/noop_dns_driver.py b/nova/network/noop_dns_driver.py
index 23d1d1f3e..be29f4d9a 100644
--- a/nova/network/noop_dns_driver.py
+++ b/nova/network/noop_dns_driver.py
@@ -19,7 +19,7 @@ from nova.network import dns_driver
class NoopDNSDriver(dns_driver.DNSDriver):
- """ No-op DNS manager. Does nothing. """
+ """No-op DNS manager. Does nothing."""
def __init__(self):
pass
diff --git a/nova/notifications.py b/nova/notifications.py
index 9f92e3dd4..f399ac55d 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -22,6 +22,7 @@ the system.
import nova.context
from nova import db
from nova import exception
+from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.openstack.common import cfg
@@ -277,7 +278,7 @@ def info_from_instance(context, instance_ref, network_info,
def null_safe_str(s):
return str(s) if s else ''
- image_ref_url = utils.generate_image_url(instance_ref['image_ref'])
+ image_ref_url = glance.generate_image_url(instance_ref['image_ref'])
instance_type_name = instance_ref.get('instance_type', {}).get('name', '')
diff --git a/nova/openstack/common/notifier/api.py b/nova/openstack/common/notifier/api.py
index 76b725c0a..0ec55fbf0 100644
--- a/nova/openstack/common/notifier/api.py
+++ b/nova/openstack/common/notifier/api.py
@@ -137,10 +137,11 @@ def notify(context, publisher_id, event_type, priority, payload):
for driver in _get_drivers():
try:
driver.notify(context, msg)
- except Exception, e:
+ except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
- "Payload=%(payload)s") % locals())
+ "Payload=%(payload)s")
+ % dict(e=e, payload=payload))
_drivers = None
@@ -166,7 +167,7 @@ def add_driver(notification_driver):
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
- except ImportError as e:
+ except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
diff --git a/nova/openstack/common/notifier/rpc_notifier.py b/nova/openstack/common/notifier/rpc_notifier.py
index aa9e8860e..8316f7ab7 100644
--- a/nova/openstack/common/notifier/rpc_notifier.py
+++ b/nova/openstack/common/notifier/rpc_notifier.py
@@ -41,6 +41,6 @@ def notify(context, message):
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
- except Exception, e:
+ except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
diff --git a/nova/openstack/common/notifier/rpc_notifier2.py b/nova/openstack/common/notifier/rpc_notifier2.py
new file mode 100644
index 000000000..beb2e9f71
--- /dev/null
+++ b/nova/openstack/common/notifier/rpc_notifier2.py
@@ -0,0 +1,51 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''messaging based notification driver, with message envelopes'''
+
+from nova.openstack.common import cfg
+from nova.openstack.common import context as req_context
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+ 'topics', default=['notifications', ],
+ help='AMQP topic(s) used for openstack notifications')
+
+opt_group = cfg.OptGroup(name='rpc_notifier2',
+ title='Options for rpc_notifier2')
+
+CONF = cfg.CONF
+CONF.register_group(opt_group)
+CONF.register_opt(notification_topic_opt, opt_group)
+
+
+def notify(context, message):
+ """Sends a notification via RPC"""
+ if not context:
+ context = req_context.get_admin_context()
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ for topic in CONF.rpc_notifier2.topics:
+ topic = '%s.%s' % (topic, priority)
+ try:
+ rpc.notify(context, topic, message, envelope=True)
+ except Exception:
+ LOG.exception(_("Could not send notification to %(topic)s. "
+ "Payload=%(message)s"), locals())
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index cfdac03bd..d43b48fa2 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -178,17 +178,18 @@ def multicall(context, topic, msg, timeout=None):
return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
-def notify(context, topic, msg):
+def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
+ :param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
- return _get_impl().notify(cfg.CONF, context, topic, msg)
+ return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index 6464914db..105e6fcbe 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -33,7 +33,6 @@ from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
@@ -169,7 +168,7 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
'failure': failure}
if ending:
msg['ending'] = True
- conn.direct_send(msg_id, msg)
+ conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
@@ -294,6 +293,10 @@ class ProxyCallback(object):
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
+ def wait(self):
+ """Wait for all callback threads to exit."""
+ self.pool.waitall()
+
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
@@ -356,7 +359,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
- LOG.debug(_('Making asynchronous call on %s ...'), topic)
+ LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
@@ -365,7 +368,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
return wait_msg
@@ -384,7 +387,7 @@ def cast(conf, context, topic, msg, connection_pool):
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
@@ -392,7 +395,7 @@ def fanout_cast(conf, context, topic, msg, connection_pool):
LOG.debug(_('Making asynchronous fanout cast...'))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
- conn.fanout_send(topic, msg)
+ conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
@@ -400,7 +403,7 @@ def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
@@ -409,16 +412,18 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
- conn.fanout_send(topic, msg)
+ conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-def notify(conf, context, topic, msg, connection_pool):
+def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
+ if envelope:
+ msg = rpc_common.serialize_msg(msg, force_envelope=True)
conn.notify_send(topic, msg)
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index a941ab1d0..bf4f5a3de 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -21,6 +21,7 @@ import copy
import sys
import traceback
+from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -28,9 +29,50 @@ from nova.openstack.common import local
from nova.openstack.common import log as logging
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+'''RPC Envelope Version.
+
+This version number applies to the top level structure of messages sent out.
+It does *not* apply to the message payload, which must be versioned
+independently. For example, when using rpc APIs, a version number is applied
+for changes to the API being exposed over rpc. This version number is handled
+in the rpc proxy and dispatcher modules.
+
+This version number applies to the message envelope that is used in the
+serialization done inside the rpc layer. See serialize_msg() and
+deserialize_msg().
+
+The current message format (version 2.0) is very simple. It is:
+
+ {
+ 'nova.version': <RPC Envelope Version as a String>,
+ 'nova.message': <Application Message Payload, JSON encoded>
+ }
+
+Message format version '1.0' is just considered to be the messages we sent
+without a message envelope.
+
+So, the current message envelope just includes the envelope version. It may
+eventually contain additional information, such as a signature for the message
+payload.
+
+We will JSON encode the application message payload. The message envelope,
+which includes the JSON encoded application message body, will be passed down
+to the messaging libraries as a dict.
+'''
+_RPC_ENVELOPE_VERSION = '2.0'
+
+_VERSION_KEY = 'nova.version'
+_MESSAGE_KEY = 'nova.message'
+
+
+# TODO(russellb) Turn this on after Grizzly.
+_SEND_RPC_ENVELOPE = False
+
+
class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")
@@ -91,6 +133,11 @@ class UnsupportedRpcVersion(RPCException):
"this endpoint.")
+class UnsupportedRpcEnvelopeVersion(RPCException):
+ message = _("Specified RPC envelope version, %(version)s, "
+ "not supported by this endpoint.")
+
+
class Connection(object):
"""A connection, returned by rpc.create_connection().
@@ -165,8 +212,12 @@ class Connection(object):
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
- SANITIZE = {'set_admin_password': ('new_pass',),
- 'run_instance': ('admin_password',), }
+ SANITIZE = {'set_admin_password': [('args', 'new_pass')],
+ 'run_instance': [('args', 'admin_password')],
+ 'route_message': [('args', 'message', 'args', 'method_info',
+ 'method_kwargs', 'password'),
+ ('args', 'message', 'args', 'method_info',
+ 'method_kwargs', 'admin_password')]}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
@@ -178,14 +229,16 @@ def _safe_log(log_func, msg, msg_data):
msg_data = copy.deepcopy(msg_data)
if has_method:
- method = msg_data['method']
- if method in SANITIZE:
- args_to_sanitize = SANITIZE[method]
- for arg in args_to_sanitize:
- try:
- msg_data['args'][arg] = "<SANITIZED>"
- except KeyError:
- pass
+ for arg in SANITIZE.get(msg_data['method'], []):
+ try:
+ d = msg_data
+ for elem in arg[:-1]:
+ d = d[elem]
+ d[arg[-1]] = '<SANITIZED>'
+ except KeyError as e:
+ LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
+ {'item': arg,
+ 'err': e})
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
@@ -344,3 +397,74 @@ def client_exceptions(*exceptions):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
+
+
+def version_is_compatible(imp_version, version):
+ """Determine whether versions are compatible.
+
+ :param imp_version: The version implemented
+ :param version: The version requested by an incoming message.
+ """
+ version_parts = version.split('.')
+ imp_version_parts = imp_version.split('.')
+ if int(version_parts[0]) != int(imp_version_parts[0]): # Major
+ return False
+ if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
+ return False
+ return True
+
+
+def serialize_msg(raw_msg, force_envelope=False):
+ if not _SEND_RPC_ENVELOPE and not force_envelope:
+ return raw_msg
+
+ # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
+ # information about this format.
+ msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
+ _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
+
+ return msg
+
+
+def deserialize_msg(msg):
+ # NOTE(russellb): Hang on to your hats, this road is about to
+ # get a little bumpy.
+ #
+ # Robustness Principle:
+ # "Be strict in what you send, liberal in what you accept."
+ #
+ # At this point we have to do a bit of guessing about what it
+ # is we just received. Here is the set of possibilities:
+ #
+ # 1) We received a dict. This could be 2 things:
+ #
+ # a) Inspect it to see if it looks like a standard message envelope.
+ # If so, great!
+ #
+ # b) If it doesn't look like a standard message envelope, it could either
+ # be a notification, or a message from before we added a message
+ # envelope (referred to as version 1.0).
+ # Just return the message as-is.
+ #
+ # 2) It's any other non-dict type. Just return it and hope for the best.
+ # This case covers return values from rpc.call() from before message
+ # envelopes were used. (messages to call a method were always a dict)
+
+ if not isinstance(msg, dict):
+ # See #2 above.
+ return msg
+
+ base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
+ if not all(map(lambda key: key in msg, base_envelope_keys)):
+ # See #1.b above.
+ return msg
+
+ # At this point we think we have the message envelope
+ # format we were expecting. (#1.a above)
+
+ if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
+ raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
+
+ raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
+
+ return raw_msg
diff --git a/nova/openstack/common/rpc/dispatcher.py b/nova/openstack/common/rpc/dispatcher.py
index 4dee5d509..5f4cc3a7c 100644
--- a/nova/openstack/common/rpc/dispatcher.py
+++ b/nova/openstack/common/rpc/dispatcher.py
@@ -103,21 +103,6 @@ class RpcDispatcher(object):
self.callbacks = callbacks
super(RpcDispatcher, self).__init__()
- @staticmethod
- def _is_compatible(mversion, version):
- """Determine whether versions are compatible.
-
- :param mversion: The API version implemented by a callback.
- :param version: The API version requested by an incoming message.
- """
- version_parts = version.split('.')
- mversion_parts = mversion.split('.')
- if int(version_parts[0]) != int(mversion_parts[0]): # Major
- return False
- if int(version_parts[1]) > int(mversion_parts[1]): # Minor
- return False
- return True
-
def dispatch(self, ctxt, version, method, **kwargs):
"""Dispatch a message based on a requested version.
@@ -139,7 +124,8 @@ class RpcDispatcher(object):
rpc_api_version = proxyobj.RPC_API_VERSION
else:
rpc_api_version = '1.0'
- is_compatible = self._is_compatible(rpc_api_version, version)
+ is_compatible = rpc_common.version_is_compatible(rpc_api_version,
+ version)
had_compatible = had_compatible or is_compatible
if not hasattr(proxyobj, method):
continue
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index bb0ade27c..bf38201f5 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -162,7 +162,8 @@ class ConsumerBase(object):
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
- callback(message.payload)
+ msg = rpc_common.deserialize_msg(message.payload)
+ callback(msg)
message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
@@ -196,7 +197,7 @@ class DirectConsumer(ConsumerBase):
# Default options
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
@@ -269,7 +270,7 @@ class FanoutConsumer(ConsumerBase):
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
@@ -316,7 +317,7 @@ class DirectPublisher(Publisher):
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
@@ -350,7 +351,7 @@ class FanoutPublisher(Publisher):
"""
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
@@ -387,6 +388,7 @@ class Connection(object):
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
+ self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
@@ -469,7 +471,7 @@ class Connection(object):
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
- self.connection.close()
+ self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
@@ -573,12 +575,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
@@ -644,6 +648,11 @@ class Connection(object):
pass
self.consumer_thread = None
+ def wait_on_proxy_callbacks(self):
+ """Wait for all proxy callback threads to exit."""
+ for proxy_cb in self.proxy_callbacks:
+ proxy_cb.wait()
+
def publisher_send(self, cls, topic, msg, **kwargs):
"""Send to a publisher based on the publisher class"""
@@ -719,6 +728,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
@@ -730,6 +740,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
@@ -782,11 +793,12 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))
-def notify(conf, context, topic, msg):
+def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
+ rpc_amqp.get_connection_pool(conf, Connection),
+ envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 5570ea867..2e05f02f1 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -124,7 +124,8 @@ class ConsumerBase(object):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
try:
- self.callback(message.content)
+ msg = rpc_common.deserialize_msg(message.content)
+ self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
@@ -277,8 +278,16 @@ class Connection(object):
self.session = None
self.consumers = {}
self.consumer_thread = None
+ self.proxy_callbacks = []
self.conf = conf
+ if server_params and 'hostname' in server_params:
+ # NOTE(russellb) This enables support for cast_to_server.
+ server_params['qpid_hosts'] = [
+ '%s:%d' % (server_params['hostname'],
+ server_params.get('port', 5672))
+ ]
+
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
@@ -367,12 +376,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.connection.close()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
@@ -427,6 +438,11 @@ class Connection(object):
pass
self.consumer_thread = None
+ def wait_on_proxy_callbacks(self):
+ """Wait for all proxy callback threads to exit."""
+ for proxy_cb in self.proxy_callbacks:
+ proxy_cb.wait()
+
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""
@@ -502,6 +518,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
@@ -517,6 +534,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
@@ -575,10 +593,11 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))
-def notify(conf, context, topic, msg):
+def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
+ rpc_amqp.get_connection_pool(conf, Connection),
+ envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 4ab47b8f8..d99d390f2 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -205,7 +205,9 @@ class ZmqClient(object):
def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
self.outq = ZmqSocket(addr, socket_type, bind=bind)
- def cast(self, msg_id, topic, data):
+ def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
+ if serialize:
+ data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
@@ -250,7 +252,7 @@ class InternalContext(object):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
- data.setdefault('args', [])
+ data.setdefault('args', {})
try:
result = proxy.dispatch(
@@ -321,7 +323,7 @@ class ConsumerBase(object):
return
data.setdefault('version', None)
- data.setdefault('args', [])
+ data.setdefault('args', {})
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
@@ -433,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
sock_type = zmq.PUB
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
- inside = _deserialize(in_msg)
+ inside = rpc_common.deserialize_msg(_deserialize(in_msg))
msg_id = inside[-1]['args']['msg_id']
response = inside[-1]['args']['response']
LOG.debug(_("->response->%s"), response)
@@ -480,7 +482,7 @@ class ZmqReactor(ZmqBaseReactor):
msg_id, topic, style, in_msg = data
- ctx, request = _deserialize(in_msg)
+ ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
ctx = RpcContext.unmarshal(ctx)
proxy = self.proxies[sock]
@@ -531,7 +533,8 @@ class Connection(rpc_common.Connection):
self.reactor.consume_in_thread()
-def _cast(addr, context, msg_id, topic, msg, timeout=None):
+def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
+ force_envelope=False):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
@@ -540,7 +543,7 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None):
conn = ZmqClient(addr)
# assumes cast can't return an exception
- conn.cast(msg_id, topic, payload)
+ conn.cast(msg_id, topic, payload, serialize, force_envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
@@ -609,7 +612,8 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
return responses[-1]
-def _multi_send(method, context, topic, msg, timeout=None):
+def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
+ force_envelope=False):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
@@ -635,7 +639,8 @@ def _multi_send(method, context, topic, msg, timeout=None):
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
- _topic, _topic, msg, timeout)
+ _topic, _topic, msg, timeout, serialize,
+ force_envelope)
return
return method(_addr, context, _topic, _topic, msg, timeout)
@@ -676,6 +681,8 @@ def notify(conf, context, topic, msg, **kwargs):
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic.replace('.', '-')
+ kwargs['serialize'] = kwargs.pop('envelope')
+ kwargs['force_envelope'] = True
cast(conf, context, topic, msg, **kwargs)
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 4e55d0b0c..24ea0dd35 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -14,11 +14,17 @@
# under the License.
+from nova import db
from nova.scheduler import filters
class AvailabilityZoneFilter(filters.BaseHostFilter):
- """Filters Hosts by availability zone."""
+ """Filters Hosts by availability zone.
+
+ Works with both service and aggregate metadata.
+ For aggregate metadata uses the key 'availability_zone'
+ Note: in theory a compute node can be part of multiple availability_zones
+ """
def host_passes(self, host_state, filter_properties):
spec = filter_properties.get('request_spec', {})
@@ -26,5 +32,12 @@ class AvailabilityZoneFilter(filters.BaseHostFilter):
availability_zone = props.get('availability_zone')
if availability_zone:
- return availability_zone == host_state.service['availability_zone']
+ if availability_zone == host_state.service['availability_zone']:
+ return True
+ context = filter_properties['context'].elevated()
+ metadata = db.aggregate_metadata_get_by_host(
+ context, host_state.host, key='availability_zone')
+ if 'availability_zone' in metadata:
+ return availability_zone in metadata['availability_zone']
+ return False
return True
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 59afb68e0..895fc8490 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -110,7 +110,7 @@ class ServiceGroupDriver(object):
raise NotImplementedError()
def is_up(self, member):
- """ Check whether the given member is up. """
+        """Check whether the given member is up."""
raise NotImplementedError()
def leave(self, member_id, group_id):
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index d452c18cb..429746dac 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -710,8 +710,16 @@ class CloudTestCase(test.TestCase):
'topic': 'compute',
'report_count': 0,
'availability_zone': "zone2"})
+        # Aggregate-based availability zones
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'aggzones'})
+ db.aggregate_host_add(self.context, agg.id, 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
- self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ self.assertEqual(len(result['availabilityZoneInfo']), 4)
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+ self.assertEqual(len(result['availabilityZoneInfo']), 18)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index 0f60b8128..c57d6a91b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -123,7 +123,7 @@ class AggregateTestCase(test.TestCase):
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
- availablity_zone="nova1",
+ availability_zone="nova1",
foo='bar'))
def test_show(self):
@@ -183,9 +183,7 @@ class AggregateTestCase(test.TestCase):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
-
result = self.controller.update(self.req, "1", body=body)
-
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
@@ -261,18 +259,6 @@ class AggregateTestCase(test.TestCase):
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
- def test_add_host_with_host_in_wrong_availability_zone(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.InvalidAggregateAction(action='create_aggregate',
- aggregate_id="'N/A'",
- reason='wrong zone')
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- self.assertRaises(exc.HTTPConflict, self.controller.action,
- self.req, "bogus_aggregate",
- body={"add_host": {"host": "host1"}})
-
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_password.py b/nova/tests/api/openstack/compute/contrib/test_server_password.py
new file mode 100644
index 000000000..600c4eda4
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_server_password.py
@@ -0,0 +1,86 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', lambda *a, **kw: {'uuid': ''})
+ self.password = 'fakepass'
+
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_set_password(context, instance_uuid, password):
+ self.password = password
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'set_password', fake_set_password)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_password'])
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
+
+
+class ServerPasswordXmlTest(ServerPasswordTest):
+ content_type = 'application/xml'
+
+ def _get_pass(self, body):
+ # NOTE(vish): first element is password
+ return etree.XML(body).text or ''
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index 66dac3feb..3df6b549b 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -193,6 +193,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"SchedulerHints",
"SecurityGroups",
"ServerDiagnostics",
+ "ServerPassword",
"ServerStartStop",
"Services",
"SimpleTenantUsage",
diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py
index 89a09fa2f..09e727da3 100644
--- a/nova/tests/api/openstack/compute/test_images.py
+++ b/nova/tests/api/openstack/compute/test_images.py
@@ -29,10 +29,10 @@ from nova.api.openstack.compute import images
from nova.api.openstack.compute.views import images as images_view
from nova.api.openstack import xmlutil
from nova import exception
+from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
-from nova import utils
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -62,7 +62,7 @@ class ImagesControllerTest(test.TestCase):
href = "http://localhost/v2/fake/images/124"
bookmark = "http://localhost/fake/images/124"
- alternate = "%s/fake/images/124" % utils.generate_glance_url()
+ alternate = "%s/fake/images/124" % glance.generate_glance_url()
server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
server_href = "http://localhost/v2/fake/servers/" + server_uuid
server_bookmark = "http://localhost/fake/servers/" + server_uuid
@@ -200,7 +200,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 123),
+ "href": alternate % (glance.generate_glance_url(), 123),
}],
},
{
@@ -238,7 +238,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 124),
+ "href": alternate % (glance.generate_glance_url(), 124),
}],
},
{
@@ -276,7 +276,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/125" % utils.generate_glance_url()
+ "href": "%s/fake/images/125" % glance.generate_glance_url()
}],
},
{
@@ -314,7 +314,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/126" % utils.generate_glance_url()
+ "href": "%s/fake/images/126" % glance.generate_glance_url()
}],
},
{
@@ -352,7 +352,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/127" % utils.generate_glance_url()
+ "href": "%s/fake/images/127" % glance.generate_glance_url()
}],
},
{
@@ -390,7 +390,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/128" % utils.generate_glance_url()
+ "href": "%s/fake/images/128" % glance.generate_glance_url()
}],
},
{
@@ -428,7 +428,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/129" % utils.generate_glance_url()
+ "href": "%s/fake/images/129" % glance.generate_glance_url()
}],
},
{
@@ -452,7 +452,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/130" % utils.generate_glance_url()
+ "href": "%s/fake/images/130" % glance.generate_glance_url()
}],
},
]
@@ -491,7 +491,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 123),
+ "href": alternate % (glance.generate_glance_url(), 123),
}],
},
{
@@ -529,7 +529,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 124),
+ "href": alternate % (glance.generate_glance_url(), 124),
}],
}]
@@ -656,7 +656,7 @@ class ImagesControllerTest(test.TestCase):
view = images_view.ViewBuilder()
request = fakes.HTTPRequest.blank('/v2/fake/images/1')
generated_url = view._get_alternate_link(request, 1)
- actual_url = "%s/fake/images/1" % utils.generate_glance_url()
+ actual_url = "%s/fake/images/1" % glance.generate_glance_url()
self.assertEqual(generated_url, actual_url)
def test_delete_image(self):
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 414d70c7c..3c2d795cd 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -40,7 +40,7 @@ FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
-def return_server_not_found(context, uuid):
+def return_server_not_found(*arg, **kwarg):
raise exception.NotFound()
@@ -604,6 +604,29 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_resize,
req, FAKE_UUID, body)
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exception.TooManyInstances,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index f916925fd..938783be7 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -1782,17 +1782,12 @@ class ServersControllerCreateTest(test.TestCase):
fake_method)
def _check_admin_pass_len(self, server_dict):
- """ utility function - check server_dict for adminPass
- length.
-
- """
+ """utility function - check server_dict for adminPass length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_pass_missing(self, server_dict):
- """ utility function - check server_dict for absence
- of adminPass
- """
+ """utility function - check server_dict for absence of adminPass."""
self.assertTrue("adminPass" not in server_dict)
def _test_create_instance(self):
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 28bbb3d25..db1c9ede2 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -43,7 +43,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """ Run before each test. """
+        """Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -51,7 +51,7 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """ Test offset key works with 0. """
+ """Test offset key works with 0. """
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -59,7 +59,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """ Test offset key works with a medium sized number. """
+ """Test offset key works with a medium sized number. """
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
@@ -67,7 +67,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """ Test offset key works with a number over 1000 (max_limit). """
+ """Test offset key works with a number over 1000 (max_limit). """
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
@@ -76,19 +76,19 @@ class LimiterTest(test.TestCase):
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """ Test offset key works with a blank offset. """
+ """Test offset key works with a blank offset. """
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """ Test offset key works with a BAD offset. """
+ """Test offset key works with a BAD offset. """
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """ Test request with no offset or limit """
+        """Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -96,7 +96,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """ Test limit of zero. """
+ """Test limit of zero. """
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -104,7 +104,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """ Test limit of 10. """
+ """Test limit of 10. """
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -112,7 +112,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """ Test limit of 3000. """
+ """Test limit of 3000. """
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -120,7 +120,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """ Test request with both limit and offset. """
+ """Test request with both limit and offset. """
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
@@ -132,7 +132,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """ Test a max_limit other than 1000. """
+ """Test a max_limit other than 1000. """
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
@@ -147,13 +147,13 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """ Test a negative limit. """
+ """Test a negative limit. """
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """ Test a negative offset. """
+ """Test a negative offset. """
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
@@ -167,30 +167,30 @@ class PaginationParamsTest(test.TestCase):
"""
def test_no_params(self):
- """ Test no params. """
+        """Test no params."""
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
- """ Test valid marker param. """
+ """Test valid marker param. """
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
- """ Test valid limit param. """
+ """Test valid limit param. """
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
- """ Test invalid limit param. """
+ """Test invalid limit param. """
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
- """ Test valid limit and marker parameters. """
+ """Test valid limit and marker parameters. """
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
diff --git a/nova/tests/baremetal/test_nova_baremetal_manage.py b/nova/tests/baremetal/test_nova_baremetal_manage.py
new file mode 100644
index 000000000..4d152a028
--- /dev/null
+++ b/nova/tests/baremetal/test_nova_baremetal_manage.py
@@ -0,0 +1,49 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2011 OpenStack LLC
+# Copyright 2011 Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import os
+import sys
+
+from nova import context
+from nova import test
+from nova.virt.baremetal import db as bmdb
+
+from nova.tests.baremetal.db import base as bm_db_base
+
+TOPDIR = os.path.normpath(os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+BM_MAN_PATH = os.path.join(TOPDIR, 'bin', 'nova-baremetal-manage')
+
+sys.dont_write_bytecode = True
+bm_man = imp.load_source('bm_man', BM_MAN_PATH)
+sys.dont_write_bytecode = False
+
+
+class BareMetalDbCommandsTestCase(bm_db_base.BMDBTestCase):
+ def setUp(self):
+ super(BareMetalDbCommandsTestCase, self).setUp()
+ self.commands = bm_man.BareMetalDbCommands()
+
+ def test_sync_and_version(self):
+ self.commands.sync()
+ v = self.commands.version()
+ self.assertTrue(v > 0)
diff --git a/nova/tests/cells/fakes.py b/nova/tests/cells/fakes.py
index e1f3b6e70..e996cbe13 100644
--- a/nova/tests/cells/fakes.py
+++ b/nova/tests/cells/fakes.py
@@ -22,6 +22,7 @@ from nova.cells import messaging
from nova.cells import state as cells_state
import nova.db
from nova.db import base
+from nova import exception
from nova.openstack.common import cfg
CONF = cfg.CONF
@@ -43,6 +44,10 @@ CELL_NAME_TO_STUB_INFO = {}
class FakeDBApi(object):
+ """Cells uses a different DB in each cell. This means in order to
+ stub out things differently per cell, I need to create a fake DBApi
+ object that is instantiated by each fake cell.
+ """
def __init__(self, cell_db_entries):
self.cell_db_entries = cell_db_entries
@@ -58,8 +63,8 @@ class FakeDBApi(object):
def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
return []
- def instance_get_by_uuid(self, ctxt, *args, **kwargs):
- return None
+ def instance_get_by_uuid(self, ctxt, instance_uuid):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
class FakeCellsDriver(driver.BaseCellsDriver):
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4c26e42b5..99504f580 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -41,6 +41,7 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
+from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.openstack.common import cfg
@@ -573,7 +574,7 @@ class ComputeTestCase(BaseTestCase):
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
- """ block device mapping failure test.
+ """block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, keeping the task state
@@ -593,7 +594,7 @@ class ComputeTestCase(BaseTestCase):
'task_state': None})
def test_run_instance_spawn_fail(self):
- """ spawn failure test.
+ """spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, keeping the task state"""
@@ -611,7 +612,7 @@ class ComputeTestCase(BaseTestCase):
'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
- """ spawn network deallocate test.
+ """spawn network deallocate test.
Make sure that when an instance is not found during spawn
that the network is deallocated"""
@@ -1455,7 +1456,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue(payload['launched_at'])
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
@@ -1496,7 +1497,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('launched_at' in payload)
self.assertTrue('deleted_at' in payload)
self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
@@ -1824,8 +1825,8 @@ class ComputeTestCase(BaseTestCase):
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
- image_ref_url = utils.generate_image_url(image_ref)
- new_image_ref_url = utils.generate_image_url(new_image_ref)
+ image_ref_url = glance.generate_image_url(image_ref)
+ new_image_ref_url = glance.generate_image_url(new_image_ref)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
msg = test_notifier.NOTIFICATIONS[0]
@@ -1904,7 +1905,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(new_instance))
@@ -1951,7 +1952,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance=new_instance)
@@ -3110,6 +3111,149 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(timeouts.count(10), 10)
self.assertTrue(None in timeouts)
+ def test_init_host_with_evacuated_instances_uuid_list(self):
+ # creating testdata
+ c = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host}))
+ ]
+
+ # these have already been evacuated to another host
+ evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+
+ self.compute.driver.init_host(host=self.compute.host)
+
+ def fake_get_admin_context():
+ return c
+
+ def fake_all(*args, **kwargs):
+ pass
+
+ def fake_list_instance_uuids():
+ return [
+ # those are still related to this host
+ instances[0]['uuid'],
+ instances[1]['uuid'],
+ instances[2]['uuid'],
+ # and this one has already been evacuated to another host
+ evacuated_instance['uuid']
+ ]
+
+ def fake_destroy(instance, nw, bdi, destroyDisks):
+ self.assertFalse(destroyDisks)
+ self.assertEqual(instance['uuid'], evacuated_instance['uuid'])
+
+ self.stubs.Set(nova.context,
+ 'get_admin_context',
+ fake_get_admin_context)
+ self.stubs.Set(self.compute.driver, 'filter_defer_apply_on', fake_all)
+ self.stubs.Set(self.compute.driver,
+ 'list_instance_uuids',
+ fake_list_instance_uuids)
+ self.stubs.Set(self.compute, '_get_instance_nw_info', fake_all)
+ self.stubs.Set(self.compute, '_get_instance_volume_block_device_info',
+ fake_all)
+ self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
+ self.stubs.Set(self.compute, '_legacy_nw_info', fake_all)
+ self.stubs.Set(self.compute, '_init_instance', fake_all)
+
+ self.stubs.Set(self.compute.driver, 'filter_defer_apply_off', fake_all)
+ self.stubs.Set(self.compute, '_report_driver_status', fake_all)
+ self.stubs.Set(self.compute, 'publish_service_capabilities', fake_all)
+ # start test
+ self.mox.ReplayAll()
+ self.compute.init_host()
+
+ db.instance_destroy(c, evacuated_instance['uuid'])
+ for instance in instances:
+ db.instance_destroy(c, instance['uuid'])
+
+ def test_init_host_with_evacuated_instances_names_list(self):
+ # creating testdata
+ c = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host}))
+ ]
+
+ # these have already been evacuated to another host
+ evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+
+ self.compute.driver.init_host(host=self.compute.host)
+
+ def fake_get_admin_context():
+ return c
+
+ def fake_all(*args, **kwargs):
+ pass
+
+ def fake_list_instances():
+ return [
+ # those are still related to this host
+ CONF.instance_name_template % instances[0]['id'],
+ CONF.instance_name_template % instances[1]['id'],
+ CONF.instance_name_template % instances[2]['id'],
+ # and this one has already been evacuated to another host
+ CONF.instance_name_template % evacuated_instance['id']
+ ]
+
+ def fake_list_instance_uuids():
+ raise NotImplementedError()
+
+ def fake_destroy(instance, nw, bdi, destroyDisks):
+ self.assertFalse(destroyDisks)
+ self.assertEqual(instance['uuid'], evacuated_instance['uuid'])
+
+ self.stubs.Set(nova.context,
+ 'get_admin_context',
+ fake_get_admin_context)
+ self.stubs.Set(self.compute.driver, 'filter_defer_apply_on', fake_all)
+ self.stubs.Set(self.compute.driver,
+ 'list_instances',
+ fake_list_instances)
+ self.stubs.Set(self.compute.driver,
+ 'list_instance_uuids',
+ fake_list_instance_uuids)
+
+ self.stubs.Set(self.compute, '_get_instance_nw_info', fake_all)
+ self.stubs.Set(self.compute, '_get_instance_volume_block_device_info',
+ fake_all)
+ self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
+ self.stubs.Set(self.compute, '_legacy_nw_info', fake_all)
+ self.stubs.Set(self.compute, '_init_instance', fake_all)
+
+ self.stubs.Set(self.compute.driver, 'filter_defer_apply_off', fake_all)
+ self.stubs.Set(self.compute, '_report_driver_status', fake_all)
+ self.stubs.Set(self.compute, 'publish_service_capabilities', fake_all)
+ # start test
+ self.mox.ReplayAll()
+ self.compute.init_host()
+
+ db.instance_destroy(c, evacuated_instance['uuid'])
+ for instance in instances:
+ db.instance_destroy(c, instance['uuid'])
+
class ComputeAPITestCase(BaseTestCase):
@@ -4254,6 +4398,40 @@ class ComputeAPITestCase(BaseTestCase):
instance_types.destroy(name)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_resize_revert_deleted_flavor_fails(self):
+ orig_name = 'test_resize_revert_orig_flavor'
+ orig_flavorid = 11
+ memory_mb = 128
+ root_gb = 0
+ vcpus = 1
+ instance_types.create(orig_name, memory_mb, vcpus, root_gb, 0,
+ orig_flavorid, 0, 1.0, True)
+
+ instance = self._create_fake_instance(type_name=orig_name)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ instance = jsonutils.to_primitive(instance)
+ self.compute.run_instance(self.context, instance=instance)
+
+ old_instance_type_id = instance['instance_type_id']
+ new_flavor = instance_types.get_instance_type_by_name('m1.tiny')
+ new_flavorid = new_flavor['flavorid']
+ new_instance_type_id = new_flavor['id']
+ self.compute_api.resize(self.context, instance, new_flavorid)
+
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type_id,
+ 'new_instance_type_id': new_instance_type_id,
+ 'status': 'finished'})
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None,
+ 'vm_state': vm_states.RESIZED})
+ instance_types.destroy(orig_name)
+ self.assertRaises(exception.InstanceTypeNotFound,
+ self.compute_api.revert_resize,
+ self.context, instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def test_migrate(self):
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
@@ -5365,15 +5543,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.stubs.Set(rpc, 'cast', fake_rpc_method)
- def test_create_invalid_availability_zone(self):
- """Ensure InvalidAggregateAction is raised with wrong avail_zone."""
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.create_aggregate,
- self.context, 'fake_aggr', 'fake_avail_zone')
-
def test_update_aggregate_metadata(self):
"""Ensure metadata can be updated"""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
@@ -5384,11 +5555,11 @@ class ComputeAPIAggrTestCase(BaseTestCase):
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
self.assertThat(expected['metadata'],
- matchers.DictMatches({'foo_key2': 'foo_value2'}))
+ matchers.DictMatches({'availability_zone': 'fake_zone',
+ 'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
"""Ensure we can delete an aggregate."""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.api.delete_aggregate(self.context, aggr['id'])
@@ -5429,19 +5600,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
- def test_add_host_to_aggregate_zones_mismatch(self):
- """Ensure InvalidAggregateAction is raised when zones don't match."""
- _create_service_entries(self.context, {'fake_zoneX': ['fake_host1'],
- 'fake_zoneY': ['fake_host2']})
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', 'fake_zoneY')
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.add_host_to_aggregate,
- self.context, aggr['id'], 'fake_host1')
-
def test_add_host_to_aggregate_raise_not_found(self):
"""Ensure ComputeHostNotFound is raised when adding invalid host."""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
@@ -5492,9 +5652,9 @@ class ComputeAggrTestCase(BaseTestCase):
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
- values = {'name': 'test_aggr',
- 'availability_zone': 'test_zone'}
- self.aggr = db.aggregate_create(self.context, values)
+ values = {'name': 'test_aggr'}
+ az = {'availability_zone': 'test_zone'}
+ self.aggr = db.aggregate_create(self.context, values, metadata=az)
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 5acc1cc53..23374994f 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -24,6 +24,7 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
+from nova.image import glance
from nova.network import api as network_api
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -33,7 +34,6 @@ from nova.openstack.common.notifier import test_notifier
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
-from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -281,7 +281,7 @@ class UsageInfoTestCase(test.TestCase):
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
@@ -317,7 +317,7 @@ class UsageInfoTestCase(test.TestCase):
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
@@ -343,7 +343,7 @@ class UsageInfoTestCase(test.TestCase):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'], {})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_about_instance_usage(self):
@@ -376,7 +376,7 @@ class UsageInfoTestCase(test.TestCase):
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
self.assertEquals(payload['image_name'], 'fake_name')
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
@@ -390,3 +390,97 @@ class MetadataToDictTestCase(test.TestCase):
def test_metadata_to_dict_empty(self):
self.assertEqual(compute_utils.metadata_to_dict([]), {})
+
+
+class ParseDecimalIDTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ParseDecimalIDTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+
+ self.templates = [
+ CONF.instance_name_template,
+ 'instance-%08x',
+ 'instance-%08o',
+ 'instance-%08d',
+ 'instance-%04x',
+ 'instance-%04o',
+ 'instance-%04d',
+ 'instance-%x',
+ 'instance-%o',
+ 'instance-%d',
+ 'james-%07x-bond',
+ 'james-%07o-bond',
+ 'james-%07d-bond',
+ 'xxxx%xxxx',
+ 'oooo%oooo',
+ 'dddd%dddd',
+ '%02x',
+ '%02o',
+ '%02d',
+ '%x',
+ '%o',
+ '%d',
+ '%07x-bond',
+ '%07o-bond',
+ '%07d-bond',
+ '123%xxxx',
+ '123%oooo',
+ '123%dddd',
+ '007%02x',
+ '007%02o',
+ '007%02d',
+ '42%x',
+ '42%o',
+ '42%d',
+ '700%07x007',
+ '700%07o007',
+ '700%07d007']
+
+ self.ids = [
+ 1,
+ 5,
+ 10,
+ 42,
+ 90,
+ 100,
+ 256,
+ 500,
+ 1000,
+ 2500,
+ 19294,
+ 100500,
+ 21093404
+ ]
+
+ def _validate_id(self, template, name):
+ return compute_utils.parse_decimal_id(template, name)
+
+ def test_name_template_based(self):
+ for template in self.templates:
+ for id in self.ids:
+ self.assertEqual(id, self._validate_id(template,
+ template % id))
+
+ def test_name_not_template_based(self):
+
+ for template in self.templates:
+ for id in self.ids:
+ name = template % id
+
+ self.assertEqual(-1, self._validate_id(template,
+ 'n%s' % name))
+ self.assertEqual(-1, self._validate_id(template,
+ '%sw' % name))
+ self.assertEqual(-1, self._validate_id(template,
+ 'reg%s' % name))
+ self.assertEqual(-1, self._validate_id(template,
+ '%sex' % name))
+ self.assertEqual(-1, self._validate_id(template, '%s%s%s' % (
+ name[:1],
+ 'abr',
+ name[-1:])))
+ self.assertEqual(-1, self._validate_id(template, '%s%s%s' % (
+ name[:1],
+ 'qwer23456ert',
+ name[-1:])))
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 86f47a79c..fd87e420b 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -129,7 +129,7 @@ class _BaseTestCase(object):
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
- {'name': 'foo', 'availability_zone': 'foo'})
+ {'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
@@ -594,9 +594,8 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_instance_get_all(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all_by_filters(self.context, {}, 'created_at', 'desc')
- db.instance_get_all_by_filters(self.context, {'host': 'fake-host'},
- 'created_at', 'desc')
+ db.instance_get_all(self.context)
+ db.instance_get_all_by_host(self.context.elevated(), 'fake-host')
db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
'updated_at', 'asc')
self.mox.ReplayAll()
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 80eb08743..58ff3289c 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -145,6 +145,7 @@ policy_data = """
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "",
+ "compute_extension:server_password": "",
"compute_extension:services": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index e8baf4353..943b98cb2 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -25,11 +25,14 @@ import glanceclient.exc
from nova import context
from nova import exception
from nova.image import glance
+from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova.tests import matchers
+CONF = cfg.CONF
+
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object"""
@@ -703,3 +706,17 @@ class TestGlanceClientWrapper(test.TestCase):
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
+
+
+class TestGlanceUrl(test.TestCase):
+
+ def test_generate_glance_http_url(self):
+ generated_url = glance.generate_glance_url()
+ http_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
+ self.assertEqual(generated_url, http_url)
+
+ def test_generate_glance_https_url(self):
+ self.flags(glance_protocol="https")
+ generated_url = glance.generate_glance_url()
+ https_url = "https://%s:%d" % (CONF.glance_host, CONF.glance_port)
+ self.assertEqual(generated_url, https_url)
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 20f9044f9..9395ba349 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -361,6 +361,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-server-password",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerPassword",
+ "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
+ "updated": "2012-11-29T00:00:00+00:00"
+ },
+ {
"alias": "os-server-start-stop",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 1669ff957..cc022c082 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -135,6 +135,9 @@
<extension alias="os-server-diagnostics" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1" name="ServerDiagnostics">
<description>%(text)s</description>
</extension>
+ <extension alias="os-server-password" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-server-start-stop" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
index 89a48ee57..119f78ad2 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova2"
+ },
"name": "newname",
"updated_at": "%(timestamp)s"
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
index 3f72a0b43..071e1c43a 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova2</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
index ee0ea6c3d..b311bb18e 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
@@ -8,7 +8,9 @@
"%(compute_host)s"
],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
index 82a0401ad..a45a01789 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
@@ -10,5 +10,7 @@
</hosts>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
index 8ce7d9c40..6b94465c4 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
index 56f0dd3e8..d59d10a84 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
index f373f02f7..bed47e730 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
@@ -7,7 +7,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
index 417b1016f..0a6173a0b 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
@@ -9,6 +9,8 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
</aggregates>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
index 058a1ecf5..f34932617 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
@@ -7,6 +7,7 @@
"hosts": [],
"id": 1,
"metadata": {
+ "availability_zone": "nova",
"key": "value"
},
"name": "name",
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
index 9bbd1f0bd..5b229cfc9 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
@@ -10,5 +10,6 @@
<id>1</id>
<metadata>
<key>value</key>
+ <availability_zone>nova</availability_zone>
</metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
index 8ce7d9c40..6b94465c4 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
index 56f0dd3e8..d59d10a84 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
new file mode 100644
index 000000000..a7f3a1993
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
new file mode 100644
index 000000000..4feec740c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
new file mode 100644
index 000000000..5b27e1385
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
new file mode 100644
index 000000000..fb2ee09e0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
new file mode 100644
index 000000000..70d0a57de
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
new file mode 100644
index 000000000..a87b47670
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ rxtx_factor="2.0" />
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
new file mode 100644
index 000000000..abf652fae
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
new file mode 100644
index 000000000..d24623c55
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" rxtx_factor="2.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl
new file mode 100644
index 000000000..026f15d46
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "password": "%(encrypted_password)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
new file mode 100644
index 000000000..046eed30f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<password>%(encrypted_password)s</password>
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 180299844..c15767a82 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -24,6 +24,7 @@ import uuid as uuid_lib
from coverage import coverage
from lxml import etree
+from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.api.openstack.compute import extensions
@@ -364,7 +365,6 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-flavor-access')
do_not_approve_additions.append('os-flavor-extra-specs')
- do_not_approve_additions.append('os-flavor-rxtx')
do_not_approve_additions.append('os-flavor-swap')
do_not_approve_additions.append('os-floating-ip-dns')
do_not_approve_additions.append('os-floating-ip-pools')
@@ -979,6 +979,55 @@ class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
ctype = 'xml'
+class FlavorRxtxJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
+ 'Flavor_rxtx')
+
+ def _get_flags(self):
+ f = super(FlavorRxtxJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorRxtx extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_rxtx_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-rxtx-get-resp', subs,
+ response)
+
+ def test_flavors_rxtx_list(self):
+ response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('flavor-rxtx-list-resp', subs,
+ response)
+
+ def test_flavors_rxtx_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-rxtx-post-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-rxtx-post-resp',
+ subs, response)
+
+
+class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
+ ctype = 'xml'
+
+
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
@@ -1532,7 +1581,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return project
def test_agent_list(self):
- """ Return a list of all agent builds."""
+ """Return a list of all agent builds."""
response = self._do_get('os-agents')
self.assertEqual(response.status, 200)
project = {'url': 'xxxxxxxxxxxx',
@@ -2151,6 +2200,39 @@ class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
ctype = "xml"
+class ServerPasswordSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.server_password."
+ "Server_password")
+
+ def test_get_password(self):
+
+ # Mock password since there is no api to set it
+ def fake_ext_password(*args, **kwargs):
+ return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
+ "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
+ "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
+ "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
+ "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
+ "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
+ "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
+ self.stubs.Set(password, "extract_password", fake_ext_password)
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-server-password' % uuid)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
+ return self._verify_response('get-password-resp', subs, response)
+
+ def test_reset_password(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s/os-server-password' % uuid)
+ self.assertEqual(response.status, 204)
+
+
+class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
+ ctype = "xml"
+
+
class DiskConfigJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.disk_config."
"Disk_config")
diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/monkey_patch_example/__init__.py
index 25cf9ccfe..779dc72f3 100644
--- a/nova/tests/monkey_patch_example/__init__.py
+++ b/nova/tests/monkey_patch_example/__init__.py
@@ -21,7 +21,7 @@ CALLED_FUNCTION = []
def example_decorator(name, function):
- """ decorator for notify which is used from utils.monkey_patch()
+ """decorator for notify which is used from utils.monkey_patch()
:param name: name of the function
:param function: - object of the function
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 07a1bc2b8..b08da6baa 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -748,8 +748,11 @@ class HostFiltersTestCase(test.TestCase):
def _create_aggregate_with_host(self, name='fake_aggregate',
metadata=None,
hosts=['host1']):
- values = {'name': name,
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': name}
+ if metadata:
+ metadata['availability_zone'] = 'fake_avail_zone'
+ else:
+ metadata = {'availability_zone': 'fake_avail_zone'}
result = db.aggregate_create(self.context.elevated(), values, metadata)
for host in hosts:
db.aggregate_host_add(self.context.elevated(), result['id'], host)
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 163afda7d..0835df51d 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -350,7 +350,7 @@ class ApiEc2TestCase(test.TestCase):
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
- """ Test that we sanely handle invalid security group names.
+ """Test that we sanely handle invalid security group names.
EC2 API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. Amazon implementation
accepts more characters - so, [:print:] is ok. """
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index dfdd4f3d7..7b1081b79 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -97,11 +97,14 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
class FakeCinderClient(cinder.cinder_client.Client):
- def __init__(self, username, password, project_id=None, auth_url=None):
+ def __init__(self, username, password, project_id=None, auth_url=None,
+ retries=None):
super(FakeCinderClient, self).__init__(username, password,
project_id=project_id,
- auth_url=auth_url)
- self.client = FakeHTTPClient(username, password, project_id, auth_url)
+ auth_url=auth_url,
+ retries=retries)
+ self.client = FakeHTTPClient(username, password, project_id, auth_url,
+ retries=retries)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
@@ -173,3 +176,11 @@ class CinderTestCase(test.TestCase):
self.assert_called('GET', '/volumes/5678')
self.assertTrue('volume_image_metadata' in volume)
self.assertEqual(volume['volume_image_metadata'], _image_metadata)
+
+ def test_cinder_http_retries(self):
+ retries = 42
+ self.flags(cinder_http_retries=retries)
+ volume = self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEquals(
+ self.fake_client_factory.client.client.retries, retries)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index af329daf6..0aaa42a11 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -321,16 +321,16 @@ class DbApiTestCase(test.TestCase):
inst['uuid'], 'vm_state', [None, 'disable'], 'run')
def test_instance_update_with_instance_uuid(self):
- """ test instance_update() works when an instance UUID is passed """
+ """test instance_update() works when an instance UUID is passed """
ctxt = context.get_admin_context()
# Create an instance with some metadata
- values = {'metadata': {'host': 'foo'},
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
- values = {'metadata': {'host': 'bar'},
+ values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
@@ -338,6 +338,8 @@ class DbApiTestCase(test.TestCase):
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
+ self.assertEqual('wuff', instance_meta['key2'])
+ self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
@@ -479,7 +481,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(404, faults[uuid][0]['code'])
def test_instance_fault_get_by_instance(self):
- """ ensure we can retrieve an instance fault by instance UUID """
+ """ensure we can retrieve an instance fault by instance UUID """
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -763,13 +765,13 @@ class DbApiTestCase(test.TestCase):
def _get_fake_aggr_values():
- return {'name': 'fake_aggregate',
- 'availability_zone': 'fake_avail_zone', }
+ return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
- 'fake_key2': 'fake_value2'}
+ 'fake_key2': 'fake_value2',
+ 'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
@@ -800,28 +802,26 @@ class AggregateDBApiTestCase(test.TestCase):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- def test_aggregate_create(self):
- """Ensure aggregate can be created with no metadata."""
+ def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
self.assertEquals(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
- """Test we can avoid conflict on deleted aggregates."""
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1['id'])
- values = {'name': r1['name'], 'availability_zone': 'new_zone'}
- r2 = _create_aggregate(values=values)
+ values = {'name': r1['name']}
+ metadata = {'availability_zone': 'new_zone'}
+ r2 = _create_aggregate(values=values, metadata=metadata)
self.assertEqual(r2['name'], values['name'])
- self.assertEqual(r2['availability_zone'], values['availability_zone'])
+ self.assertEqual(r2['availability_zone'],
+ metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
- """Ensure aggregate names are distinct."""
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -830,7 +830,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting metadata."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -839,7 +838,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
- """Ensure aggregate can be created with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
@@ -847,25 +845,25 @@ class AggregateDBApiTestCase(test.TestCase):
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
- """Ensure aggregate metadata is deleted bug 1052479."""
+ #test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
- result = _create_aggregate(metadata=None)
+ result = _create_aggregate(metadata={'availability_zone':
+ 'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertEqual(expected_metadata, {})
+ self.assertEqual(expected_metadata, {'availability_zone':
+ 'fake_avail_zone'})
def test_aggregate_create_low_privi_context(self):
- """Ensure right context is applied when creating aggregate."""
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
- """Ensure we can get aggregate with all its relations."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result['id'])
@@ -873,20 +871,16 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
- """Ensure we can get aggregates by host."""
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
- """Ensure we can get aggregates by host."""
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
@@ -894,13 +888,10 @@ class AggregateDBApiTestCase(test.TestCase):
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
- def test_aggregate_metdata_get_by_host(self):
- """Ensure we can get aggregates by host."""
+ def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
- values2 = {'name': 'fake_aggregate3',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
@@ -909,13 +900,10 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertFalse('badkey' in r1)
- def test_aggregate_metdata_get_by_host_with_key(self):
- """Ensure we can get aggregates by host."""
+ def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
- values2 = {'name': 'fake_aggregate3',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
@@ -930,14 +918,24 @@ class AggregateDBApiTestCase(test.TestCase):
key='good')
self.assertFalse('good' in r2)
+ def test_aggregate_host_get_by_metadata_key(self):
+ ctxt = context.get_admin_context()
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
+ a1 = _create_aggregate_with_hosts(context=ctxt)
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
+ a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=['foo.openstack.org'], metadata={'good': 'value'})
+ r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
+ self.assertEqual(r1, {'foo.openstack.org': set(['value'])})
+ self.assertFalse('fake_key1' in r1)
+
def test_aggregate_get_by_host_not_found(self):
- """Ensure AggregateHostNotFound is raised with unknown host."""
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
- """Ensure AggregateNotFound is raised when deleting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -946,7 +944,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_delete(self):
- """Ensure we can delete an aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
@@ -957,9 +954,10 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(aggregate['deleted'], True)
def test_aggregate_update(self):
- """Ensure an aggregate can be updated."""
ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ self.assertEqual(result.availability_zone, 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, 1, new_values)
@@ -967,18 +965,20 @@ class AggregateDBApiTestCase(test.TestCase):
updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
- """Ensure an aggregate can be updated with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
+ values['availability_zone'] = 'different_avail_zone'
db.aggregate_update(ctxt, 1, values)
expected = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(_get_fake_aggr_metadata(),
+ updated = db.aggregate_get(ctxt, result['id'])
+ self.assertThat(values['metadata'],
matchers.DictMatches(expected))
+ self.assertNotEqual(result.availability_zone,
+ updated.availability_zone)
def test_aggregate_update_with_existing_metadata(self):
- """Ensure an aggregate can be updated with existing metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
@@ -989,7 +989,6 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertThat(values['metadata'], matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
- """Ensure AggregateNotFound is raised when updating an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -998,26 +997,22 @@ class AggregateDBApiTestCase(test.TestCase):
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
- """Ensure we can get all aggregates."""
ctxt = context.get_admin_context()
counter = 3
for c in xrange(counter):
_create_aggregate(context=ctxt,
- values={'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'},
+ values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
- """Ensure we get only non-deleted aggregates."""
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in xrange(1, add_counter):
- values = {'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'}
+ values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in xrange(1, remove_counter):
@@ -1026,7 +1021,6 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
- """Ensure we can add metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
@@ -1035,7 +1029,6 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_update(self):
- """Ensure we can update metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
@@ -1048,7 +1041,6 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
- """Ensure we can delete metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
@@ -1058,8 +1050,17 @@ class AggregateDBApiTestCase(test.TestCase):
del metadata[metadata.keys()[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
+ def test_aggregate_remove_availability_zone(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ db.aggregate_metadata_delete(ctxt, result.id, 'availability_zone')
+ expected = db.aggregate_metadata_get(ctxt, result.id)
+ aggregate = db.aggregate_get(ctxt, result.id)
+ self.assertEquals(aggregate.availability_zone, None)
+ self.assertThat({}, matchers.DictMatches(expected))
+
def test_aggregate_metadata_delete_raise_not_found(self):
- """Ensure AggregateMetadataNotFound is raised when deleting."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
@@ -1067,14 +1068,12 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
- """Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
- def test_aggregate_host_add_deleted(self):
- """Ensure we can add a host that was previously deleted."""
+ def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
@@ -1084,19 +1083,16 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
- """Ensure we can add host to distinct aggregates."""
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
- values={'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone2', },
- metadata=None)
+ values={'name': 'fake_aggregate2'},
+ metadata={'availability_zone': 'fake_avail_zone2'})
h1 = db.aggregate_host_get_all(ctxt, r1['id'])
h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
- """Ensure we cannot add host to the same aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
@@ -1104,7 +1100,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
- """Ensure AggregateFound when adding a host."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -1114,7 +1109,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
- """Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result['id'],
@@ -1123,7 +1117,6 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
- """Ensure AggregateHostNotFound is raised when deleting a host."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index affab4e29..f6c4f141f 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -947,7 +947,7 @@ class ImageCacheManagerTestCase(test.TestCase):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
- self.stubs.Set(db, 'instance_get_all_by_filters', fake_get_all)
+ self.stubs.Set(db, 'instance_get_all', fake_get_all)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 6bc18251f..a3529cecd 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -2760,6 +2760,74 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
+ def test_destroy_removes_disk(self):
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+ self.mox.StubOutWithMock(shutil, "rmtree")
+ shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
+ libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ conn.destroy(instance, [])
+
+ def test_destroy_not_removes_disk(self):
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ conn.destroy(instance, [], None, False)
+
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.destroy()
@@ -4303,12 +4371,12 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
ins_ref = self._create_instance()
- """ dest is different host case """
+ # dest is different host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.2', None, None)
self.assertEquals(out, disk_info_text)
- """ dest is same host case """
+ # dest is same host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.1', None, None)
self.assertEquals(out, disk_info_text)
@@ -4325,19 +4393,19 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
- """ instance not found case """
+ # instance not found case
self.assertRaises(exception.NotFound,
self.libvirtconnection._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
- """ instance is running case """
+ # instance is running case
self.assertRaises(utils.LoopingCallDone,
self.libvirtconnection._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
- """ else case """
+ # else case
self.libvirtconnection._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 125b2fe36..bcd858d96 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -297,3 +297,37 @@ class TestMigrations(test.TestCase):
self.assertEqual(version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
+
+ def test_migration_146(self):
+ name = 'name'
+ az = 'custom_az'
+
+ def _145_check():
+ agg = aggregates.select(aggregates.c.id == 1).execute().first()
+ self.assertEqual(name, agg.name)
+ self.assertEqual(az, agg.availability_zone)
+
+ for key, engine in self.engines.items():
+ migration_api.version_control(engine, TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 145)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+ aggregates = sqlalchemy.Table('aggregates', metadata,
+ autoload=True)
+
+ aggregates.insert().values(id=1, availability_zone=az,
+ aggregate_name=1, name=name).execute()
+
+ _145_check()
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
+
+ aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
+ metadata, autoload=True)
+ metadata = aggregate_metadata.select(aggregate_metadata.c.
+ aggregate_id == 1).execute().first()
+ self.assertEqual(az, metadata['value'])
+
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
+ _145_check()
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 02d3a5a3f..3c944e170 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -26,6 +26,7 @@ from nova.compute import power_state
from nova.openstack.common import log as logging
from nova.virt import images
from nova.virt.powervm import blockdev as powervm_blockdev
+from nova.virt.powervm import common
from nova.virt.powervm import driver as powervm_driver
from nova.virt.powervm import exception
from nova.virt.powervm import lpar
@@ -195,3 +196,31 @@ class PowerVMDriverTestCase(test.TestCase):
self.assertEqual(info['mem'], 1024)
self.assertEqual(info['num_cpu'], 2)
self.assertEqual(info['cpu_time'], 939395)
+
+ def test_remote_utility_1(self):
+ path_one = '/some/file/'
+ path_two = '/path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_2(self):
+ path_one = '/some/file/'
+ path_two = 'path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_3(self):
+ path_one = '/some/file'
+ path_two = '/path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_4(self):
+ path_one = '/some/file'
+ path_two = 'path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index d2f3b2392..fc935e179 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -26,16 +26,10 @@ import mox
import nova
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova import utils
-CONF = cfg.CONF
-CONF.import_opt('glance_host', 'nova.config')
-CONF.import_opt('glance_port', 'nova.config')
-CONF.import_opt('glance_protocol', 'nova.config')
-
class ByteConversionTest(test.TestCase):
def test_string_conversions(self):
@@ -380,17 +374,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertFalse(utils.bool_from_str(None))
self.assertFalse(utils.bool_from_str('junk'))
- def test_generate_glance_http_url(self):
- generated_url = utils.generate_glance_url()
- http_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
- self.assertEqual(generated_url, http_url)
-
- def test_generate_glance_https_url(self):
- self.flags(glance_protocol="https")
- generated_url = utils.generate_glance_url()
- https_url = "https://%s:%d" % (CONF.glance_host, CONF.glance_port)
- self.assertEqual(generated_url, https_url)
-
def test_read_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 8b57dfef4..758e5360c 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -2222,11 +2222,12 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
- 'availability_zone': 'test_zone',
- 'metadata': {pool_states.POOL_FLAG: 'XenAPI'}}
+ 'metadata': {'availability_zone': 'test_zone',
+ pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
+ 'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
@@ -2306,9 +2307,10 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
values = {"name": 'fake_aggregate',
- "availability_zone": 'fake_zone'}
+ 'metadata': {'availability_zone': 'fake_zone'}}
result = db.aggregate_create(self.context, values)
- metadata = {pool_states.POOL_FLAG: "XenAPI",
+ metadata = {'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
db.aggregate_metadata_add(self.context, result['id'], metadata)
@@ -2358,7 +2360,8 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
- self.assertThat({pool_states.POOL_FLAG: 'XenAPI',
+ self.assertThat({'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
@@ -2375,9 +2378,9 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
- values = {"name": aggr_name,
- "availability_zone": aggr_zone}
- result = db.aggregate_create(self.context, values)
+ values = {"name": aggr_name}
+ result = db.aggregate_create(self.context, values,
+ metadata={'availability_zone': aggr_zone})
pool_flag = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: aggr_state}
db.aggregate_metadata_add(self.context, result['id'], pool_flag)
@@ -2509,7 +2512,7 @@ class StubDependencies(object):
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
- """ A ResourcePool, use stub dependencies """
+ """A ResourcePool, use stub dependencies """
class HypervisorPoolTestCase(test.TestCase):
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index ca8281295..a44f3e9fd 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -98,7 +98,7 @@ def stubout_determine_is_pv_objectstore(stubs):
def stubout_is_snapshot(stubs):
- """ Always returns true
+ """Always returns true
xenapi fake driver does not create vmrefs for snapshots """
def f(*args):
@@ -158,7 +158,7 @@ def _make_fake_vdi():
class FakeSessionForVMTests(fake.SessionBase):
- """ Stubs out a XenAPISession for VM tests """
+ """Stubs out a XenAPISession for VM tests """
_fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
"Sun Nov 6 22:49:02 2011\n"
@@ -204,7 +204,7 @@ class FakeSessionForVMTests(fake.SessionBase):
class FakeSessionForFirewallTests(FakeSessionForVMTests):
- """ Stubs out a XenApi Session for doing IPTable Firewall tests """
+ """Stubs out a XenApi Session for doing IPTable Firewall tests """
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
@@ -270,7 +270,7 @@ def stub_out_vm_methods(stubs):
class FakeSessionForVolumeTests(fake.SessionBase):
- """ Stubs out a XenAPISession for Volume tests """
+ """Stubs out a XenAPISession for Volume tests """
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
@@ -284,7 +284,7 @@ class FakeSessionForVolumeTests(fake.SessionBase):
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
- """ Stubs out a XenAPISession for Volume tests: it injects failures """
+ """Stubs out a XenAPISession for Volume tests: it injects failures """
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
# This is for testing failure
diff --git a/nova/utils.py b/nova/utils.py
index 1056a6e2d..20c291382 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -80,9 +80,6 @@ utils_opts = [
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
CONF.register_opts(utils_opts)
-CONF.import_opt('glance_host', 'nova.config')
-CONF.import_opt('glance_port', 'nova.config')
-CONF.import_opt('glance_protocol', 'nova.config')
CONF.import_opt('service_down_time', 'nova.config')
LOG = logging.getLogger(__name__)
@@ -913,7 +910,7 @@ def is_valid_cidr(address):
def monkey_patch():
- """ If the Flags.monkey_patch set as True,
+ """If the Flags.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
@@ -975,17 +972,6 @@ def timefunc(func):
return inner
-def generate_glance_url():
- """Generate the URL to glance."""
- return "%s://%s:%d" % (CONF.glance_protocol, CONF.glance_host,
- CONF.glance_port)
-
-
-def generate_image_url(image_ref):
- """Generate an image URL from an image_ref."""
- return "%s/images/%s" % (generate_glance_url(), image_ref)
-
-
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
diff --git a/nova/virt/baremetal/vif_driver.py b/nova/virt/baremetal/vif_driver.py
index 2dc03410b..08e68c250 100644
--- a/nova/virt/baremetal/vif_driver.py
+++ b/nova/virt/baremetal/vif_driver.py
@@ -18,14 +18,13 @@ from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
-from nova.virt.vif import VIFDriver
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
-class BareMetalVIFDriver(VIFDriver):
+class BareMetalVIFDriver(object):
def _after_plug(self, instance, network, mapping, pif):
pass
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 7d627e80c..da4a9475c 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -167,6 +167,13 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
+ def list_instance_uuids(self):
+ """
+ Return the UUIDS of all the instances known to the virtualization
+ layer, as a list.
+ """
+ raise NotImplementedError()
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
@@ -194,7 +201,8 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
@@ -206,6 +214,7 @@ class ComputeDriver(object):
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
+ :param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
@@ -776,6 +785,20 @@ class ComputeDriver(object):
"""
return {}
+ def instance_on_disk(self, instance):
+ """Checks access of instance files on the host.
+
+ :param instance: instance to lookup
+
+        Returns True if files of an instance with the supplied ID are
+        accessible on the host, False otherwise.
+
+ .. note::
+ Used in rebuild for HA implementation and required for validation
+ of access to instance shared disk files
+ """
+ return False
+
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5d3b3c926..348675fe2 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -189,7 +189,8 @@ class FakeDriver(driver.ComputeDriver):
def resume(self, instance, network_info, block_device_info=None):
pass
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
key = instance['name']
if key in self.instances:
del self.instances[key]
@@ -349,7 +350,7 @@ class FakeDriver(driver.ComputeDriver):
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
- """ Removes the named VM, as if it crashed. For testing"""
+ """Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
@@ -386,7 +387,6 @@ class FakeDriver(driver.ComputeDriver):
return 'disabled'
def get_disk_available_least(self):
- """ """
pass
def get_volume_connector(self, instance):
@@ -395,6 +395,12 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self):
return _FAKE_NODES
+ def instance_on_disk(self, instance):
+ return False
+
+ def list_instance_uuids(self):
+ return []
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index d7a5cbc31..178d35882 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -50,7 +50,7 @@ def load_driver(default, *args, **kwargs):
class FirewallDriver(object):
- """ Firewall Driver base class.
+ """Firewall Driver base class.
Defines methods that any driver providing security groups
and provider firewall functionality should implement.
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 2b57ba0b1..62cb46f2f 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -98,8 +98,9 @@ class HyperVDriver(driver.ComputeDriver):
block_device_info=None):
self._vmops.reboot(instance, network_info, reboot_type)
- def destroy(self, instance, network_info=None, cleanup=True):
- self._vmops.destroy(instance, network_info, cleanup)
+ def destroy(self, instance, network_info=None, cleanup=True,
+ destroy_disks=True):
+ self._vmops.destroy(instance, network_info, cleanup, destroy_disks)
def get_info(self, instance):
return self._vmops.get_info(instance)
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 8c501ab30..5cbe46c1c 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -39,7 +39,7 @@ class HostOps(baseops.BaseOps):
self._stats = None
def _get_cpu_info(self):
- """ Get the CPU information.
+ """Get the CPU information.
:returns: A dictionary containing the main properties
of the central processor in the hypervisor.
"""
@@ -107,7 +107,7 @@ class HostOps(baseops.BaseOps):
return total_gb, used_gb
def _get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
+ """Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
#TODO(jordanrinke) figure out a way to count assigned VCPUs
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 46fbd6cbc..2c3253685 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -69,7 +69,7 @@ class VMOps(baseops.BaseOps):
self._volumeops = volumeops
def list_instances(self):
- """ Return the names of all the instances known to Hyper-V. """
+ """Return the names of all the instances known to Hyper-V. """
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
@@ -118,7 +118,7 @@ class VMOps(baseops.BaseOps):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
- """ Create a new VM and start it."""
+ """Create a new VM and start it."""
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is not None:
raise exception.InstanceExists(name=instance['name'])
@@ -271,7 +271,7 @@ class VMOps(baseops.BaseOps):
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
- """ Create an iscsi controller ready to mount volumes """
+ """Create an iscsi controller ready to mount volumes """
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
@@ -448,7 +448,8 @@ class VMOps(baseops.BaseOps):
raise exception.InstanceNotFound(instance_id=instance["id"])
self._set_vm_state(instance['name'], 'Reboot')
- def destroy(self, instance, network_info=None, cleanup=True):
+ def destroy(self, instance, network_info=None, cleanup=True,
+ destroy_disks=True):
"""Destroy the VM. Also destroy the associated VHD disk files"""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
@@ -486,17 +487,18 @@ class VMOps(baseops.BaseOps):
if not success:
raise vmutils.HyperVException(_('Failed to destroy vm %s') %
instance['name'])
- #Disconnect volumes
- for volume_drive in volumes_drives_list:
- self._volumeops.disconnect_volume(volume_drive)
- #Delete associated vhd disk files.
- for disk in disk_files:
- vhdfile = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- disk.replace("'", "''") + "'")[0]
- LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
- % {'vhdfile': vhdfile, 'name': instance['name']})
- vhdfile.Delete()
+ if destroy_disks:
+ #Disconnect volumes
+ for volume_drive in volumes_drives_list:
+ self._volumeops.disconnect_volume(volume_drive)
+ #Delete associated vhd disk files.
+ for disk in disk_files:
+ vhdfile = self._conn_cimv2.query(
+ "Select * from CIM_DataFile where Name = '" +
+ disk.replace("'", "''") + "'")[0]
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
+ % {'vhdfile': vhdfile, 'name': instance['name']})
+ vhdfile.Delete()
def pause(self, instance):
"""Pause VM instance."""
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 8ae437cf9..31c05b9ad 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -68,7 +68,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """ Logs out storage target through its session id """
+ """Logs out storage target through its session id """
sessions = self._conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
@@ -77,5 +77,5 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
- """ Executes log out of the session described by its session ID """
+ """Executes log out of the session described by its session ID """
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
index 8d7c91862..03e3002f4 100644
--- a/nova/virt/hyperv/volumeutilsV2.py
+++ b/nova/virt/hyperv/volumeutilsV2.py
@@ -53,7 +53,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """ Logs out storage target through its session id """
+ """Logs out storage target through its session id """
target = self._conn_storage.MSFT_iSCSITarget(
NodeAddress=target_iqn)[0]
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index e3d95c62e..d90a34123 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -456,6 +456,10 @@ class LibvirtDriver(driver.ComputeDriver):
return names
+ def list_instance_uuids(self):
+ return [self._conn.lookupByName(name).UUIDString()
+ for name in self.list_instances()]
+
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
@@ -516,9 +520,10 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(_wait_for_destroy)
timer.start(interval=0.5).wait()
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
self._destroy(instance)
- self._cleanup(instance, network_info, block_device_info)
+ self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
@@ -551,7 +556,8 @@ class LibvirtDriver(driver.ComputeDriver):
locals(), instance=instance)
raise
- def _cleanup(self, instance, network_info, block_device_info):
+ def _cleanup(self, instance, network_info, block_device_info,
+ destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
try:
@@ -575,21 +581,22 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- target = os.path.join(CONF.instances_path, instance['name'])
- LOG.info(_('Deleting instance files %(target)s') % locals(),
- instance=instance)
- if os.path.exists(target):
- # If we fail to get rid of the directory
- # tree, this shouldn't block deletion of
- # the instance as whole.
- try:
- shutil.rmtree(target)
- except OSError, e:
- LOG.error(_("Failed to cleanup directory %(target)s: %(e)s") %
- locals())
+ if destroy_disks:
+ target = os.path.join(CONF.instances_path, instance['name'])
+ LOG.info(_('Deleting instance files %(target)s') % locals(),
+ instance=instance)
+ if os.path.exists(target):
+ # If we fail to get rid of the directory
+ # tree, this shouldn't block deletion of
+ # the instance as whole.
+ try:
+ shutil.rmtree(target)
+ except OSError, e:
+ LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
+ ) % locals())
- #NOTE(bfilippov): destroy all LVM disks for this instance
- self._cleanup_lvm(instance)
+ #NOTE(bfilippov): destroy all LVM disks for this instance
+ self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object"""
@@ -2026,7 +2033,7 @@ class LibvirtDriver(driver.ComputeDriver):
return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
+ """Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
@@ -2581,9 +2588,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
- """ Download kernel and ramdisk for given instance in the given
- instance directory.
- """
+ """Download kernel and ramdisk for instance in instance directory."""
instance_dir = os.path.join(CONF.instances_path, instance['name'])
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
@@ -3004,8 +3009,7 @@ class LibvirtDriver(driver.ComputeDriver):
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
- """ get the list of io devices from the
- xml document."""
+ """get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
@@ -3090,6 +3094,16 @@ class LibvirtDriver(driver.ComputeDriver):
"""only used for Resource Pools"""
pass
+ def instance_on_disk(self, instance):
+ # ensure directories exist and are writable
+ instance_path = os.path.join(CONF.instances_path, instance["name"])
+
+        LOG.debug(_('Checking instance files accessibility '
+                    '%(instance_path)s')
+ % locals())
+
+ return os.access(instance_path, os.W_OK)
+
class HostState(object):
"""Manages information about the compute node through libvirt"""
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index a3071e0c9..f65fa4a7e 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -273,7 +273,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def get_config(self, instance, network, mapping):
- """ Pass data required to create OVS virtual port element"""
+ """Pass data required to create OVS virtual port element"""
conf = super(LibvirtOpenVswitchVirtualPortDriver,
self).get_config(instance,
network,
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index c78c4c1b2..fd01ada52 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -39,8 +39,8 @@ CONF.register_opts(volume_opts)
class NfsVolumeDriver(volume.LibvirtVolumeDriver):
- """ Class implements libvirt part of volume driver for NFS
- """
+ """Class implements libvirt part of volume driver for NFS."""
+
def __init__(self, *args, **kwargs):
"""Create back-end to nfs and check connection"""
super(NfsVolumeDriver, self).__init__(*args, **kwargs)
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index b359716ff..fb3a0210c 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -18,11 +18,16 @@ import hashlib
import os
import re
+from eventlet import greenthread
+
from nova import utils
+from nova.image import glance
+
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
+
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
@@ -78,7 +83,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
:param context: nova context used to retrieve image from glance
:param instance: instance to create the volume for
- :image_id: image_id reference used to locate image in glance
+ :param image_id: image_id reference used to locate image in glance
:returns: dictionary with the name of the created
Logical Volume device in 'device_name' key
"""
@@ -125,8 +130,44 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
return {'device_name': disk_name}
- def create_image_from_volume(self):
- raise NotImplementedError()
+ def create_image_from_volume(self, device_name, context,
+ image_id, image_meta):
+ """Capture the contents of a volume and upload to glance
+
+ :param device_name: device in /dev/ to capture
+ :param context: nova context for operation
+ :param image_id: image reference to pre-created image in glance
+ :param image_meta: metadata for new image
+ """
+
+ # do the disk copy
+ dest_file_path = common.aix_path_join(CONF.powervm_img_remote_path,
+ image_id)
+ self._copy_device_to_file(device_name, dest_file_path)
+
+ # compress and copy the file back to the nova-compute host
+ snapshot_file_path = self._copy_image_file_from_host(
+ dest_file_path, CONF.powervm_img_local_path,
+ compress=True)
+
+ # get glance service
+ glance_service, image_id = glance.get_remote_image_service(
+ context, image_id)
+
+ # upload snapshot file to glance
+ with open(snapshot_file_path, 'r') as img_file:
+ glance_service.update(context,
+ image_id,
+ image_meta,
+ img_file)
+ LOG.debug(_("Snapshot added to glance."))
+
+ # clean up local image file
+ try:
+ os.remove(snapshot_file_path)
+ except OSError as ose:
+ LOG.warn(_("Failed to clean up snapshot file "
+ "%(snapshot_file_path)s") % locals())
def migrate_volume(self):
raise NotImplementedError()
@@ -202,6 +243,25 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
self.run_vios_command_as_root(cmd)
+ def _copy_device_to_file(self, device_name, file_path):
+ """Copy a device to a file using dd
+
+ :param device_name: device name to copy from
+ :param file_path: output file path
+ """
+ cmd = 'dd if=/dev/%s of=%s bs=1024k' % (device_name, file_path)
+ self.run_vios_command_as_root(cmd)
+
+ def _md5sum_remote_file(self, remote_path):
+ # AIX6/VIOS cannot md5sum files with sizes greater than ~2GB
+ cmd = ("perl -MDigest::MD5 -e 'my $file = \"%s\"; open(FILE, $file); "
+ "binmode(FILE); "
+ "print Digest::MD5->new->addfile(*FILE)->hexdigest, "
+ "\" $file\n\";'" % remote_path)
+
+ output = self.run_vios_command_as_root(cmd)
+ return output[0]
+
def _copy_image_file(self, source_path, remote_path, decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
@@ -225,26 +285,24 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
if not decompress:
final_path = comp_path
else:
- final_path = "%s.%s" % (uncomp_path, source_cksum)
+ final_path = uncomp_path
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
- if not len(output):
+ if not output:
# Copy file to IVM
common.ftp_put_command(self.connection_data, source_path,
remote_path)
# Verify image file checksums match
- cmd = ("/usr/bin/csum -h MD5 %s |"
- "/usr/bin/awk '{print $1}'" % comp_path)
- output = self.run_vios_command_as_root(cmd)
- if not len(output):
+ output = self._md5sum_remote_file(final_path)
+ if not output:
LOG.error(_("Unable to get checksum"))
raise exception.PowerVMFileTransferFailed()
- if source_cksum != output[0]:
+ if source_cksum != output.split(' ')[0]:
LOG.error(_("Image checksums do not match"))
raise exception.PowerVMFileTransferFailed()
@@ -271,7 +329,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
# Calculate file size in multiples of 512 bytes
output = self.run_vios_command("ls -o %s|awk '{print $4}'" %
final_path, check_exit_code=False)
- if len(output):
+ if output:
size = int(output[0])
else:
LOG.error(_("Uncompressed image file not found"))
@@ -281,6 +339,71 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
return final_path, size
+ def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
+ compress=False):
+ """
+ Copy a file from IVM to the nova-compute host,
+ and return the location of the copy
+
+    :param remote_source_path: remote source file path
+    :param local_dest_dir: local destination directory
+ :param compress: if True, compress the file before transfer;
+ if False (default), copy the file as is
+ """
+
+ temp_str = common.aix_path_join(local_dest_dir,
+ os.path.basename(remote_source_path))
+ local_file_path = temp_str + '.gz'
+
+ if compress:
+ copy_from_path = remote_source_path + '.gz'
+ else:
+ copy_from_path = remote_source_path
+
+ if compress:
+ # Gzip the file
+ cmd = "/usr/bin/gzip %s" % remote_source_path
+ self.run_vios_command_as_root(cmd)
+
+ # Cleanup uncompressed remote file
+ cmd = "/usr/bin/rm -f %s" % remote_source_path
+ self.run_vios_command_as_root(cmd)
+
+ # Get file checksum
+ output = self._md5sum_remote_file(copy_from_path)
+ if not output:
+ LOG.error(_("Unable to get checksum"))
+ msg_args = {'file_path': copy_from_path}
+ raise exception.PowerVMFileTransferFailed(**msg_args)
+ else:
+ source_chksum = output.split(' ')[0]
+
+ # Copy file to host
+ common.ftp_get_command(self.connection_data,
+ copy_from_path,
+ local_file_path)
+
+ # Calculate copied image checksum
+ with open(local_file_path, 'r') as image_file:
+ hasher = hashlib.md5()
+ block_size = 0x10000
+ buf = image_file.read(block_size)
+ while len(buf) > 0:
+ hasher.update(buf)
+ buf = image_file.read(block_size)
+ dest_chksum = hasher.hexdigest()
+
+ # do comparison
+ if source_chksum and dest_chksum != source_chksum:
+ LOG.error(_("Image checksums do not match"))
+ raise exception.PowerVMFileTransferFailed()
+
+ # Cleanup transferred remote file
+ cmd = "/usr/bin/rm -f %s" % copy_from_path
+ output = self.run_vios_command_as_root(cmd)
+
+ return local_file_path
+
def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
diff --git a/nova/virt/powervm/common.py b/nova/virt/powervm/common.py
index 179bd7f14..bf69be84e 100644
--- a/nova/virt/powervm/common.py
+++ b/nova/virt/powervm/common.py
@@ -63,6 +63,7 @@ def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
:returns: Tuple -- a tuple of (stdout, stderr)
:raises: nova.exception.ProcessExecutionError
"""
+ LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
chan = ssh_connection._transport.open_session()
# This command is required to be executed
# in order to become root.
@@ -108,5 +109,48 @@ def ftp_put_command(connection, local_path, remote_dir):
f.close()
ftp.close()
except Exception:
- LOG.exception(_('File transfer to PowerVM manager failed'))
- raise exception.PowerVMFileTransferFailed(file_path=local_path)
+ LOG.error(_('File transfer to PowerVM manager failed'))
+ raise exception.PowerVMFTPTransferFailed(ftp_cmd='PUT',
+ source_path=local_path, dest_path=remote_dir)
+
+
+def ftp_get_command(connection, remote_path, local_path):
+ """Retrieve a file via FTP
+
+ :param connection: a Connection object.
+ :param remote_path: path to the remote file
+ :param local_path: path to local destination
+    :raises: PowerVMFTPTransferFailed
+ """
+ try:
+ ftp = ftplib.FTP(host=connection.host,
+ user=connection.username,
+ passwd=connection.password)
+ ftp.cwd(os.path.dirname(remote_path))
+ name = os.path.basename(remote_path)
+ LOG.debug(_("ftp GET %(remote_path)s to: %(local_path)s") % locals())
+ with open(local_path, 'w') as ftpfile:
+ ftpcmd = 'RETR %s' % name
+ ftp.retrbinary(ftpcmd, ftpfile.write)
+ ftp.close()
+ except Exception:
+ LOG.error(_("File transfer from PowerVM manager failed"))
+ raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',
+ source_path=remote_path, dest_path=local_path)
+
+
+def aix_path_join(path_one, path_two):
+ """Ensures file path is built correctly for remote UNIX system
+
+ :param path_one: string of the first file path
+ :param path_two: string of the second file path
+ :returns: a uniform path constructed from both strings
+ """
+ if path_one.endswith('/'):
+ path_one = path_one.rstrip('/')
+
+ if path_two.startswith('/'):
+ path_two = path_two.lstrip('/')
+
+ final_path = path_one + '/' + path_two
+ return final_path
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 0821d4d84..5696bad87 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -14,6 +14,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import time
+
+from nova.compute import task_states
+from nova.compute import vm_states
+
+from nova import context as nova_context
+
+from nova.image import glance
+
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -90,9 +100,10 @@ class PowerVMDriver(driver.ComputeDriver):
"""Create a new instance/VM/domain on powerVM."""
self._powervm.spawn(context, instance, image_meta['id'])
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance."""
- self._powervm.destroy(instance['name'])
+ self._powervm.destroy(instance['name'], destroy_disks)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
@@ -111,6 +122,48 @@ class PowerVMDriver(driver.ComputeDriver):
"""
pass
+ def snapshot(self, context, instance, image_id):
+ """Snapshots the specified instance.
+
+ :param context: security context
+ :param instance: Instance object as returned by DB layer.
+ :param image_id: Reference to a pre-created image that will
+ hold the snapshot.
+ """
+ snapshot_start = time.time()
+
+ # get current image info
+ glance_service, old_image_id = glance.get_remote_image_service(
+ context, instance['image_ref'])
+ image_meta = glance_service.show(context, old_image_id)
+ img_props = image_meta['properties']
+
+ # build updated snapshot metadata
+ snapshot_meta = glance_service.show(context, image_id)
+ new_snapshot_meta = {'is_public': False,
+ 'name': snapshot_meta['name'],
+ 'status': 'active',
+ 'properties': {'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id']
+ },
+ 'disk_format': image_meta['disk_format'],
+ 'container_format': image_meta['container_format']
+ }
+
+ if 'architecture' in image_meta['properties']:
+ arch = image_meta['properties']['architecture']
+ new_snapshot_meta['properties']['architecture'] = arch
+
+ # disk capture and glance upload
+ self._powervm.capture_image(context, instance, image_id,
+ new_snapshot_meta)
+
+ snapshot_time = time.time() - snapshot_start
+ inst_name = instance['name']
+ LOG.info(_("%(inst_name)s captured in %(snapshot_time)s seconds") %
+ locals())
+
def pause(self, instance):
"""Pause the specified instance."""
pass
diff --git a/nova/virt/powervm/exception.py b/nova/virt/powervm/exception.py
index 2a8cf4771..50e08eaea 100644
--- a/nova/virt/powervm/exception.py
+++ b/nova/virt/powervm/exception.py
@@ -22,7 +22,11 @@ class PowerVMConnectionFailed(exception.NovaException):
class PowerVMFileTransferFailed(exception.NovaException):
- message = _("File '%(file_path)' transfer to PowerVM manager failed")
+ message = _("File '%(file_path)s' transfer to PowerVM manager failed")
+
+
+class PowerVMFTPTransferFailed(PowerVMFileTransferFailed):
+ message = _("FTP %(ftp_cmd)s from %(source_path)s to %(dest_path)s failed")
class PowerVMLPARInstanceNotFound(exception.InstanceNotFound):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index ad6b17035..f659f1ba7 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -276,18 +276,48 @@ class PowerVMOperator(object):
LOG.info(_("Instance spawned in %s seconds") % spawn_time,
instance=instance)
- def destroy(self, instance_name):
+ def destroy(self, instance_name, destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
:param instance_name: Instance name.
"""
try:
- self._cleanup(instance_name)
+ self._cleanup(instance_name, destroy_disks)
except exception.PowerVMLPARInstanceNotFound:
LOG.warn(_("During destroy, LPAR instance '%s' was not found on "
"PowerVM system.") % instance_name)
- def _cleanup(self, instance_name):
+ def capture_image(self, context, instance, image_id, image_meta):
+ """Capture the root disk for a snapshot
+
+ :param context: nova context for this operation
+ :param instance: instance information to capture the image from
+ :param image_id: uuid of pre-created snapshot image
+ :param image_meta: metadata to upload with captured image
+ """
+ lpar = self._operator.get_lpar(instance['name'])
+ previous_state = lpar['state']
+
+ # stop the instance if it is running
+ if previous_state == 'Running':
+ LOG.debug(_("Stopping instance %s for snapshot.") %
+ instance['name'])
+ # wait up to 2 minutes for shutdown
+ self.power_off(instance['name'], timeout=120)
+
+ # get disk_name
+ vhost = self._operator.get_vhost_by_instance_id(lpar['lpar_id'])
+ disk_name = self._operator.get_disk_name_by_vhost(vhost)
+
+ # do capture and upload
+ self._disk_adapter.create_image_from_volume(
+ disk_name, context, image_id, image_meta)
+
+ # restart instance if it was running before
+ if previous_state == 'Running':
+ self.power_on(instance['name'])
+
+ def _cleanup(self, instance_name, destroy_disks=True):
lpar_id = self._get_instance(instance_name)['lpar_id']
try:
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
@@ -296,7 +326,7 @@ class PowerVMOperator(object):
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
- if disk_name:
+ if disk_name and destroy_disks:
# TODO(mrodden): we should also detach from the instance
# before we start deleting things...
self._disk_adapter.detach_volume_from_host(disk_name)
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 50fc3e922..ce8005861 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -136,9 +136,10 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy VM instance."""
- self._vmops.destroy(instance, network_info)
+ self._vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index b5b5d1fff..e591245e2 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -539,7 +539,7 @@ class VMWareVMOps(object):
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
- def destroy(self, instance, network_info):
+ def destroy(self, instance, network_info, destroy_disks=True):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -590,30 +590,32 @@ class VMWareVMOps(object):
# Delete the folder holding the VM related content on
# the datastore.
- try:
- dir_ds_compliant_path = vm_util.build_datastore_path(
- datastore_name,
- os.path.dirname(vmx_file_path))
- LOG.debug(_("Deleting contents of the VM from "
- "datastore %(datastore_name)s") %
- {'datastore_name': datastore_name},
- instance=instance)
- delete_task = self._session._call_method(
- self._session._get_vim(),
- "DeleteDatastoreFile_Task",
- self._session._get_vim().get_service_content().fileManager,
- name=dir_ds_compliant_path)
- self._session._wait_for_task(instance['uuid'], delete_task)
- LOG.debug(_("Deleted contents of the VM from "
- "datastore %(datastore_name)s") %
- {'datastore_name': datastore_name},
- instance=instance)
- except Exception, excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, "
- "got this exception while deleting"
- " the VM contents from the disk: %s")
- % str(excep),
- instance=instance)
+ if destroy_disks:
+ try:
+ dir_ds_compliant_path = vm_util.build_datastore_path(
+ datastore_name,
+ os.path.dirname(vmx_file_path))
+ LOG.debug(_("Deleting contents of the VM from "
+ "datastore %(datastore_name)s") %
+ {'datastore_name': datastore_name},
+ instance=instance)
+ vim = self._session._get_vim()
+ delete_task = self._session._call_method(
+ vim,
+ "DeleteDatastoreFile_Task",
+ vim.get_service_content().fileManager,
+ name=dir_ds_compliant_path)
+ self._session._wait_for_task(instance['uuid'], delete_task)
+ LOG.debug(_("Deleted contents of the VM from "
+ "datastore %(datastore_name)s") %
+ {'datastore_name': datastore_name},
+ instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, "
+ "got this exception while deleting"
+ " the VM contents from the disk: %s")
+ % str(excep),
+ instance=instance)
except Exception, exc:
LOG.exception(exc, instance=instance)
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index d3047d364..1b60faa9f 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -189,7 +189,7 @@ class XenAPIDriver(driver.ComputeDriver):
block_device_info)
def snapshot(self, context, instance, image_id, update_task_state):
- """ Create snapshot from a running VM instance """
+ """Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, instance, network_info, reboot_type,
@@ -211,9 +211,11 @@ class XenAPIDriver(driver.ComputeDriver):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy VM instance"""
- self._vmops.destroy(instance, network_info, block_device_info)
+ self._vmops.destroy(instance, network_info, block_device_info,
+ destroy_disks)
def pause(self, instance):
"""Pause VM instance"""
@@ -515,24 +517,24 @@ class XenAPIDriver(driver.ComputeDriver):
return self._vmops.unfilter_instance(instance_ref, network_info)
def refresh_security_group_rules(self, security_group_id):
- """ Updates security group rules for all instances
- associated with a given security group
- Invoked when security group rules are updated
- """
+ """Updates security group rules for all instances associated with a
+ given security group.
+
+ Invoked when security group rules are updated."""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """ Updates security group rules for all instances
- associated with a given security group
- Invoked when instances are added/removed to a security group
- """
+ """Updates security group rules for all instances associated with a
+ given security group.
+
+ Invoked when instances are added/removed to a security group."""
return self._vmops.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """ Updates security group rules for specified instance
- Invoked when instances are added/removed to a security group
- or when a rule is added/removed to a security group
- """
+ """Updates security group rules for specified instance.
+
+ Invoked when instances are added/removed to a security group
+ or when a rule is added/removed to a security group."""
return self._vmops.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index e30465741..9c6a60d18 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -27,12 +27,11 @@ LOG = logging.getLogger(__name__)
class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
- """ Dom0IptablesFirewallDriver class
+ """Dom0IptablesFirewallDriver class
This class provides an implementation for nova.virt.Firewall
using iptables. This class is meant to be used with the xenapi
- backend and uses xenapi plugin to enforce iptables rules in dom0
-
+ backend and uses xenapi plugin to enforce iptables rules in dom0.
"""
def _plugin_execute(self, *cmd, **kwargs):
# Prepare arguments for plugin call
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index fbf3e0599..d3dfdd539 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1067,7 +1067,8 @@ class VMOps(object):
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
@@ -1087,10 +1088,11 @@ class VMOps(object):
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
return self._destroy(instance, vm_ref, network_info,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
- block_device_info=None):
+ block_device_info=None, destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
@@ -1106,10 +1108,11 @@ class VMOps(object):
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
- # Destroy VDIs
- self._detach_vm_vols(instance, vm_ref, block_device_info)
- self._destroy_vdis(instance, vm_ref, block_device_info)
- self._destroy_kernel_ramdisk(instance, vm_ref)
+ # Destroy VDIs (if necessary)
+ if destroy_disks:
+ self._detach_vm_vols(instance, vm_ref, block_device_info)
+ self._destroy_vdis(instance, vm_ref, block_device_info)
+ self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
@@ -1509,15 +1512,15 @@ class VMOps(object):
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
- """ recreates security group rules for every instance """
+        """Recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """ recreates security group rules for every instance """
+        """Recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """ recreates security group rules for specified instance """
+        """Recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -1600,8 +1603,8 @@ class VMOps(object):
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
- """ Check if it is possible to execute live migration
- on the source side.
+ """Check if it's possible to execute live migration on the source side.
+
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 04c151d1e..514295605 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -42,6 +42,9 @@ cinder_opts = [
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
+ cfg.IntOpt('cinder_http_retries',
+ default=3,
+ help='Number of cinderclient retries on failed http calls'),
]
CONF = cfg.CONF
@@ -72,7 +75,8 @@ def cinderclient(context):
c = cinder_client.Client(context.user_id,
context.auth_token,
project_id=context.project_id,
- auth_url=url)
+ auth_url=url,
+ retries=CONF.cinder_http_retries)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
diff --git a/run_tests.sh b/run_tests.sh
index a34cab5a0..c4a1d9efc 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -119,7 +119,7 @@ function run_pep8 {
srcfiles+=" setup.py"
# Until all these issues get fixed, ignore.
- ignore='--ignore=N4,E12,E711,E721,E712'
+ ignore='--ignore=N402,E12,E711,E721,E712'
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
diff --git a/setup.py b/setup.py
index b04ac2b4a..12de5c4d6 100644
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,7 @@ setuptools.setup(name='nova',
'bin/nova-api-metadata',
'bin/nova-api-os-compute',
'bin/nova-baremetal-deploy-helper',
+ 'bin/nova-baremetal-manage',
'bin/nova-rpc-zmq-receiver',
'bin/nova-cells',
'bin/nova-cert',
diff --git a/tools/hacking.py b/tools/hacking.py
index bde4f42d4..a860aa37b 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -268,18 +268,23 @@ def nova_import_no_db_in_virt(logical_line, filename):
yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*")
-def nova_docstring_start_space(physical_line):
+def nova_docstring_start_space(physical_line, previous_logical):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) > pos + 1):
- if (physical_line[pos + 3] == ' '):
- return (pos, "NOVA N401: one line docstring should not start with"
- " a space")
+ # it's important that we determine this is actually a docstring,
+ # and not a doc block used somewhere after the first line of a
+ # function def
+ if (previous_logical.startswith("def ") or
+ previous_logical.startswith("class ")):
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
+ if (pos != -1 and len(physical_line) > pos + 4):
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "NOVA N401: docstring should not start with"
+ " a space")
def nova_docstring_one_line(physical_line):
diff --git a/tox.ini b/tox.ini
index a3e44630f..ca5e6e778 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,9 +18,9 @@ downloadcache = ~/cache/pip
[testenv:pep8]
deps=pep8==1.3.3
commands =
- python tools/hacking.py --ignore=N4,E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=N402,E12,E711,E721,E712 --repeat --show-source \
--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
- python tools/hacking.py --ignore=N4,E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=N402,E12,E711,E721,E712 --repeat --show-source \
--filename=nova* bin
[testenv:pylint]