summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-dhcpbridge2
-rwxr-xr-xbin/nova-manage10
-rwxr-xr-xcontrib/xen/vif-openstack39
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json20
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml10
-rw-r--r--doc/api_samples/os-cells/cells-get-resp.json9
-rw-r--r--doc/api_samples/os-cells/cells-get-resp.xml2
-rw-r--r--doc/api_samples/os-cells/cells-list-empty-resp.json3
-rw-r--r--doc/api_samples/os-cells/cells-list-empty-resp.xml2
-rw-r--r--doc/api_samples/os-cells/cells-list-resp.json39
-rw-r--r--doc/api_samples/os-cells/cells-list-resp.xml8
-rw-r--r--doc/api_samples/os-hosts/hosts-list-resp.json5
-rw-r--r--doc/api_samples/os-hosts/hosts-list-resp.xml3
-rw-r--r--doc/api_samples/os-tenant-networks/networks-list-res.json (renamed from doc/api_samples/os-networks/networks-list-res.json)0
-rw-r--r--doc/api_samples/os-tenant-networks/networks-post-res.json (renamed from doc/api_samples/os-networks/networks-post-res.json)0
-rw-r--r--etc/nova/policy.json1
-rw-r--r--nova/api/ec2/cloud.py20
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py4
-rw-r--r--nova/api/openstack/compute/contrib/admin_networks.py170
-rw-r--r--nova/api/openstack/compute/contrib/cells.py303
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py78
-rw-r--r--nova/api/openstack/compute/contrib/networks_associate.py2
-rw-r--r--nova/api/openstack/compute/contrib/os_networks.py261
-rw-r--r--nova/api/openstack/compute/contrib/os_tenant_networks.py214
-rw-r--r--nova/api/openstack/compute/contrib/services.py2
-rw-r--r--nova/api/openstack/compute/servers.py24
-rw-r--r--nova/api/sizelimit.py2
-rw-r--r--nova/cells/manager.py13
-rw-r--r--nova/cells/messaging.py34
-rw-r--r--nova/cells/rpcapi.py19
-rw-r--r--nova/cells/state.py13
-rw-r--r--nova/compute/api.py368
-rw-r--r--nova/compute/cells_api.py121
-rw-r--r--nova/compute/manager.py42
-rw-r--r--nova/compute/resource_tracker.py6
-rw-r--r--nova/compute/rpcapi.py10
-rw-r--r--nova/conductor/api.py16
-rw-r--r--nova/conductor/manager.py5
-rw-r--r--nova/console/manager.py2
-rw-r--r--nova/console/vmrc_manager.py4
-rw-r--r--nova/db/api.py21
-rw-r--r--nova/db/sqlalchemy/api.py79
-rw-r--r--nova/exception.py33
-rw-r--r--nova/locale/nova.pot4
-rw-r--r--nova/manager.py5
-rw-r--r--nova/network/api.py14
-rw-r--r--nova/network/manager.py8
-rw-r--r--nova/network/model.py16
-rw-r--r--nova/network/quantumv2/api.py42
-rw-r--r--nova/quota.py2
-rw-r--r--nova/scheduler/driver.py11
-rw-r--r--nova/scheduler/manager.py7
-rw-r--r--nova/service.py1
-rw-r--r--nova/tests/api/ec2/test_cloud.py12
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py89
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cells.py396
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py18
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py1
-rw-r--r--nova/tests/baremetal/test_driver.py13
-rw-r--r--nova/tests/cells/test_cells_manager.py26
-rw-r--r--nova/tests/cells/test_cells_messaging.py46
-rw-r--r--nova/tests/cells/test_cells_rpcapi.py20
-rw-r--r--nova/tests/compute/test_compute.py330
-rw-r--r--nova/tests/compute/test_compute_cells.py86
-rw-r--r--nova/tests/compute/test_host_api.py175
-rw-r--r--nova/tests/compute/test_resource_tracker.py16
-rw-r--r--nova/tests/compute/test_rpcapi.py3
-rw-r--r--nova/tests/conductor/test_conductor.py34
-rw-r--r--nova/tests/fake_imagebackend.py2
-rw-r--r--nova/tests/fake_libvirt_utils.py11
-rw-r--r--nova/tests/fake_policy.py7
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gzbin291 -> 291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gzbin0 -> 618 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gzbin735 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gzbin313 -> 313 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gzbin433 -> 430 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gzbin645 -> 725 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gzbin17580 -> 21340 bytes
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl39
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl)0
-rw-r--r--nova/tests/integrated/integrated_helpers.py5
-rw-r--r--nova/tests/integrated/test_api_samples.py105
-rw-r--r--nova/tests/integrated/test_extensions.py1
-rw-r--r--nova/tests/network/test_api.py22
-rw-r--r--nova/tests/network/test_manager.py4
-rw-r--r--nova/tests/network/test_quantumv2.py41
-rw-r--r--nova/tests/scheduler/test_scheduler.py113
-rw-r--r--nova/tests/test_configdrive2.py4
-rw-r--r--nova/tests/test_db_api.py21
-rw-r--r--nova/tests/test_exception.py12
-rw-r--r--nova/tests/test_hypervapi.py8
-rw-r--r--nova/tests/test_imagebackend.py11
-rw-r--r--nova/tests/test_libvirt.py22
-rw-r--r--nova/tests/test_libvirt_vif.py7
-rw-r--r--nova/tests/test_metadata.py10
-rw-r--r--nova/tests/test_periodic_tasks.py14
-rw-r--r--nova/tests/test_service.py1
-rw-r--r--nova/tests/test_vmwareapi.py10
-rw-r--r--nova/tests/utils.py3
-rw-r--r--nova/tests/vmwareapi/stubs.py14
-rw-r--r--nova/virt/baremetal/driver.py26
-rw-r--r--nova/virt/baremetal/volume_driver.py1
-rw-r--r--nova/virt/configdrive.py12
-rw-r--r--nova/virt/driver.py29
-rw-r--r--nova/virt/hyperv/vif.py133
-rw-r--r--nova/virt/hyperv/vmops.py94
-rw-r--r--nova/virt/hyperv/vmutils.py16
-rw-r--r--nova/virt/libvirt/config.py8
-rw-r--r--nova/virt/libvirt/designer.py101
-rw-r--r--nova/virt/libvirt/driver.py138
-rw-r--r--nova/virt/libvirt/imagebackend.py12
-rw-r--r--nova/virt/libvirt/utils.py17
-rw-r--r--nova/virt/libvirt/vif.py138
-rw-r--r--nova/virt/vmwareapi/__init__.py2
-rw-r--r--nova/virt/vmwareapi/driver.py28
-rw-r--r--nova/virt/vmwareapi/fake.py2
-rw-r--r--nova/virt/vmwareapi/network_util.py (renamed from nova/virt/vmwareapi/network_utils.py)0
-rw-r--r--nova/virt/vmwareapi/read_write_util.py12
-rw-r--r--nova/virt/vmwareapi/vif.py14
-rw-r--r--nova/virt/vmwareapi/vim.py123
-rw-r--r--nova/virt/vmwareapi/vm_util.py2
-rw-r--r--nova/virt/vmwareapi/vmops.py6
-rw-r--r--nova/virt/vmwareapi/vmware_images.py10
-rw-r--r--nova/virt/xenapi/vm_utils.py6
-rw-r--r--nova/volume/cinder.py14
-rw-r--r--nova/wsgi.py14
139 files changed, 3355 insertions, 1562 deletions
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 6187e052d..ee7bf2da9 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -111,7 +111,7 @@ CONF.register_cli_opt(
def main():
- """Parse environment and arguments and call the approproate action."""
+ """Parse environment and arguments and call the appropriate action."""
try:
config_file = os.environ['CONFIG_FILE']
except KeyError:
diff --git a/bin/nova-manage b/bin/nova-manage
index 67212a198..4f3d889ea 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -205,7 +205,7 @@ class ShellCommands(object):
@args('--path', dest='path', metavar='<path>', help='Script path')
def script(self, path):
- """Runs the script from the specifed path with flags set properly.
+ """Runs the script from the specified path with flags set properly.
arguments: path"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
@@ -1056,11 +1056,11 @@ class CellCommands(object):
ctxt = context.get_admin_context()
db.cell_create(ctxt, values)
- @args('--cell_id', dest='cell_id', metavar='<cell_id>',
- help='ID of the cell to delete')
- def delete(self, cell_id):
+ @args('--cell_name', dest='cell_name', metavar='<cell_name>',
+ help='Name of the cell to delete')
+ def delete(self, cell_name):
ctxt = context.get_admin_context()
- db.cell_delete(ctxt, cell_id)
+ db.cell_delete(ctxt, cell_name)
def list(self):
ctxt = context.get_admin_context()
diff --git a/contrib/xen/vif-openstack b/contrib/xen/vif-openstack
new file mode 100755
index 000000000..1df6ad6ac
--- /dev/null
+++ b/contrib/xen/vif-openstack
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+## vim: set syn=on ts=4 sw=4 sts=0 noet foldmethod=indent:
+## copyright: B1 Systems GmbH <info@b1-systems.de>, 2012.
+## author: Christian Berendt <berendt@b1-systems.de>, 2012.
+## license: Apache License, Version 2.0
+##
+## purpose:
+## Creates a new vif device without attaching it to a
+## bridge. Quantum Linux Bridge Agent will attach the
+## created device to the belonging bridge.
+##
+## usage:
+## place the script in ${XEN_SCRIPT_DIR}/vif-openstack and
+## set (vif-script vif-openstack) in /etc/xen/xend-config.sxp.
+
+dir=$(dirname "$0")
+. "$dir/vif-common.sh"
+
+case "$command" in
+ online)
+ setup_virtual_bridge_port "$dev"
+ ip link set $dev up
+ ;;
+
+ offline)
+ ip link set $dev down
+ ;;
+
+ add)
+ setup_virtual_bridge_port "$dev"
+ ip link set $dev up
+ ;;
+esac
+
+if [ "$type_if" = vif -a "$command" = "online" ]
+then
+ success
+fi
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 42e86eadd..bd002c080 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -89,6 +89,14 @@
"updated": "2012-08-09T00:00:00+00:00"
},
{
+ "alias": "os-cells",
+ "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ",
+ "links": [],
+ "name": "Cells",
+ "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
+ "updated": "2011-09-21T00:00:00+00:00"
+ },
+ {
"alias": "os-certificates",
"description": "Certificates support.",
"links": [],
@@ -297,19 +305,19 @@
"updated": "2012-08-07T00:00:00+00:00"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "Admin-only Network Management Extension.",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "2011-12-23T00:00:00+00:00"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "Tenant-based Network Management Extension.",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "2011-12-23T00:00:00+00:00"
},
{
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index ea0b45a12..ebb1c4302 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -37,6 +37,12 @@
<extension alias="os-availability-zone" updated="2012-08-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
<description>Add availability_zone to the Create Server v1.1 API.</description>
</extension>
+ <extension alias="os-cells" updated="2011-09-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
+ <description>Enables cells-related functionality such as adding child cells,
+ listing child cells, getting the capabilities of the local cell,
+ and returning build plans to parent cells' schedulers
+ </description>
+ </extension>
<extension alias="os-certificates" updated="2012-01-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
<description>Certificates support.</description>
</extension>
@@ -125,13 +131,13 @@
<extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>Allow multiple create in the Create Server v1.1 API.</description>
</extension>
- <extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>Admin-only Network Management Extension.</description>
</extension>
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
- <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>Tenant-based Network Management Extension.</description>
</extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
diff --git a/doc/api_samples/os-cells/cells-get-resp.json b/doc/api_samples/os-cells/cells-get-resp.json
new file mode 100644
index 000000000..62eb8ec31
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-get-resp.xml b/doc/api_samples/os-cells/cells-get-resp.xml
new file mode 100644
index 000000000..12256a5bd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-get-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/> \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.json b/doc/api_samples/os-cells/cells-list-empty-resp.json
new file mode 100644
index 000000000..5325a4e85
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-empty-resp.json
@@ -0,0 +1,3 @@
+{
+ "cells": []
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.xml b/doc/api_samples/os-cells/cells-list-empty-resp.xml
new file mode 100644
index 000000000..6ac77b4bd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-empty-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/> \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-resp.json b/doc/api_samples/os-cells/cells-list-resp.json
new file mode 100644
index 000000000..97ea4c6dd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-resp.json
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username1"
+ },
+ {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ },
+ {
+ "name": "cell5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username5"
+ },
+ {
+ "name": "cell2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username2"
+ },
+ {
+ "name": "cell4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username4"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-resp.xml b/doc/api_samples/os-cells/cells-list-resp.xml
new file mode 100644
index 000000000..7d697bb91
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-resp.xml
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <cell username="username1" rpc_host="None" type="child" name="cell1" rpc_port="None"/>
+ <cell username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/>
+ <cell username="username5" rpc_host="None" type="child" name="cell5" rpc_port="None"/>
+ <cell username="username2" rpc_host="None" type="parent" name="cell2" rpc_port="None"/>
+ <cell username="username4" rpc_host="None" type="parent" name="cell4" rpc_port="None"/>
+</cells> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.json b/doc/api_samples/os-hosts/hosts-list-resp.json
index 5a963c602..0c4126a7e 100644
--- a/doc/api_samples/os-hosts/hosts-list-resp.json
+++ b/doc/api_samples/os-hosts/hosts-list-resp.json
@@ -24,6 +24,11 @@
"host_name": "6e48bfe1a3304b7b86154326328750ae",
"service": "conductor",
"zone": "internal"
+ },
+ {
+ "host_name": "39f55087a1024d1380755951c945ca69",
+ "service": "cells",
+ "zone": "internal"
}
]
}
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.xml b/doc/api_samples/os-hosts/hosts-list-resp.xml
index 8266a5d49..9a99c577a 100644
--- a/doc/api_samples/os-hosts/hosts-list-resp.xml
+++ b/doc/api_samples/os-hosts/hosts-list-resp.xml
@@ -5,4 +5,5 @@
<host host_name="2d1bdd671b5d41fd89dec74be5770c63" service="network"/>
<host host_name="7c2dd5ecb7494dd1bf4240b7f7f9bf3a" service="scheduler"/>
<host host_name="f9c273d8e03141a2a01def0ad18e5be4" service="conductor"/>
-</hosts> \ No newline at end of file
+ <host host_name="2b893569cd824b979bd80a2c94570a1f" service="cells"/>
+</hosts>
diff --git a/doc/api_samples/os-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json
index b857e8112..b857e8112 100644
--- a/doc/api_samples/os-networks/networks-list-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-list-res.json
diff --git a/doc/api_samples/os-networks/networks-post-res.json b/doc/api_samples/os-tenant-networks/networks-post-res.json
index 536a9a0a4..536a9a0a4 100644
--- a/doc/api_samples/os-networks/networks-post-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-post-res.json
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 04766371e..fd1f9c2e0 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -29,6 +29,7 @@
"compute_extension:admin_actions:migrate": "rule:admin_api",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:agents": "rule:admin_api",
+ "compute_extension:cells": "rule:admin_api",
"compute_extension:certificates": "",
"compute_extension:cloudpipe": "rule:admin_api",
"compute_extension:cloudpipe_update": "rule:admin_api",
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 73a4a02ae..414b2e969 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -27,6 +27,7 @@ import time
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
@@ -148,7 +149,7 @@ def _properties_get_mappings(properties):
def _format_block_device_mapping(bdm):
- """Contruct BlockDeviceMappingItemType
+ """Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
@@ -758,6 +759,23 @@ class CloudController(object):
return True
+ def get_password_data(self, context, instance_id, **kwargs):
+ # instance_id may be passed in as a list of instances
+ if isinstance(instance_id, list):
+ ec2_id = instance_id[0]
+ else:
+ ec2_id = instance_id
+ validate_ec2_id(ec2_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ output = password.extract_password(instance)
+ # NOTE(vish): this should be timestamp from the metadata fields
+ # but it isn't important enough to implement properly
+ now = timeutils.utcnow()
+ return {"InstanceId": ec2_id,
+ "Timestamp": now,
+ "passwordData": output}
+
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index f345d9617..fa7836b37 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -307,9 +307,7 @@ class AdminActionsController(wsgi.Controller):
try:
instance = self.compute_api.get(context, id)
- self.compute_api.update(context, instance,
- vm_state=state,
- task_state=None)
+ self.compute_api.update_state(context, instance, state)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
diff --git a/nova/api/openstack/compute/contrib/admin_networks.py b/nova/api/openstack/compute/contrib/admin_networks.py
deleted file mode 100644
index f5facd601..000000000
--- a/nova/api/openstack/compute/contrib/admin_networks.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Grid Dynamics
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-import webob
-from webob import exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import exception
-from nova import network
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'admin_networks')
-authorize_view = extensions.extension_authorizer('compute',
- 'admin_networks:view')
-
-
-def network_dict(context, network):
- fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
- 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
- admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
- 'injected', 'bridge', 'vlan', 'vpn_public_address',
- 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
- 'project_id', 'host', 'bridge_interface', 'multi_host',
- 'priority', 'rxtx_base')
- if network:
- # NOTE(mnaser): We display a limited set of fields so users can know
- # what networks are available, extra system-only fields
- # are only visible if they are an admin.
- if context.is_admin:
- fields += admin_fields
- result = dict((field, network[field]) for field in fields)
- if 'uuid' in network:
- result['id'] = network['uuid']
- return result
- else:
- return {}
-
-
-class AdminNetworkController(wsgi.Controller):
-
- def __init__(self, network_api=None):
- self.network_api = network_api or network.API()
-
- def index(self, req):
- context = req.environ['nova.context']
- authorize_view(context)
- networks = self.network_api.get_all(context)
- result = [network_dict(context, net_ref) for net_ref in networks]
- return {'networks': result}
-
- @wsgi.action("disassociate")
- def _disassociate_host_and_project(self, req, id, body):
- context = req.environ['nova.context']
- authorize(context)
- LOG.debug(_("Disassociating network with id %s"), id)
-
- try:
- self.network_api.associate(context, id, host=None, project=None)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def show(self, req, id):
- context = req.environ['nova.context']
- authorize_view(context)
- LOG.debug(_("Showing network with id %s") % id)
- try:
- network = self.network_api.get(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return {'network': network_dict(context, network)}
-
- def delete(self, req, id):
- context = req.environ['nova.context']
- authorize(context)
- LOG.info(_("Deleting network with id %s") % id)
- try:
- self.network_api.delete(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def create(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
-
- def bad(e):
- return exc.HTTPUnprocessableEntity(explanation=e)
-
- if not (body and body.get("network")):
- raise bad(_("Missing network in body"))
-
- params = body["network"]
- if not params.get("label"):
- raise bad(_("Network label is required"))
-
- cidr = params.get("cidr") or params.get("cidr_v6")
- if not cidr:
- raise bad(_("Network cidr or cidr_v6 is required"))
-
- LOG.debug(_("Creating network with label %s") % params["label"])
-
- params["num_networks"] = 1
- params["network_size"] = netaddr.IPNetwork(cidr).size
-
- network = self.network_api.create(context, **params)[0]
- return {"network": network_dict(context, network)}
-
- def add(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- network_id = body.get('id', None)
- project_id = context.project_id
- LOG.debug(_("Associating network %(network)s"
- " with project %(project)s") %
- {"network": network_id or "",
- "project": project_id})
- try:
- self.network_api.add_network_to_project(
- context, project_id, network_id)
- except Exception as ex:
- msg = (_("Cannot associate network %(network)s"
- " with project %(project)s: %(message)s") %
- {"network": network_id or "",
- "project": project_id,
- "message": getattr(ex, "value", str(ex))})
- raise exc.HTTPBadRequest(explanation=msg)
-
- return webob.Response(status_int=202)
-
-
-class Admin_networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension."""
-
- name = "AdminNetworks"
- alias = "os-admin-networks"
- namespace = ("http://docs.openstack.org/compute/"
- "ext/os-admin-networks/api/v1.1")
- updated = "2011-12-23T00:00:00+00:00"
-
- def get_resources(self):
- member_actions = {'action': 'POST'}
- collection_actions = {'add': 'POST'}
- res = extensions.ResourceExtension(
- 'os-admin-networks',
- AdminNetworkController(),
- member_actions=member_actions,
- collection_actions=collection_actions)
- return [res]
diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py
new file mode 100644
index 000000000..03e2e4ca2
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/cells.py
@@ -0,0 +1,303 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The cells extension."""
+from xml.dom import minidom
+from xml.parsers import expat
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova.compute import api as compute
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
+
+authorize = extensions.extension_authorizer('compute', 'cells')
+
+
+def make_cell(elem):
+ elem.set('name')
+ elem.set('username')
+ elem.set('type')
+ elem.set('rpc_host')
+ elem.set('rpc_port')
+
+ caps = xmlutil.SubTemplateElement(elem, 'capabilities',
+ selector='capabilities')
+ cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
+ selector=xmlutil.get_items)
+ cap.text = 1
+
+
+cell_nsmap = {None: wsgi.XMLNS_V10}
+
+
+class CellTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('cell', selector='cell')
+ make_cell(root)
+ return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
+
+
+class CellsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('cells')
+ elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
+ make_cell(elem)
+ return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
+
+
+class CellDeserializer(wsgi.XMLDeserializer):
+ """Deserializer to handle xml-formatted cell create requests."""
+
+ def _extract_capabilities(self, cap_node):
+ caps = {}
+ for cap in cap_node.childNodes:
+ cap_name = cap.tagName
+ caps[cap_name] = self.extract_text(cap)
+ return caps
+
+ def _extract_cell(self, node):
+ cell = {}
+ cell_node = self.find_first_child_named(node, 'cell')
+
+ extract_fns = {'capabilities': self._extract_capabilities}
+
+ for child in cell_node.childNodes:
+ name = child.tagName
+ extract_fn = extract_fns.get(name, self.extract_text)
+ cell[name] = extract_fn(child)
+ return cell
+
+ def default(self, string):
+ """Deserialize an xml-formatted cell create request."""
+ try:
+ node = minidom.parseString(string)
+ except expat.ExpatError:
+ msg = _("cannot understand XML")
+ raise exception.MalformedRequestBody(reason=msg)
+
+ return {'body': {'cell': self._extract_cell(node)}}
+
+
+def _filter_keys(item, keys):
+ """
+    Filter ``item`` (a dict), keeping only the attributes named in ``keys``.
+    Returns a new dict.
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
+def _scrub_cell(cell, detail=False):
+ keys = ['name', 'username', 'rpc_host', 'rpc_port']
+ if detail:
+ keys.append('capabilities')
+
+ cell_info = _filter_keys(cell, keys)
+ cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
+ return cell_info
+
+
+class Controller(object):
+ """Controller for Cell resources."""
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _get_cells(self, ctxt, req, detail=False):
+ """Return all cells."""
+ # Ask the CellsManager for the most recent data
+ items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
+ items = common.limited(items, req)
+ items = [_scrub_cell(item, detail=detail) for item in items]
+ return dict(cells=items)
+
+ @wsgi.serializers(xml=CellsTemplate)
+ def index(self, req):
+ """Return all cells in brief."""
+ ctxt = req.environ['nova.context']
+ authorize(ctxt)
+ return self._get_cells(ctxt, req)
+
+ @wsgi.serializers(xml=CellsTemplate)
+ def detail(self, req):
+ """Return all cells in detail."""
+ ctxt = req.environ['nova.context']
+ authorize(ctxt)
+ return self._get_cells(ctxt, req, detail=True)
+
+ @wsgi.serializers(xml=CellTemplate)
+ def info(self, req):
+ """Return name and capabilities for this cell."""
+ context = req.environ['nova.context']
+ authorize(context)
+ cell_capabs = {}
+ my_caps = CONF.cells.capabilities
+ for cap in my_caps:
+ key, value = cap.split('=')
+ cell_capabs[key] = value
+ cell = {'name': CONF.cells.name,
+ 'type': 'self',
+ 'rpc_host': None,
+ 'rpc_port': 0,
+ 'username': None,
+ 'capabilities': cell_capabs}
+ return dict(cell=cell)
+
+ @wsgi.serializers(xml=CellTemplate)
+ def show(self, req, id):
+ """Return data about the given cell name. 'id' is a cell name."""
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ cell = db.cell_get(context, id)
+ except exception.CellNotFound:
+ raise exc.HTTPNotFound()
+ return dict(cell=_scrub_cell(cell))
+
+ def delete(self, req, id):
+ """Delete a child or parent cell entry. 'id' is a cell name."""
+ context = req.environ['nova.context']
+ authorize(context)
+ num_deleted = db.cell_delete(context, id)
+ if num_deleted == 0:
+ raise exc.HTTPNotFound()
+ return {}
+
+ def _validate_cell_name(self, cell_name):
+ """Validate cell name is not empty and doesn't contain '!' or '.'."""
+ if not cell_name:
+ msg = _("Cell name cannot be empty")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ if '!' in cell_name or '.' in cell_name:
+ msg = _("Cell name cannot contain '!' or '.'")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ def _validate_cell_type(self, cell_type):
+ """Validate cell_type is 'parent' or 'child'."""
+ if cell_type not in ['parent', 'child']:
+ msg = _("Cell type must be 'parent' or 'child'")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ def _convert_cell_type(self, cell):
+ """Convert cell['type'] to is_parent boolean."""
+ if 'type' in cell:
+ self._validate_cell_type(cell['type'])
+ cell['is_parent'] = cell['type'] == 'parent'
+ del cell['type']
+ else:
+ cell['is_parent'] = False
+
+ @wsgi.serializers(xml=CellTemplate)
+ @wsgi.deserializers(xml=CellDeserializer)
+ def create(self, req, body):
+ """Create a child cell entry."""
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'cell' not in body:
+ msg = _("No cell information in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ cell = body['cell']
+ if 'name' not in cell:
+ msg = _("No cell name in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ self._validate_cell_name(cell['name'])
+ self._convert_cell_type(cell)
+ cell = db.cell_create(context, cell)
+ return dict(cell=_scrub_cell(cell))
+
+ @wsgi.serializers(xml=CellTemplate)
+ @wsgi.deserializers(xml=CellDeserializer)
+ def update(self, req, id, body):
+ """Update a child cell entry. 'id' is the cell name to update."""
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'cell' not in body:
+ msg = _("No cell information in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ cell = body['cell']
+ cell.pop('id', None)
+ if 'name' in cell:
+ self._validate_cell_name(cell['name'])
+ self._convert_cell_type(cell)
+ try:
+ cell = db.cell_update(context, id, cell)
+ except exception.CellNotFound:
+ raise exc.HTTPNotFound()
+ return dict(cell=_scrub_cell(cell))
+
+ def sync_instances(self, req, body):
+ """Tell all cells to sync instance info."""
+ context = req.environ['nova.context']
+ authorize(context)
+ project_id = body.pop('project_id', None)
+ deleted = body.pop('deleted', False)
+ updated_since = body.pop('updated_since', None)
+ if body:
+ msg = _("Only 'updated_since' and 'project_id' are understood.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if updated_since:
+ try:
+ timeutils.parse_isotime(updated_since)
+ except ValueError:
+ msg = _('Invalid changes-since value')
+ raise exc.HTTPBadRequest(explanation=msg)
+ self.cells_rpcapi.sync_instances(context, project_id=project_id,
+ updated_since=updated_since, deleted=deleted)
+
+
+class Cells(extensions.ExtensionDescriptor):
+ """Enables cells-related functionality such as adding neighbor cells,
+ listing neighbor cells, and getting the capabilities of the local cell.
+ """
+
+ name = "Cells"
+ alias = "os-cells"
+ namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
+ updated = "2011-09-21T00:00:00+00:00"
+
+ def get_resources(self):
+ coll_actions = {
+ 'detail': 'GET',
+ 'info': 'GET',
+ 'sync_instances': 'POST',
+ }
+
+ res = extensions.ResourceExtension('os-cells',
+ Controller(), collection_actions=coll_actions)
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 52487c305..d1b39d6db 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -124,10 +124,17 @@ class HostController(object):
"""
context = req.environ['nova.context']
authorize(context)
+ filters = {}
zone = req.GET.get('zone', None)
- data = self.api.list_hosts(context, zone)
-
- return {'hosts': data}
+ if zone:
+ filters['availability_zone'] = zone
+ services = self.api.service_get_all(context, filters=filters)
+ hosts = []
+ for service in services:
+ hosts.append({'host_name': service['host'],
+ 'service': service['topic'],
+ 'zone': service['availability_zone']})
+ return {'hosts': hosts}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostUpdateDeserializer)
@@ -243,6 +250,55 @@ class HostController(object):
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
+ @staticmethod
+ def _get_total_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(total)',
+ 'cpu': compute_node['vcpus'],
+ 'memory_mb': compute_node['memory_mb'],
+ 'disk_gb': compute_node['local_gb']}}
+
+ @staticmethod
+ def _get_used_now_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(used_now)',
+ 'cpu': compute_node['vcpus_used'],
+ 'memory_mb': compute_node['memory_mb_used'],
+ 'disk_gb': compute_node['local_gb_used']}}
+
+ @staticmethod
+ def _get_resource_totals_from_instances(host_name, instances):
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
+ for instance in instances:
+ cpu_sum += instance['vcpus']
+ mem_sum += instance['memory_mb']
+ hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
+
+ return {'resource': {'host': host_name,
+ 'project': '(used_max)',
+ 'cpu': cpu_sum,
+ 'memory_mb': mem_sum,
+ 'disk_gb': hdd_sum}}
+
+ @staticmethod
+ def _get_resources_by_project(host_name, instances):
+        # Accumulate per-project resource usage across all instances.
+ project_map = {}
+ for instance in instances:
+ resource = project_map.setdefault(instance['project_id'],
+ {'host': host_name,
+ 'project': instance['project_id'],
+ 'cpu': 0,
+ 'memory_mb': 0,
+ 'disk_gb': 0})
+ resource['cpu'] += instance['vcpus']
+ resource['memory_mb'] += instance['memory_mb']
+ resource['disk_gb'] += (instance['root_gb'] +
+ instance['ephemeral_gb'])
+ return project_map
+
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
@@ -256,14 +312,26 @@ class HostController(object):
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
+ host_name = id
try:
- data = self.api.describe_host(context, id)
+ service = self.api.service_get_by_compute_host(context, host_name)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.message)
except exception.AdminRequired:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
- return {'host': data}
+ compute_node = service['compute_node'][0]
+ instances = self.api.instance_get_all_by_host(context, host_name)
+ resources = [self._get_total_resources(host_name, compute_node)]
+ resources.append(self._get_used_now_resources(host_name,
+ compute_node))
+ resources.append(self._get_resource_totals_from_instances(host_name,
+ instances))
+ by_proj_resources = self._get_resources_by_project(host_name,
+ instances)
+ for resource in by_proj_resources.itervalues():
+ resources.append({'resource': resource})
+ return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
index 4990c1b5e..3cdda1d76 100644
--- a/nova/api/openstack/compute/contrib/networks_associate.py
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -62,6 +62,6 @@ class Networks_associate(extensions.ExtensionDescriptor):
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
- self, 'os-admin-networks', NetworkAssociateActionController())
+ self, 'os-networks', NetworkAssociateActionController())
return [extension]
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index 4be0bd100..d1d172686 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,199 +16,155 @@
# License for the specific language governing permissions and limitations
# under the License.
-
import netaddr
-import netaddr.core as netexc
+import webob
from webob import exc
from nova.api.openstack import extensions
-from nova import context as nova_context
+from nova.api.openstack import wsgi
from nova import exception
-import nova.network
-from nova.openstack.common import cfg
+from nova import network
from nova.openstack.common import log as logging
-from nova import quota
-
-
-CONF = cfg.CONF
-
-try:
- os_network_opts = [
- cfg.BoolOpt("enable_network_quota",
- default=False,
- help="Enables or disables quotaing of tenant networks"),
- cfg.StrOpt('use_quantum_default_nets',
- default="False",
- help=('Control for checking for default networks')),
- cfg.StrOpt('quantum_default_tenant_id',
- default="default",
- help=('Default tenant id when creating quantum '
- 'networks'))
- ]
- CONF.register_opts(os_network_opts)
-except cfg.DuplicateOptError:
- # NOTE(jkoelker) These options are verbatim elsewhere this is here
- # to make sure they are registered for our use.
- pass
-
-if CONF.enable_network_quota:
- opts = [
- cfg.IntOpt('quota_networks',
- default=3,
- help='number of private networks allowed per project'),
- ]
- CONF.register_opts(opts)
-
-QUOTAS = quota.QUOTAS
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'os-networks')
-
-
-def network_dict(network):
- return {"id": network.get("uuid") or network["id"],
- "cidr": network["cidr"],
- "label": network["label"]}
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'networks')
+authorize_view = extensions.extension_authorizer('compute',
+ 'networks:view')
+
+
+def network_dict(context, network):
+ fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
+ 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
+ admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
+ 'injected', 'bridge', 'vlan', 'vpn_public_address',
+ 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
+ 'project_id', 'host', 'bridge_interface', 'multi_host',
+ 'priority', 'rxtx_base')
+ if network:
+ # NOTE(mnaser): We display a limited set of fields so users can know
+ # what networks are available, extra system-only fields
+ # are only visible if they are an admin.
+ if context.is_admin:
+ fields += admin_fields
+ result = dict((field, network[field]) for field in fields)
+ if 'uuid' in network:
+ result['id'] = network['uuid']
+ return result
+ else:
+ return {}
+
+
+class NetworkController(wsgi.Controller):
-class NetworkController(object):
def __init__(self, network_api=None):
- self.network_api = nova.network.API()
- self._default_networks = []
-
- def _refresh_default_networks(self):
- self._default_networks = []
- if CONF.use_quantum_default_nets == "True":
- try:
- self._default_networks = self._get_default_networks()
- except Exception:
- LOG.exception("Failed to get default networks")
-
- def _get_default_networks(self):
- project_id = CONF.quantum_default_tenant_id
- ctx = nova_context.RequestContext(user_id=None,
- project_id=project_id)
- networks = {}
- for n in self.network_api.get_all(ctx):
- networks[n['id']] = n['label']
- return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+ self.network_api = network_api or network.API()
def index(self, req):
context = req.environ['nova.context']
- authorize(context)
+ authorize_view(context)
networks = self.network_api.get_all(context)
- if not self._default_networks:
- self._refresh_default_networks()
- networks.extend(self._default_networks)
- return {'networks': [network_dict(n) for n in networks]}
+ result = [network_dict(context, net_ref) for net_ref in networks]
+ return {'networks': result}
- def show(self, req, id):
+ @wsgi.action("disassociate")
+ def _disassociate_host_and_project(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
+ LOG.debug(_("Disassociating network with id %s"), id)
+
+ try:
+ self.network_api.associate(context, id, host=None, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize_view(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
- return network_dict(network)
+ return {'network': network_dict(context, network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
- try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=-1)
- except Exception:
- reservation = None
- LOG.exception(_("Failed to update usages deallocating "
- "network."))
-
LOG.info(_("Deleting network with id %s") % id)
-
try:
self.network_api.delete(context, id)
- if CONF.enable_network_quota and reservation:
- QUOTAS.commit(context, reservation)
- response = exc.HTTPAccepted()
except exception.NetworkNotFound:
- response = exc.HTTPNotFound(_("Network not found"))
-
- return response
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
def create(self, req, body):
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- context = req.environ["nova.context"]
+ context = req.environ['nova.context']
authorize(context)
- network = body["network"]
- keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
- "num_networks"]
- kwargs = dict((k, network.get(k)) for k in keys)
+ def bad(e):
+ return exc.HTTPUnprocessableEntity(explanation=e)
- label = network["label"]
+ if not (body and body.get("network")):
+ raise bad(_("Missing network in body"))
- if not (kwargs["cidr"] or kwargs["cidr_v6"]):
- msg = _("No CIDR requested")
- raise exc.HTTPBadRequest(explanation=msg)
- if kwargs["cidr"]:
- try:
- net = netaddr.IPNetwork(kwargs["cidr"])
- if net.size < 4:
- msg = _("Requested network does not contain "
- "enough (2+) usable hosts")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrFormatError:
- msg = _("CIDR is malformed.")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrConversionError:
- msg = _("Address could not be converted.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- networks = []
+ params = body["network"]
+ if not params.get("label"):
+ raise bad(_("Network label is required"))
+
+ cidr = params.get("cidr") or params.get("cidr_v6")
+ if not cidr:
+ raise bad(_("Network cidr or cidr_v6 is required"))
+
+ LOG.debug(_("Creating network with label %s") % params["label"])
+
+ params["num_networks"] = 1
+ params["network_size"] = netaddr.IPNetwork(cidr).size
+
+ network = self.network_api.create(context, **params)[0]
+ return {"network": network_dict(context, network)}
+
+ def add(self, req, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ network_id = body.get('id', None)
+ project_id = context.project_id
+ LOG.debug(_("Associating network %(network)s"
+ " with project %(project)s") %
+ {"network": network_id or "",
+ "project": project_id})
try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=1)
- except exception.OverQuota:
- msg = _("Quota exceeded, too many networks.")
+ self.network_api.add_network_to_project(
+ context, project_id, network_id)
+ except Exception as ex:
+ msg = (_("Cannot associate network %(network)s"
+ " with project %(project)s: %(message)s") %
+ {"network": network_id or "",
+ "project": project_id,
+ "message": getattr(ex, "value", str(ex))})
raise exc.HTTPBadRequest(explanation=msg)
- try:
- networks = self.network_api.create(context,
- label=label, **kwargs)
- if CONF.enable_network_quota:
- QUOTAS.commit(context, reservation)
- except Exception:
- if CONF.enable_network_quota:
- QUOTAS.rollback(context, reservation)
- msg = _("Create networks failed")
- LOG.exception(msg, extra=network)
- raise exc.HTTPServiceUnavailable(explanation=msg)
- return {"network": network_dict(networks[0])}
+ return webob.Response(status_int=202)
class Os_networks(extensions.ExtensionDescriptor):
- """Tenant-based Network Management Extension."""
+ """Admin-only Network Management Extension."""
- name = "OSNetworks"
+ name = "Networks"
alias = "os-networks"
- namespace = "http://docs.openstack.org/compute/ext/os-networks/api/v1.1"
- updated = "2012-03-07T09:46:43-05:00"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-networks/api/v1.1")
+ updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
- ext = extensions.ResourceExtension('os-networks',
- NetworkController())
- return [ext]
-
-
-def _sync_networks(context, project_id, session):
- ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
- ctx = ctx.elevated()
- networks = nova.network.api.API().get_all(ctx)
- return dict(networks=len(networks))
-
-
-if CONF.enable_network_quota:
- QUOTAS.register_resource(quota.ReservableResource('networks',
- _sync_networks,
- 'quota_networks'))
+ member_actions = {'action': 'POST'}
+ collection_actions = {'add': 'POST'}
+ res = extensions.ResourceExtension(
+ 'os-networks',
+ NetworkController(),
+ member_actions=member_actions,
+ collection_actions=collection_actions)
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
new file mode 100644
index 000000000..03178ab65
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -0,0 +1,214 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import netaddr
+import netaddr.core as netexc
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import context as nova_context
+from nova import exception
+import nova.network
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import quota
+
+
+CONF = cfg.CONF
+
+try:
+ os_network_opts = [
+ cfg.BoolOpt("enable_network_quota",
+ default=False,
+ help="Enables or disables quotaing of tenant networks"),
+ cfg.StrOpt('use_quantum_default_nets',
+ default="False",
+ help=('Control for checking for default networks')),
+ cfg.StrOpt('quantum_default_tenant_id',
+ default="default",
+ help=('Default tenant id when creating quantum '
+ 'networks'))
+ ]
+ CONF.register_opts(os_network_opts)
+except cfg.DuplicateOptError:
+    # NOTE(jkoelker) These options are defined verbatim elsewhere; this is
+    # here to make sure they are registered for our use.
+ pass
+
+if CONF.enable_network_quota:
+ opts = [
+ cfg.IntOpt('quota_networks',
+ default=3,
+ help='number of private networks allowed per project'),
+ ]
+ CONF.register_opts(opts)
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
+
+
+def network_dict(network):
+ return {"id": network.get("uuid") or network["id"],
+ "cidr": network["cidr"],
+ "label": network["label"]}
+
+
+class NetworkController(object):
+ def __init__(self, network_api=None):
+ self.network_api = nova.network.API()
+ self._default_networks = []
+
+ def _refresh_default_networks(self):
+ self._default_networks = []
+ if CONF.use_quantum_default_nets == "True":
+ try:
+ self._default_networks = self._get_default_networks()
+ except Exception:
+ LOG.exception("Failed to get default networks")
+
+ def _get_default_networks(self):
+ project_id = CONF.quantum_default_tenant_id
+ ctx = nova_context.RequestContext(user_id=None,
+ project_id=project_id)
+ networks = {}
+ for n in self.network_api.get_all(ctx):
+ networks[n['id']] = n['label']
+ return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ networks = self.network_api.get_all(context)
+ if not self._default_networks:
+ self._refresh_default_networks()
+ networks.extend(self._default_networks)
+ return {'networks': [network_dict(n) for n in networks]}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Showing network with id %s") % id)
+ try:
+ network = self.network_api.get(context, id)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return network_dict(network)
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=-1)
+ except Exception:
+ reservation = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "network."))
+
+ LOG.info(_("Deleting network with id %s") % id)
+
+ try:
+ self.network_api.delete(context, id)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = exc.HTTPAccepted()
+ except exception.NetworkNotFound:
+ response = exc.HTTPNotFound(_("Network not found"))
+
+ return response
+
+ def create(self, req, body):
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ network = body["network"]
+ keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
+ "num_networks"]
+ kwargs = dict((k, network.get(k)) for k in keys)
+
+ label = network["label"]
+
+ if not (kwargs["cidr"] or kwargs["cidr_v6"]):
+ msg = _("No CIDR requested")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if kwargs["cidr"]:
+ try:
+ net = netaddr.IPNetwork(kwargs["cidr"])
+ if net.size < 4:
+ msg = _("Requested network does not contain "
+ "enough (2+) usable hosts")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrFormatError:
+ msg = _("CIDR is malformed.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrConversionError:
+ msg = _("Address could not be converted.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ networks = []
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=1)
+ except exception.OverQuota:
+ msg = _("Quota exceeded, too many networks.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ networks = self.network_api.create(context,
+ label=label, **kwargs)
+ if CONF.enable_network_quota:
+ QUOTAS.commit(context, reservation)
+ except Exception:
+ if CONF.enable_network_quota:
+ QUOTAS.rollback(context, reservation)
+ msg = _("Create networks failed")
+ LOG.exception(msg, extra=network)
+ raise exc.HTTPServiceUnavailable(explanation=msg)
+ return {"network": network_dict(networks[0])}
+
+
+class Os_tenant_networks(extensions.ExtensionDescriptor):
+ """Tenant-based Network Management Extension."""
+
+ name = "OSTenantNetworks"
+ alias = "os-tenant-networks"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-tenant-networks/api/v2")
+ updated = "2012-03-07T09:46:43-05:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-tenant-networks',
+ NetworkController())
+ return [ext]
+
+
+def _sync_networks(context, project_id, session):
+ ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
+ ctx = ctx.elevated()
+ networks = nova.network.api.API().get_all(ctx)
+ return dict(networks=len(networks))
+
+
+if CONF.enable_network_quota:
+ QUOTAS.register_resource(quota.ReservableResource('networks',
+ _sync_networks,
+ 'quota_networks'))
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index c792c72da..2786ad814 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -21,6 +21,7 @@ import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+from nova import availability_zones
from nova import db
from nova import exception
from nova.openstack.common import cfg
@@ -69,6 +70,7 @@ class ServiceController(object):
authorize(context)
now = timeutils.utcnow()
services = db.service_get_all(context)
+ services = availability_zones.set_availability_zones(context, services)
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index d3a6fc8a9..f0fdb5a15 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -538,10 +538,10 @@ class Controller(wsgi.Controller):
marker=marker)
except exception.MarkerNotFound as e:
msg = _('marker [%s] not found') % marker
- raise webob.exc.HTTPBadRequest(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as e:
msg = _("Flavor could not be found")
- raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
+ raise exc.HTTPUnprocessableEntity(explanation=msg)
if is_detail:
self._add_instance_faults(context, instance_list)
@@ -828,21 +828,24 @@ class Controller(wsgi.Controller):
try:
min_count = int(min_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('min_count must be an '
- 'integer value'))
+ msg = _('min_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count < 1:
- raise webob.exc.HTTPBadRequest(_('min_count must be > 0'))
+ msg = _('min_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
max_count = int(max_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('max_count must be an '
- 'integer value'))
+ msg = _('max_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if max_count < 1:
- raise webob.exc.HTTPBadRequest(_('max_count must be > 0'))
+ msg = _('max_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count > max_count:
- raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count'))
+ msg = _('min_count must be <= max_count')
+ raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
@@ -1202,7 +1205,8 @@ class Controller(wsgi.Controller):
try:
body = body['rebuild']
except (KeyError, TypeError):
- raise exc.HTTPBadRequest(_("Invalid request body"))
+ msg = _('Invalid request body')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = body["imageRef"]
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 70ff73b2b..77ab4415c 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
- """Add a 'nova.context' to WSGI environ."""
+ """Limit the size of incoming requests."""
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index 0942bae28..133946794 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -65,7 +65,7 @@ class CellsManager(manager.Manager):
Scheduling requests get passed to the scheduler class.
"""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def __init__(self, *args, **kwargs):
# Mostly for tests.
@@ -186,6 +186,10 @@ class CellsManager(manager.Manager):
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
+ def get_cell_info_for_neighbors(self, _ctxt):
+ """Return cell information for our neighbor cells."""
+ return self.state_manager.get_cell_info_for_neighbors()
+
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
@@ -218,3 +222,10 @@ class CellsManager(manager.Manager):
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
+
+ def sync_instances(self, ctxt, project_id, updated_since, deleted):
+ """Force a sync of all instances, potentially by project_id,
+ and potentially since a certain date/time.
+ """
+ self.msg_runner.sync_instances(ctxt, project_id, updated_since,
+ deleted)
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 56d521892..34ca74855 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -27,6 +27,7 @@ import sys
from eventlet import queue
from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
from nova import compute
from nova import context
from nova.db import base
@@ -37,6 +38,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import utils
@@ -778,6 +780,26 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
return
self.db.bw_usage_update(message.ctxt, **bw_update_info)
+ def _sync_instance(self, ctxt, instance):
+ if instance['deleted']:
+ self.msg_runner.instance_destroy_at_top(ctxt, instance)
+ else:
+ self.msg_runner.instance_update_at_top(ctxt, instance)
+
+ def sync_instances(self, message, project_id, updated_since, deleted,
+ **kwargs):
+ projid_str = project_id is None and "<all>" or project_id
+ since_str = updated_since is None and "<all>" or updated_since
+ LOG.info(_("Forcing a sync of instances, project_id="
+ "%(projid_str)s, updated_since=%(since_str)s"), locals())
+ if updated_since is not None:
+ updated_since = timeutils.parse_isotime(updated_since)
+ instances = cells_utils.get_instances_to_sync(message.ctxt,
+ updated_since=updated_since, project_id=project_id,
+ deleted=deleted)
+ for instance in instances:
+ self._sync_instance(message.ctxt, instance)
+
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
'broadcast': _BroadcastMessage,
@@ -1004,6 +1026,18 @@ class MessageRunner(object):
'up', run_locally=False)
message.process()
+ def sync_instances(self, ctxt, project_id, updated_since, deleted):
+ """Force a sync of all instances, potentially by project_id,
+ and potentially since a certain date/time.
+ """
+ method_kwargs = dict(project_id=project_id,
+ updated_since=updated_since,
+ deleted=deleted)
+ message = _BroadcastMessage(self, ctxt, 'sync_instances',
+ method_kwargs, 'down',
+ run_locally=False)
+ message.process()
+
@staticmethod
def get_message_types():
return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index 8ce298829..0ab4fc352 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -40,6 +40,7 @@ class CellsAPI(rpc_proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -136,3 +137,21 @@ class CellsAPI(rpc_proxy.RpcProxy):
'info_cache': iicache}
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance))
+
+ def get_cell_info_for_neighbors(self, ctxt):
+ """Get information about our neighbor cells from the manager."""
+ if not CONF.cells.enable:
+ return []
+ return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'),
+ version='1.1')
+
+ def sync_instances(self, ctxt, project_id=None, updated_since=None,
+ deleted=False):
+ """Ask all cells to sync instance data."""
+ if not CONF.cells.enable:
+ return
+ return self.cast(ctxt, self.make_msg('sync_instances',
+ project_id=project_id,
+ updated_since=updated_since,
+ deleted=deleted),
+ version='1.1')
diff --git a/nova/cells/state.py b/nova/cells/state.py
index 345c44ca9..e3886bedb 100644
--- a/nova/cells/state.py
+++ b/nova/cells/state.py
@@ -75,8 +75,8 @@ class CellState(object):
def get_cell_info(self):
"""Return subset of cell information for OS API use."""
- db_fields_to_return = ['id', 'is_parent', 'weight_scale',
- 'weight_offset', 'username', 'rpc_host', 'rpc_port']
+ db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset',
+ 'username', 'rpc_host', 'rpc_port']
cell_info = dict(name=self.name, capabilities=self.capabilities)
if self.db_info:
for field in db_fields_to_return:
@@ -267,6 +267,15 @@ class CellStateManager(base.Base):
self._update_our_capacity(ctxt)
@sync_from_db
+ def get_cell_info_for_neighbors(self):
+ """Return cell information for all neighbor cells."""
+ cell_list = [cell.get_cell_info()
+ for cell in self.child_cells.itervalues()]
+ cell_list.extend([cell.get_cell_info()
+ for cell in self.parent_cells.itervalues()])
+ return cell_list
+
+ @sync_from_db
def get_my_state(self):
"""Return information for my (this) cell."""
return self.my_cell_state
diff --git a/nova/compute/api.py b/nova/compute/api.py
index d0a039644..8ba6b97aa 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,6 +92,7 @@ CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
@@ -404,20 +405,20 @@ class API(base.Base):
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
- def _create_instance(self, context, instance_type,
- image_href, kernel_id, ramdisk_id,
- min_count, max_count,
- display_name, display_description,
- key_name, key_data, security_group,
- availability_zone, user_data, metadata,
- injected_files, admin_password,
- access_ip_v4, access_ip_v6,
- requested_networks, config_drive,
- block_device_mapping, auto_disk_config,
- reservation_id=None, scheduler_hints=None):
+ def _validate_and_provision_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data,
+ metadata, injected_files,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping,
+ auto_disk_config, reservation_id,
+ scheduler_hints):
"""Verify all the input parameters regardless of the provisioning
- strategy being performed and schedule the instance(s) for
- creation."""
+ strategy being performed."""
if not metadata:
metadata = {}
@@ -437,6 +438,19 @@ class API(base.Base):
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
+ if user_data:
+ l = len(user_data)
+ if l > MAX_USERDATA_SIZE:
+ # NOTE(mikal): user_data is stored in a text column, and
+ # the database might silently truncate if its over length.
+ raise exception.InstanceUserDataTooLarge(
+ length=l, maxsize=MAX_USERDATA_SIZE)
+
+ try:
+ base64.decodestring(user_data)
+ except base64.binascii.Error:
+ raise exception.InstanceUserDataMalformed()
+
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
@@ -484,9 +498,6 @@ class API(base.Base):
key_name)
key_data = key_pair['public_key']
- if reservation_id is None:
- reservation_id = utils.generate_uid('r')
-
root_device_name = block_device.properties_root_device_name(
image.get('properties', {}))
@@ -524,19 +535,6 @@ class API(base.Base):
'root_device_name': root_device_name,
'progress': 0}
- if user_data:
- l = len(user_data)
- if l > MAX_USERDATA_SIZE:
- # NOTE(mikal): user_data is stored in a text column, and
- # the database might silently truncate if its over length.
- raise exception.InstanceUserDataTooLarge(
- length=l, maxsize=MAX_USERDATA_SIZE)
-
- try:
- base64.decodestring(user_data)
- except base64.binascii.Error:
- raise exception.InstanceUserDataMalformed()
-
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
@@ -579,6 +577,36 @@ class API(base.Base):
'security_group': security_group,
}
+ return (instances, request_spec, filter_properties)
+
+ def _create_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id=None, scheduler_hints=None):
+ """Verify all the input parameters regardless of the provisioning
+ strategy being performed and schedule the instance(s) for
+ creation."""
+
+ if reservation_id is None:
+ reservation_id = utils.generate_uid('r')
+
+ (instances, request_spec, filter_properties) = \
+ self._validate_and_provision_instance(context, instance_type,
+ image_href, kernel_id, ramdisk_id, min_count,
+ max_count, display_name, display_description,
+ key_name, key_data, security_group, availability_zone,
+ user_data, metadata, injected_files, access_ip_v4,
+ access_ip_v6, requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id, scheduler_hints)
+
self.scheduler_rpcapi.run_instance(context,
request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
@@ -850,6 +878,20 @@ class API(base.Base):
for host_name in host_names:
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by admin_actions api
+
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state, eg. 'active'
+ """
+ self.update(context, instance,
+ vm_state=new_state,
+ task_state=None)
+
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
@@ -954,19 +996,16 @@ class API(base.Base):
host=src_host, cast=False,
reservations=downsize_reservations)
- # NOTE(jogo): db allows for multiple compute services per host
+ is_up = False
try:
- services = self.db.service_get_all_compute_by_host(
+ service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
- except exception.ComputeHostNotFound:
- services = []
-
- is_up = False
- for service in services:
if self.servicegroup_api.service_is_up(service):
is_up = True
cb(context, instance, bdms)
- break
+ except exception.ComputeHostNotFound:
+ pass
+
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
@@ -1264,7 +1303,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
- extra_properties=None):
+ extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1274,14 +1313,26 @@ class API(base.Base):
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- recv_meta = self._create_image(context, instance, name, 'backup',
- backup_type=backup_type, rotation=rotation,
- extra_properties=extra_properties)
- return recv_meta
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_BACKUP,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'backup', backup_type=backup_type,
+ rotation=rotation, extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='backup',
+ backup_type=backup_type, rotation=rotation)
+ return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
- def snapshot(self, context, instance, name, extra_properties=None):
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1290,12 +1341,25 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
- return self._create_image(context, instance, name, 'snapshot',
- extra_properties=extra_properties)
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_SNAPSHOT,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'snapshot', extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='snapshot')
+ return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
- """Create snapshot or backup for an instance on this host.
+ """Create new image entry in the image service. This new image
+ will be reserved for the compute manager to upload a snapshot
+ or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1309,29 +1373,6 @@ class API(base.Base):
"""
instance_uuid = instance['uuid']
- if image_type == "snapshot":
- task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == "backup":
- task_state = task_states.IMAGE_BACKUP
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
-
- # change instance state and notify
- old_vm_state = instance["vm_state"]
- old_task_state = instance["task_state"]
-
- self.db.instance_test_and_set(
- context, instance_uuid, 'task_state', [None], task_state)
-
- # NOTE(sirp): `instance_test_and_set` only sets the task-state in the
- # DB, but we also need to set it on the current instance so that the
- # correct value is passed down to the compute manager.
- instance['task_state'] = task_state
-
- notifications.send_update_with_states(context, instance, old_vm_state,
- instance["vm_state"], old_task_state, instance["task_state"],
- service="api", verify_states=True)
-
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
@@ -1378,11 +1419,7 @@ class API(base.Base):
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
- recv_meta = self.image_service.create(context, sent_meta)
- self.compute_rpcapi.snapshot_instance(context, instance=instance,
- image_id=recv_meta['id'], image_type=image_type,
- backup_type=backup_type, rotation=rotation)
- return recv_meta
+ return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
@@ -1525,12 +1562,9 @@ class API(base.Base):
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
- network_info = self.network_api.get_instance_nw_info(elevated,
- instance)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
- network_info=network_info,
reboot_type=reboot_type)
def _get_image(self, context, image_href):
@@ -1643,6 +1677,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'reverting'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
@@ -1667,6 +1706,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'confirming'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
@@ -1829,6 +1873,12 @@ class API(base.Base):
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
args = {
"instance": instance,
"instance_type": new_instance_type,
@@ -2163,140 +2213,76 @@ class API(base.Base):
disk_over_commit, instance, host_name)
-def check_host(fn):
- """Decorator that makes sure that the host exists."""
- def wrapped(self, context, host_name, *args, **kwargs):
- if self.does_host_exist(context, host_name):
- return fn(self, context, host_name, *args, **kwargs)
- else:
- raise exception.HostNotFound(host=host_name)
- return wrapped
-
-
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
- def __init__(self):
- self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ def __init__(self, rpcapi=None):
+ self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
super(HostAPI, self).__init__()
- @check_host
+ def _assert_host_exists(self, context, host_name):
+ """Raise HostNotFound if compute host doesn't exist."""
+ if not self.db.service_get_by_host_and_topic(context, host_name,
+ CONF.compute_topic):
+ raise exception.HostNotFound(host=host_name)
+
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.set_host_enabled(context, enabled=enabled,
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
- @check_host
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.get_host_uptime(context, host=host_name)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.get_host_uptime(context, host=host_name)
- @check_host
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
- return self.compute_rpcapi.host_power_action(context, action=action,
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_power_action(context, action=action,
host=host_name)
- def list_hosts(self, context, zone=None, service=None):
- """Returns a summary list of enabled hosts, optionally filtering
- by zone and/or service type.
+ def set_host_maintenance(self, context, host_name, mode):
+ """Start/Stop host maintenance window. On start, it triggers
+ guest VMs evacuation."""
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_maintenance_mode(context,
+ host_param=host_name, mode=mode, host=host_name)
+
+ def service_get_all(self, context, filters=None):
+ """Returns a list of services, optionally filtering the results.
+
+ If specified, 'filters' should be a dictionary containing services
+ attributes and matching values. Ie, to get a list of services for
+ the 'compute' topic, use filters={'topic': 'compute'}.
"""
- LOG.debug(_("Listing hosts"))
+ if filters is None:
+ filters = {}
services = self.db.service_get_all(context, False)
- services = availability_zones.set_availability_zones(context, services)
- if zone:
- services = [s for s in services if s['availability_zone'] == zone]
- hosts = []
- for host in services:
- hosts.append({'host_name': host['host'], 'service': host['topic'],
- 'zone': host['availability_zone']})
- if service:
- hosts = [host for host in hosts
- if host["service"] == service]
- return hosts
-
- def does_host_exist(self, context, host_name):
- """
- Returns True if the host with host_name exists, False otherwise
- """
- return self.db.service_does_host_exist(context, host_name)
+ services = availability_zones.set_availability_zones(context,
+ services)
+ ret_services = []
+ for service in services:
+ for key, val in filters.iteritems():
+ if service[key] != val:
+ break
+ else:
+ # All filters matched.
+ ret_services.append(service)
+ return ret_services
- def describe_host(self, context, host_name):
- """
- Returns information about a host in this kind of format:
- :returns:
- ex.::
- {'host': 'hostname',
- 'project': 'admin',
- 'cpu': 1,
- 'memory_mb': 2048,
- 'disk_gb': 30}
- """
- # Getting compute node info and related instances info
- try:
- compute_ref = self.db.service_get_all_compute_by_host(context,
- host_name)
- compute_ref = compute_ref[0]
- except exception.ComputeHostNotFound:
- raise exception.HostNotFound(host=host_name)
- instance_refs = self.db.instance_get_all_by_host(context,
- compute_ref['host'])
-
- # Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
- resources = [{'resource': {'host': host_name, 'project': '(total)',
- 'cpu': compute_ref['vcpus'],
- 'memory_mb': compute_ref['memory_mb'],
- 'disk_gb': compute_ref['local_gb']}},
- {'resource': {'host': host_name, 'project': '(used_now)',
- 'cpu': compute_ref['vcpus_used'],
- 'memory_mb': compute_ref['memory_mb_used'],
- 'disk_gb': compute_ref['local_gb_used']}}]
-
- cpu_sum = 0
- mem_sum = 0
- hdd_sum = 0
- for i in instance_refs:
- cpu_sum += i['vcpus']
- mem_sum += i['memory_mb']
- hdd_sum += i['root_gb'] + i['ephemeral_gb']
-
- resources.append({'resource': {'host': host_name,
- 'project': '(used_max)',
- 'cpu': cpu_sum,
- 'memory_mb': mem_sum,
- 'disk_gb': hdd_sum}})
-
- # Getting usage resource per project
- project_ids = [i['project_id'] for i in instance_refs]
- project_ids = list(set(project_ids))
- for project_id in project_ids:
- vcpus = [i['vcpus'] for i in instance_refs
- if i['project_id'] == project_id]
-
- mem = [i['memory_mb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- resources.append({'resource': {'host': host_name,
- 'project': project_id,
- 'cpu': sum(vcpus),
- 'memory_mb': sum(mem),
- 'disk_gb': sum(disk)}})
- return resources
-
- @check_host
- def set_host_maintenance(self, context, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation."""
- return self.compute_rpcapi.host_maintenance_mode(context,
- host_param=host, mode=mode, host=host)
+ def service_get_by_compute_host(self, context, host_name):
+ """Get service entry for the given compute hostname."""
+ return self.db.service_get_by_compute_host(context, host_name)
+
+ def instance_get_all_by_host(self, context, host_name):
+ """Return all instances on the given host."""
+ return self.db.instance_get_all_by_host(context, host_name)
class AggregateAPI(base.Base):
@@ -2360,8 +2346,7 @@ class AggregateAPI(base.Base):
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
@@ -2372,8 +2357,7 @@ class AggregateAPI(base.Base):
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 698c6eed0..d1d9a11d2 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -18,7 +18,7 @@
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
-from nova.compute import task_states
+from nova.compute import instance_types
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
@@ -115,15 +115,28 @@ class ComputeCellsAPI(compute_api.API):
"""
return
- def _create_image(self, context, instance, name, image_type,
- backup_type=None, rotation=None, extra_properties=None):
- if backup_type:
- return self._call_to_cells(context, instance, 'backup',
- name, backup_type, rotation,
- extra_properties=extra_properties)
- else:
- return self._call_to_cells(context, instance, 'snapshot',
- name, extra_properties=extra_properties)
+ def backup(self, context, instance, name, backup_type, rotation,
+ extra_properties=None, image_id=None):
+ """Backup the given instance."""
+ image_meta = super(ComputeCellsAPI, self).backup(context,
+ instance, name, backup_type, rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'backup', name,
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ return image_meta
+
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
+ """Snapshot the given instance."""
+ image_meta = super(ComputeCellsAPI, self).snapshot(context,
+ instance, name, extra_properties=extra_properties,
+ image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'snapshot',
+ name, extra_properties=extra_properties, image_id=image_id)
+ return image_meta
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
@@ -131,17 +144,45 @@ class ComputeCellsAPI(compute_api.API):
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
- @validate_cell
- def update(self, context, instance, **kwargs):
- """Update an instance."""
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by admin_actions api
+
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state to change
+ the instance's state to,
+ eg. 'active'
+ """
+ self.update(context, instance,
+ pass_on_state_change=True,
+ vm_state=new_state,
+ task_state=None)
+
+ def update(self, context, instance, pass_on_state_change=False, **kwargs):
+ """
+ Update an instance.
+ :param pass_on_state_change: if true, the state change will be passed
+ on to child cells
+ """
+ cell_name = instance['cell_name']
+ if cell_name and self._cell_read_only(cell_name):
+ raise exception.InstanceInvalidState(
+ attr="vm_state",
+ instance_uuid=instance['uuid'],
+ state="temporary_readonly",
+ method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
- # We need to skip vm_state/task_state updates... those will
- # happen when via a a _cast_to_cells for running a different
- # compute api method
kwargs_copy = kwargs.copy()
- kwargs_copy.pop('vm_state', None)
- kwargs_copy.pop('task_state', None)
+ if not pass_on_state_change:
+ # We need to skip vm_state/task_state updates... those will
+ # happen via a _cast_to_cells when running a different
+ # compute api method
+ kwargs_copy.pop('vm_state', None)
+ kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
@@ -241,22 +282,14 @@ class ComputeCellsAPI(compute_api.API):
@validate_cell
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
- # NOTE(markwash): regular api manipulates the migration here, but we
- # don't have access to it. So to preserve the interface just update the
- # vm and task state.
- self.update(context, instance,
- task_state=task_states.RESIZE_REVERTING)
+ super(ComputeCellsAPI, self).revert_resize(context, instance)
self._cast_to_cells(context, instance, 'revert_resize')
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
- # NOTE(markwash): regular api manipulates migration here, but we don't
- # have the migration in the api database. So to preserve the interface
- # just update the vm and task state without calling super()
- self.update(context, instance, task_state=None,
- vm_state=vm_states.ACTIVE)
+ super(ComputeCellsAPI, self).confirm_resize(context, instance)
self._cast_to_cells(context, instance, 'confirm_resize')
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
@@ -269,8 +302,36 @@ class ComputeCellsAPI(compute_api.API):
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- super(ComputeCellsAPI, self).resize(context, instance, *args,
- **kwargs)
+ super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)
+
+ # NOTE(johannes): If we get to this point, then we know the
+ # specified flavor_id is valid and exists. We'll need to load
+ # it again, but that should be safe.
+
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ flavor_id = kwargs.get('flavor_id')
+
+ if not flavor_id:
+ new_instance_type = old_instance_type
+ else:
+ new_instance_type = instance_types.get_instance_type_by_flavor_id(
+ flavor_id)
+
+ # NOTE(johannes): Later, when the resize is confirmed or reverted,
+ # the superclass implementations of those methods will need access
+ # to a local migration record for quota reasons. We don't need
+ # source and/or destination information, just the old and new
+ # instance_types. Status is set to 'finished' since nothing else
+ # will update the status along the way.
+ self.db.migration_create(context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': new_instance_type['id'],
+ 'status': 'finished'})
+
# FIXME(comstud): pass new instance_type object down to a method
# that'll unfold it
self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 85942541f..3bf8e61ef 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -25,10 +25,6 @@ building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
-**Related Flags**
-
-:instances_path: Where instances are kept on disk
-
"""
import contextlib
@@ -297,7 +293,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.22'
+ RPC_API_VERSION = '2.23'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -469,7 +465,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'resume guests'), instance=instance)
elif drv_state == power_state.RUNNING:
- # VMWareAPI drivers will raise an exception
+ # VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance,
@@ -682,9 +678,9 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
-
+ macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
- requested_networks)
+ requested_networks, macs)
block_device_info = self._prep_block_device(context,
instance, bdms)
instance = self._spawn(context, instance, image_meta,
@@ -915,7 +911,7 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=(task_states.SCHEDULING,
None))
- def _allocate_network(self, context, instance, requested_networks):
+ def _allocate_network(self, context, instance, requested_networks, macs):
"""Allocate networks for an instance and return the network info."""
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
@@ -926,7 +922,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
- requested_networks=requested_networks)
+ requested_networks=requested_networks,
+ macs=macs)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -1443,19 +1440,14 @@ class ComputeManager(manager.SchedulerDependentManager):
if block_device_info is None:
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
- # NOTE(danms): remove this when RPC API < 2.5 compatibility
- # is no longer needed
- if network_info is None:
- network_info = self._get_instance_nw_info(context, instance)
- else:
- network_info = network_model.NetworkInfo.hydrate(network_info)
+ network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
@@ -1476,10 +1468,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
@@ -2592,10 +2584,10 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def _get_compute_info(self, context, host):
- compute_node_ref = self.conductor_api.service_get_all_compute_by_host(
+ compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
try:
- return compute_node_ref[0]['compute_node'][0]
+ return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %(host)s not found") % locals())
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 075d59ec8..f3c3ae7a3 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -308,8 +308,7 @@ class ResourceTracker(object):
def _get_service(self, context):
try:
- return db.service_get_all_compute_by_host(context,
- self.host)[0]
+ return db.service_get_by_compute_host(context, self.host)
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
@@ -355,8 +354,7 @@ class ResourceTracker(object):
def confirm_resize(self, context, migration, status='confirmed'):
"""Cleanup usage for a confirmed resize."""
elevated = context.elevated()
- db.migration_update(elevated, migration['id'],
- {'status': status})
+ self.conductor_api.migration_update(elevated, migration, status)
self.update_available_resource(elevated)
def revert_resize(self, context, migration, status='reverted'):
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index ae283283b..3e7ed1cfd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -157,6 +157,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.21 - Add migrate_data dict param to pre_live_migration()
2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
+ 2.23 - Remove network_info from reboot_instance
'''
#
@@ -383,16 +384,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
_compute_topic(self.topic, ctxt, host, None),
version='2.20')
- def reboot_instance(self, ctxt, instance,
- block_device_info, network_info, reboot_type):
+ def reboot_instance(self, ctxt, instance, block_device_info,
+ reboot_type):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('reboot_instance',
instance=instance_p,
block_device_info=block_device_info,
- network_info=network_info,
reboot_type=reboot_type),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.5')
+ version='2.23')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
@@ -525,7 +525,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.3')
def snapshot_instance(self, ctxt, instance, image_id, image_type,
- backup_type, rotation):
+ backup_type=None, rotation=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('snapshot_instance',
instance=instance_p, image_id=image_id,
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4cc10604b..63b64f830 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -249,8 +249,11 @@ class LocalAPI(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self._manager.service_get_all_by(context, 'compute', host)
+ def service_get_by_compute_host(self, context, host):
+ result = self._manager.service_get_all_by(context, 'compute', host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
@@ -493,9 +496,12 @@ class API(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self.conductor_rpcapi.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self.conductor_rpcapi.service_get_all_by(context, 'compute',
- host)
+ def service_get_by_compute_host(self, context, host):
+ result = self.conductor_rpcapi.service_get_all_by(context, 'compute',
+ host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
def service_get_by_args(self, context, host, binary):
return self.conductor_rpcapi.service_get_all_by(context, host=host,
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 8c6f39f02..b0d4011ad 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -261,8 +261,9 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
- result = self.db.service_get_all_compute_by_host(context,
- host)
+ result = self.db.service_get_by_compute_host(context, host)
+ # FIXME(comstud): Potentially remove this on bump to v2.0
+ result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 243c028d9..2045f824d 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -65,7 +65,6 @@ class ConsoleProxyManager(manager.Manager):
def init_host(self):
self.driver.init_host()
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
@@ -93,7 +92,6 @@ class ConsoleProxyManager(manager.Manager):
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index e8eab4db2..bb1818943 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -49,7 +49,7 @@ class ConsoleVMRCManager(manager.Manager):
"""Get VIM session for the pool specified."""
vim_session = None
if pool['id'] not in self.sessions.keys():
- vim_session = vmwareapi_conn.VMWareAPISession(
+ vim_session = vmwareapi_conn.VMwareAPISession(
pool['address'],
pool['username'],
pool['password'],
@@ -75,7 +75,6 @@ class ConsoleVMRCManager(manager.Manager):
self.driver.setup_console(context, console)
return console
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
"""Adds a console for the instance.
@@ -105,7 +104,6 @@ class ConsoleVMRCManager(manager.Manager):
instance)
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
"""Removes a console entry."""
try:
diff --git a/nova/db/api.py b/nova/db/api.py
index b1552b480..ecfcfab15 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -151,9 +151,12 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
-def service_get_all_compute_by_host(context, host):
- """Get all compute services for a given host."""
- return IMPL.service_get_all_compute_by_host(context, host)
+def service_get_by_compute_host(context, host):
+ """Get the service entry for a given compute host.
+
+ Returns the service entry joined with the compute_node entry.
+ """
+ return IMPL.service_get_by_compute_host(context, host)
def service_get_all_compute_sorted(context):
@@ -1357,19 +1360,19 @@ def cell_create(context, values):
return IMPL.cell_create(context, values)
-def cell_update(context, cell_id, values):
+def cell_update(context, cell_name, values):
"""Update a child Cell entry."""
- return IMPL.cell_update(context, cell_id, values)
+ return IMPL.cell_update(context, cell_name, values)
-def cell_delete(context, cell_id):
+def cell_delete(context, cell_name):
"""Delete a child Cell."""
- return IMPL.cell_delete(context, cell_id)
+ return IMPL.cell_delete(context, cell_name)
-def cell_get(context, cell_id):
+def cell_get(context, cell_name):
"""Get a specific child Cell."""
- return IMPL.cell_get(context, cell_id)
+ return IMPL.cell_get(context, cell_name)
def cell_get_all(context):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 8930f6ccc..038a47ca1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -337,15 +337,6 @@ def service_get_all(context, disabled=None):
@require_admin_context
-def service_does_host_exist(context, host_name, include_disabled):
- query = get_session().query(func.count(models.Service.host)).\
- filter_by(host=host_name)
- if not include_disabled:
- query = query.filter_by(disabled=False)
- return query.scalar() > 0
-
-
-@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -370,12 +361,12 @@ def service_get_all_by_host(context, host):
@require_admin_context
-def service_get_all_compute_by_host(context, host):
+def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
- all()
+ first()
if not result:
raise exception.ComputeHostNotFound(host=host)
@@ -1787,42 +1778,6 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
@require_context
-def instance_test_and_set(context, instance_uuid, attr, ok_states, new_state):
- """Atomically check if an instance is in a valid state, and if it is, set
- the instance into a new state.
- """
- if not uuidutils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- session = get_session()
- with session.begin():
- query = model_query(context, models.Instance, session=session,
- project_only=True).\
- filter_by(uuid=instance_uuid)
-
- attr_column = getattr(models.Instance, attr)
- filter_op = None
- # NOTE(boris-42): `SELECT IN` doesn't work with None values because
- # they are incomparable.
- if None in ok_states:
- filter_op = or_(attr_column == None,
- attr_column.in_(filter(lambda x: x is not None,
- ok_states)))
- else:
- filter_op = attr_column.in_(ok_states)
-
- count = query.filter(filter_op).\
- update({attr: new_state}, synchronize_session=False)
- if count == 0:
- instance_ref = query.first()
- raise exception.InstanceInvalidState(
- attr=attr,
- instance_uuid=instance_ref['uuid'],
- state=instance_ref[attr],
- method='instance_test_and_set')
-
-
-@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@@ -3764,34 +3719,30 @@ def cell_create(context, values):
return cell
-def _cell_get_by_id_query(context, cell_id, session=None):
- return model_query(context, models.Cell, session=session).\
- filter_by(id=cell_id)
+def _cell_get_by_name_query(context, cell_name, session=None):
+ return model_query(context, models.Cell,
+ session=session).filter_by(name=cell_name)
@require_admin_context
-def cell_update(context, cell_id, values):
- cell = cell_get(context, cell_id)
- cell.update(values)
- cell.save()
+def cell_update(context, cell_name, values):
+ session = get_session()
+ with session.begin():
+ cell = _cell_get_by_name_query(context, cell_name, session=session)
+ cell.update(values)
return cell
@require_admin_context
-def cell_delete(context, cell_id):
- session = get_session()
- with session.begin():
- return _cell_get_by_id_query(context, cell_id, session=session).\
- delete()
+def cell_delete(context, cell_name):
+ return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
-def cell_get(context, cell_id):
- result = _cell_get_by_id_query(context, cell_id).first()
-
+def cell_get(context, cell_name):
+ result = _cell_get_by_name_query(context, cell_name).first()
if not result:
- raise exception.CellNotFound(cell_id=cell_id)
-
+ raise exception.CellNotFound(cell_name=cell_name)
return result
diff --git a/nova/exception.py b/nova/exception.py
index 7ec23d32d..dcd75bf4e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -82,9 +82,11 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
- def wrapped(*args, **kw):
+ def wrapped(self, context, *args, **kw):
+ # Don't store self or context in the payload, it now seems to
+ # contain confidential information.
try:
- return f(*args, **kw)
+ return f(self, context, *args, **kw)
except Exception, e:
with excutils.save_and_reraise_exception():
if notifier:
@@ -104,10 +106,6 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# propagated.
temp_type = f.__name__
- context = get_context_from_function_and_args(f,
- args,
- kw)
-
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
@@ -524,6 +522,10 @@ class PortNotFound(NotFound):
message = _("Port %(port_id)s could not be found.")
+class PortNotUsable(NovaException):
+ message = _("Port %(port_id)s not usable for instance %(instance)s.")
+
+
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
@@ -770,7 +772,7 @@ class FlavorAccessNotFound(NotFound):
class CellNotFound(NotFound):
- message = _("Cell %(cell_id)s could not be found.")
+ message = _("Cell %(cell_name)s doesn't exist.")
class CellRoutingInconsistency(NovaException):
@@ -1089,20 +1091,3 @@ class CryptoCAFileNotFound(FileNotFound):
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
-
-
-def get_context_from_function_and_args(function, args, kwargs):
- """Find an arg of type RequestContext and return it.
-
- This is useful in a couple of decorators where we don't
- know much about the function we're wrapping.
- """
-
- # import here to avoid circularity:
- from nova import context
-
- for arg in itertools.chain(kwargs.values(), args):
- if isinstance(arg, context.RequestContext):
- return arg
-
- return None
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 3fb397298..347b98733 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -7538,7 +7538,7 @@ msgstr ""
#: nova/virt/vmwareapi/driver.py:107
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
-"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
+"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver"
msgstr ""
#: nova/virt/vmwareapi/driver.py:258
@@ -7635,7 +7635,7 @@ msgstr ""
#: nova/virt/vmwareapi/read_write_util.py:142
#, python-format
-msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s"
msgstr ""
#: nova/virt/vmwareapi/vim.py:83
diff --git a/nova/manager.py b/nova/manager.py
index cb15b776e..7df63f719 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -215,8 +215,9 @@ class Manager(base.Base):
if self._periodic_spacing[task_name] is None:
wait = 0
else:
- wait = time.time() - (self._periodic_last_run[task_name] +
- self._periodic_spacing[task_name])
+ due = (self._periodic_last_run[task_name] +
+ self._periodic_spacing[task_name])
+ wait = max(0, due - time.time())
if wait > 0.2:
if wait < idle_for:
idle_for = wait
diff --git a/nova/network/api.py b/nova/network/api.py
index 25680e656..976be93ed 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -74,7 +74,11 @@ def update_instance_cache_with_nw_info(api, context, instance,
class API(base.Base):
- """API for interacting with the network manager."""
+ """API for doing networking via the nova-network network manager.
+
+ This is a pluggable module - other implementations do networking via
+ other services (such as Quantum).
+ """
_sentinel = object()
@@ -180,9 +184,15 @@ class API(base.Base):
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
- requested_networks):
+ requested_networks, macs=None):
"""Allocates all network structures for an instance.
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: macs is ignored by nova-network.
:returns: network info as from get_instance_nw_info() below
"""
args = {}
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8d9255dac..7b69c7a36 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -568,7 +568,7 @@ class FloatingIP(object):
else:
host = network['host']
- interface = CONF.public_interface or floating_ip['interface']
+ interface = floating_ip.get('interface')
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
@@ -585,6 +585,7 @@ class FloatingIP(object):
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
"""Performs db and driver calls to associate floating ip & fixed ip."""
+ interface = CONF.public_interface or interface
@lockutils.synchronized(unicode(floating_address), 'nova-')
def do_associate():
@@ -642,7 +643,7 @@ class FloatingIP(object):
# send to correct host, unless i'm the correct host
network = self._get_network_by_id(context, fixed_ip['network_id'])
- interface = CONF.public_interface or floating_ip['interface']
+ interface = floating_ip.get('interface')
if network['multi_host']:
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
@@ -672,7 +673,7 @@ class FloatingIP(object):
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
"""Performs db and driver calls to disassociate floating ip."""
- # disassociate floating ip
+ interface = CONF.public_interface or interface
@lockutils.synchronized(unicode(address), 'nova-')
def do_disassociate():
@@ -1231,6 +1232,7 @@ class NetworkManager(manager.SchedulerDependentManager):
nw_info = network_model.NetworkInfo()
for vif in vifs:
vif_dict = {'id': vif['uuid'],
+ 'type': network_model.VIF_TYPE_BRIDGE,
'address': vif['address']}
# handle case where vif doesn't have a network
diff --git a/nova/network/model.py b/nova/network/model.py
index dcee68f8c..e4fe0d54c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -25,6 +25,18 @@ def ensure_string_keys(d):
# http://bugs.python.org/issue4978
return dict([(str(k), v) for k, v in d.iteritems()])
+# Constants for the 'vif_type' field in VIF class
+VIF_TYPE_OVS = 'ovs'
+VIF_TYPE_BRIDGE = 'bridge'
+VIF_TYPE_802_QBG = '802.1qbg'
+VIF_TYPE_802_QBH = '802.1qbh'
+VIF_TYPE_OTHER = 'other'
+
+# Constant for max length of network interface names
+# eg 'bridge' in the Network class or 'devname' in
+# the VIF class
+NIC_NAME_LEN = 14
+
class Model(dict):
"""Defines some necessary structures for most of the network models."""
@@ -195,13 +207,14 @@ class Network(Model):
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
- **kwargs):
+ devname=None, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self['type'] = type
+ self['devname'] = devname
self._set_meta(kwargs)
@@ -366,6 +379,7 @@ class NetworkInfo(list):
'broadcast': str(subnet_v4.as_netaddr().broadcast),
'mac': vif['address'],
'vif_type': vif['type'],
+ 'vif_devname': vif.get('devname'),
'vif_uuid': vif['id'],
'rxtx_cap': vif.get_meta('rxtx_cap', 0),
'dns': [get_ip(ip) for ip in subnet_v4['dns']],
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 51386b4fd..0deb3a4bb 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -48,6 +48,11 @@ quantum_opts = [
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
+ # TODO(berrange) temporary hack until Quantum can pass over the
+ # name of the OVS bridge it is configured with
+ cfg.StrOpt('quantum_ovs_bridge',
+ default='br-int',
+ help='Name of Integration Bridge used by Open vSwitch'),
]
CONF = cfg.CONF
@@ -99,7 +104,16 @@ class API(base.Base):
return nets
def allocate_for_instance(self, context, instance, **kwargs):
- """Allocate all network resources for the instance."""
+ """Allocate all network resources for the instance.
+
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: QuantumV2 does not yet honour mac address limits.
+ """
+ hypervisor_macs = kwargs.get('macs', None)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
@@ -114,7 +128,11 @@ class API(base.Base):
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
- port = quantum.show_port(port_id).get('port')
+ port = quantum.show_port(port_id)['port']
+ if hypervisor_macs is not None:
+ if port['mac_address'] not in hypervisor_macs:
+ raise exception.PortNotUsable(port_id=port_id,
+ instance=instance['display_name'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip:
@@ -570,9 +588,24 @@ class API(base.Base):
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
+ bridge = None
+ vif_type = port.get('binding:vif_type')
+ # TODO(berrange) Quantum should pass the bridge name
+ # in another binding metadata field
+ if vif_type == network_model.VIF_TYPE_OVS:
+ bridge = CONF.quantum_ovs_bridge
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
+ bridge = "brq" + port['network_id']
+
+ if bridge is not None:
+ bridge = bridge[:network_model.NIC_NAME_LEN]
+
+ devname = "tap" + port['id']
+ devname = devname[:network_model.NIC_NAME_LEN]
+
network = network_model.Network(
id=port['network_id'],
- bridge='', # Quantum ignores this field
+ bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
@@ -582,7 +615,8 @@ class API(base.Base):
id=port['id'],
address=port['mac_address'],
network=network,
- type=port.get('binding:vif_type')))
+ type=port.get('binding:vif_type'),
+ devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
diff --git a/nova/quota.py b/nova/quota.py
index 96e612503..1856c97c1 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -965,6 +965,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Committed reservations %(reservations)s") % locals())
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
@@ -986,6 +987,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Rolled back reservations %(reservations)s") % locals())
def usage_reset(self, context, resources):
"""
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index dc494af8f..d1ae1cd6e 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -192,12 +192,12 @@ class Scheduler(object):
# Checking src host exists and compute node
src = instance_ref['host']
try:
- services = db.service_get_all_compute_by_host(context, src)
+ service = db.service_get_by_compute_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
- if not self.servicegroup_api.service_is_up(services[0]):
+ if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
@@ -209,8 +209,7 @@ class Scheduler(object):
"""
# Checking dest exists and compute node.
- dservice_refs = db.service_get_all_compute_by_host(context, dest)
- dservice_ref = dservice_refs[0]
+ dservice_ref = db.service_get_by_compute_host(context, dest)
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
@@ -290,5 +289,5 @@ class Scheduler(object):
:return: value specified by key
"""
- compute_node_ref = db.service_get_all_compute_by_host(context, host)
- return compute_node_ref[0]['compute_node'][0]
+ service_ref = db.service_get_by_compute_host(context, host)
+ return service_ref['compute_node'][0]
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 033ee9cc8..84bdcddb5 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -220,13 +220,12 @@ class SchedulerManager(manager.Manager):
"""
# Getting compute node info and related instances info
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
+ service_ref = db.service_get_by_compute_host(context, host)
instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
+ service_ref['host'])
# Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
+ compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
diff --git a/nova/service.py b/nova/service.py
index 39e414eb6..0fde14baa 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -32,7 +32,6 @@ import greenlet
from nova import conductor
from nova import context
-from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import eventlet_backdoor
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index b30a3ddeb..562473121 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -30,6 +30,7 @@ import fixtures
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import utils as compute_utils
@@ -1387,6 +1388,17 @@ class CloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_instance_type,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['passwordData'], 'fakepass')
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
new file mode 100644
index 000000000..b8f4e6398
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute admin api w/ Cells
+"""
+
+from nova.api.openstack.compute.contrib import admin_actions
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import vm_states
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+LOG = logging.getLogger('nova.tests.test_compute_cells')
+
+INSTANCE_IDS = {'inst_id': 1}
+
+
+class CellsAdminAPITestCase(test.TestCase):
+
+ def setUp(self):
+ super(CellsAdminAPITestCase, self).setUp()
+
+ def _fake_cell_read_only(*args, **kwargs):
+ return False
+
+ def _fake_validate_cell(*args, **kwargs):
+ return
+
+ def _fake_compute_api_get(context, instance_id):
+ return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None, 'cell_name': None}
+
+ def _fake_instance_update_and_get_original(context, instance_uuid,
+ values):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ return (inst, inst)
+
+ def fake_cast_to_cells(context, instance, method, *args, **kwargs):
+ """
+            Makes sure that the cells receive the cast to update
+ the cell state
+ """
+ self.cells_recieved_kwargs.update(kwargs)
+
+ self.admin_api = admin_actions.AdminActionsController()
+ self.admin_api.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.stubs.Set(self.admin_api.compute_api, '_cell_read_only',
+ _fake_cell_read_only)
+ self.stubs.Set(self.admin_api.compute_api, '_validate_cell',
+ _fake_validate_cell)
+ self.stubs.Set(self.admin_api.compute_api, 'get',
+ _fake_compute_api_get)
+ self.stubs.Set(self.admin_api.compute_api.db,
+ 'instance_update_and_get_original',
+ _fake_instance_update_and_get_original)
+ self.stubs.Set(self.admin_api.compute_api, '_cast_to_cells',
+ fake_cast_to_cells)
+
+ self.uuid = uuidutils.generate_uuid()
+ url = '/fake/servers/%s/action' % self.uuid
+ self.request = fakes.HTTPRequest.blank(url)
+ self.cells_recieved_kwargs = {}
+
+ def test_reset_active(self):
+ body = {"os-resetState": {"state": "error"}}
+ result = self.admin_api._reset_state(self.request, 'inst_id', body)
+
+ self.assertEqual(result.status_int, 202)
+        # Make sure the cells received the update
+ self.assertEqual(self.cells_recieved_kwargs,
+ dict(vm_state=vm_states.ERROR,
+ task_state=None))
diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py
new file mode 100644
index 000000000..82d469524
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_cells.py
@@ -0,0 +1,396 @@
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cells as cells_ext
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+FAKE_CELLS = [
+ dict(id=1, name='cell1', username='bob', is_parent=True,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r1.example.org', password='xxxx'),
+ dict(id=2, name='cell2', username='alice', is_parent=False,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r2.example.org', password='qwerty')]
+
+
+FAKE_CAPABILITIES = [
+ {'cap1': '0,1', 'cap2': '2,3'},
+ {'cap3': '4,5', 'cap4': '5,6'}]
+
+
+def fake_db_cell_get(context, cell_name):
+ for cell in FAKE_CELLS:
+ if cell_name == cell['name']:
+ return cell
+ else:
+ raise exception.CellNotFound(cell_name=cell_name)
+
+
+def fake_db_cell_create(context, values):
+ cell = dict(id=1)
+ cell.update(values)
+ return cell
+
+
+def fake_db_cell_update(context, cell_id, values):
+ cell = fake_db_cell_get(context, cell_id)
+ cell.update(values)
+ return cell
+
+
+def fake_cells_api_get_all_cell_info(*args):
+ cells = copy.deepcopy(FAKE_CELLS)
+ del cells[0]['password']
+ del cells[1]['password']
+ for i, cell in enumerate(cells):
+ cell['capabilities'] = FAKE_CAPABILITIES[i]
+ return cells
+
+
+def fake_db_cell_get_all(context):
+ return FAKE_CELLS
+
+
+class CellsTest(test.TestCase):
+ def setUp(self):
+ super(CellsTest, self).setUp()
+ self.stubs.Set(db, 'cell_get', fake_db_cell_get)
+ self.stubs.Set(db, 'cell_get_all', fake_db_cell_get_all)
+ self.stubs.Set(db, 'cell_update', fake_db_cell_update)
+ self.stubs.Set(db, 'cell_create', fake_db_cell_create)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
+ fake_cells_api_get_all_cell_info)
+
+ self.controller = cells_ext.Controller()
+ self.context = context.get_admin_context()
+
+ def _get_request(self, resource):
+ return fakes.HTTPRequest.blank('/v2/fake/' + resource)
+
+ def test_index(self):
+ req = self._get_request("cells")
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+            self.assertNotIn('capabilities', cell)
+ self.assertNotIn('password', cell)
+
+ def test_detail(self):
+ req = self._get_request("cells/detail")
+ res_dict = self.controller.detail(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+ self.assertEqual(cell['capabilities'], FAKE_CAPABILITIES[i])
+ self.assertNotIn('password', cell)
+
+ def test_show_bogus_cell_raises(self):
+ req = self._get_request("cells/bogus")
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
+
+ def test_get_cell_by_name(self):
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.show(req, 'cell1')
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertNotIn('password', cell)
+
+ def test_cell_delete(self):
+ call_info = {'delete_called': 0}
+
+ def fake_db_cell_delete(context, cell_name):
+ self.assertEqual(cell_name, 'cell999')
+ call_info['delete_called'] += 1
+
+ self.stubs.Set(db, 'cell_delete', fake_db_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ self.controller.delete(req, 'cell999')
+ self.assertEqual(call_info['delete_called'], 1)
+
+ def test_delete_bogus_cell_raises(self):
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
+ 'cell999')
+
+ def test_cell_create_parent(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent',
+ # Also test this is ignored/stripped
+ 'is_parent': False}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'parent')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_child(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'child'}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'child')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_no_name_raises(self):
+ body = {'cell': {'username': 'moocow',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_empty_string_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_bang_raises(self):
+ body = {'cell': {'name': 'moo!cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_invalid_type_raises(self):
+ body = {'cell': {'name': 'moocow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'invalid'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_update(self):
+ body = {'cell': {'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.update(req, 'cell1', body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], FAKE_CELLS[0]['rpc_host'])
+ self.assertEqual(cell['username'], 'zeb')
+ self.assertNotIn('password', cell)
+
+ def test_cell_update_empty_name_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_update_invalid_type_raises(self):
+ body = {'cell': {'username': 'zeb',
+ 'type': 'invalid',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_info(self):
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(name='darksecret', capabilities=caps, group='cells')
+
+ req = self._get_request("cells/info")
+ res_dict = self.controller.info(req)
+ cell = res_dict['cell']
+ cell_caps = cell['capabilities']
+
+ self.assertEqual(cell['name'], 'darksecret')
+ self.assertEqual(cell_caps['cap1'], 'a;b')
+ self.assertEqual(cell_caps['cap2'], 'c;d')
+
+ def test_sync_instances(self):
+ call_info = {}
+
+ def sync_instances(self, context, **kwargs):
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ body = {}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], None)
+
+ body = {'project_id': 'test-project'}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], 'test-project')
+ self.assertEqual(call_info['updated_since'], None)
+
+ expected = timeutils.utcnow().isoformat()
+ if not expected.endswith("+00:00"):
+ expected += "+00:00"
+
+ body = {'updated_since': expected}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], expected)
+
+ body = {'updated_since': 'skjdfkjsdkf'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'foo': 'meow'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+
+class TestCellsXMLSerializer(test.TestCase):
+ def test_multiple_cells(self):
+ fixture = {'cells': fake_cells_api_get_all_cell_info()}
+
+ serializer = cells_ext.CellsTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+
+ def test_single_cell_with_caps(self):
+ cell = {'id': 1,
+ 'name': 'darksecret',
+ 'username': 'meow',
+ 'capabilities': {'cap1': 'a;b',
+ 'cap2': 'c;d'}}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'meow')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 1)
+
+ child = res_tree[0]
+ self.assertEqual(child.tag,
+ '{%s}capabilities' % xmlutil.XMLNS_V10)
+ for elem in child:
+ self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10))
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
+
+ def test_single_cell_without_caps(self):
+ cell = {'id': 1,
+ 'username': 'woof',
+ 'name': 'darksecret'}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'woof')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 0)
+
+
+class TestCellsXMLDeserializer(test.TestCase):
+ def test_cell_deserializer(self):
+ caps_dict = {'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ caps_xml = ("<capabilities><cap1>a;b</cap1>"
+ "<cap2>c;d</cap2></capabilities>")
+ expected = {'cell': {'name': 'testcell1',
+ 'type': 'child',
+ 'rpc_host': 'localhost',
+ 'capabilities': caps_dict}}
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ "<cell><name>testcell1</name><type>child</type>"
+ "<rpc_host>localhost</rpc_host>"
+ "%s</cell>") % caps_xml
+ deserializer = cells_ext.CellDeserializer()
+ result = deserializer.deserialize(intext)
+ self.assertEqual(dict(body=expected), result)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index be4465cf9..e103b5b19 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -32,8 +32,10 @@ def stub_service_get_all(context, disabled=None):
return fake_hosts.SERVICES_LIST
-def stub_service_does_host_exist(context, host_name):
- return host_name in [row['host'] for row in stub_service_get_all(context)]
+def stub_service_get_by_host_and_topic(context, host_name, topic):
+ for service in stub_service_get_all(context):
+ if service['host'] == host_name and service['topic'] == topic:
+ return service
def stub_set_host_enabled(context, host_name, enabled):
@@ -130,8 +132,8 @@ class HostTestCase(test.TestCase):
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
# Only hosts in our fake DB exist
- self.stubs.Set(db, 'service_does_host_exist',
- stub_service_does_host_exist)
+ self.stubs.Set(db, 'service_get_by_host_and_topic',
+ stub_service_get_by_host_and_topic)
# 'host_c1' always succeeds, and 'host_c2'
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index ba65e8f6a..44d9e8af3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -21,8 +21,8 @@ import uuid
import webob
-from nova.api.openstack.compute.contrib import admin_networks as networks
from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
from nova.openstack.common import cfg
from nova import test
@@ -177,7 +177,7 @@ class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
- self.controller = networks.AdminNetworkController(
+ self.controller = networks.NetworkController(
self.fake_network_api)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index 24f169d98..1bd47b67a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -26,30 +26,30 @@ from nova.tests.api.openstack import fakes
fake_services_list = [{'binary': 'nova-scheduler',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 1,
'disabled': True,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-compute',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 2,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-scheduler',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 3,
'disabled': False,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
{'binary': 'nova-compute',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 4,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
]
@@ -75,7 +75,7 @@ class FakeRequestWithHostService(object):
GET = {"host": "host1", "service": "nova-compute"}
-def fake_servcie_get_all(context):
+def fake_service_get_all(context):
return fake_services_list
@@ -111,7 +111,7 @@ class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
- self.stubs.Set(db, "service_get_all", fake_servcie_get_all)
+ self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
@@ -128,7 +128,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler',
- 'host': 'host1', 'zone': 'nova',
+ 'host': 'host1', 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
@@ -136,7 +136,7 @@ class ServicesTest(test.TestCase):
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler', 'host': 'host2',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute', 'host': 'host2',
@@ -150,7 +150,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute', 'host': 'host1',
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index e3810510b..485968209 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -185,7 +185,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"Keypairs",
"Multinic",
"MultipleCreate",
- "OSNetworks",
"QuotaClasses",
"Quotas",
"Rescue",
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index d5384eff0..37ef71881 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -136,6 +136,19 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
row = db.bm_node_get(self.context, self.node['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+ def test_macs_for_instance(self):
+ self._create_node()
+ expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(self.test_instance))
+
+ def test_macs_for_instance_no_interfaces(self):
+ # Nodes cannot boot with no MACs, so we raise an error if that happens.
+ self.nic_info = []
+ self._create_node()
+ self.assertRaises(exception.NovaException,
+ self.driver.macs_for_instance, self.test_instance)
+
def test_spawn_node_in_use(self):
self._create_node()
db.bm_node_update(self.context, self.node['id'],
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index 72ef3f1f0..ef165f4ed 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -38,6 +38,21 @@ class CellsManagerClassTestCase(test.TestCase):
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
+ def _get_fake_responses(self):
+ responses = []
+ expected_responses = []
+ for x in xrange(1, 4):
+ responses.append(messaging.Response('cell%s' % x, x, False))
+ expected_responses.append(('cell%s' % x, x))
+ return expected_responses, responses
+
+ def test_get_cell_info_for_neighbors(self):
+ self.mox.StubOutWithMock(self.cells_manager.state_manager,
+ 'get_cell_info_for_neighbors')
+ self.cells_manager.state_manager.get_cell_info_for_neighbors()
+ self.mox.ReplayAll()
+ self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
+
def test_post_start_hook_child_cell(self):
self.mox.StubOutWithMock(self.driver, 'start_consumers')
self.mox.StubOutWithMock(context, 'get_admin_context')
@@ -211,3 +226,14 @@ class CellsManagerClassTestCase(test.TestCase):
# Now the last 1 and the first 1
self.assertEqual(call_info['sync_instances'],
[instances[-1], instances[0]])
+
+ def test_sync_instances(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'sync_instances')
+ self.msg_runner.sync_instances(self.ctxt, 'fake-project',
+ 'fake-time', 'fake-deleted')
+ self.mox.ReplayAll()
+ self.cells_manager.sync_instances(self.ctxt,
+ project_id='fake-project',
+ updated_since='fake-time',
+ deleted='fake-deleted')
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index 9973716f6..da45721ed 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -14,11 +14,14 @@
"""
Tests For Cells Messaging module
"""
+import mox
from nova.cells import messaging
+from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
@@ -912,3 +915,46 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
fake_bw_update_info)
+
+ def test_sync_instances(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ project_id = 'fake_project_id'
+ updated_since_raw = 'fake_updated_since_raw'
+ updated_since_parsed = 'fake_updated_since_parsed'
+ deleted = 'fake_deleted'
+
+ instance1 = dict(uuid='fake_uuid1', deleted=False)
+ instance2 = dict(uuid='fake_uuid2', deleted=True)
+ fake_instances = [instance1, instance2]
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.mox.StubOutWithMock(timeutils, 'parse_isotime')
+ self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
+
+ # Middle cell.
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn([])
+
+ # Bottom/Target cell
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn(fake_instances)
+ self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.sync_instances(self.ctxt,
+ project_id, updated_since_raw, deleted)
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
index b51bfa0c1..5e045aca9 100644
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -204,3 +204,23 @@ class CellsAPITestCase(test.TestCase):
expected_args = {'bw_update_info': bw_update_info}
self._check_result(call_info, 'bw_usage_update_at_top',
expected_args)
+
+ def test_get_cell_info_for_neighbors(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_cell_info_for_neighbors(
+ self.fake_context)
+ self._check_result(call_info, 'get_cell_info_for_neighbors', {},
+ version='1.1')
+ self.assertEqual(result, 'fake_response')
+
+ def test_sync_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+ self.cells_rpcapi.sync_instances(self.fake_context,
+ project_id='fake_project', updated_since='fake_time',
+ deleted=True)
+
+ expected_args = {'project_id': 'fake_project',
+ 'updated_since': 'fake_time',
+ 'deleted': True}
+ self._check_result(call_info, 'sync_instances', expected_args,
+ version='1.1')
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 08d9451b3..0d9f67231 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -60,7 +60,6 @@ from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
@@ -146,10 +145,11 @@ class BaseTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def tearDown(self):
+ ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
- instances = db.instance_get_all(self.context.elevated())
+ instances = db.instance_get_all(ctxt)
for instance in instances:
- db.instance_destroy(self.context.elevated(), instance['uuid'])
+ db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
@@ -996,96 +996,109 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
- def _stub_out_reboot(self, fake_net_info, fake_block_dev_info):
- def fake_reboot(driver, inst, net_info, reboot_type, block_dev_info):
- self.assertEqual(block_dev_info, fake_block_dev_info)
- self.assertEqual(net_info, fake_net_info)
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo',
- lambda x: False)
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
+ def _test_reboot(self, soft, legacy_nwinfo_driver):
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
- def test_reboot_soft(self):
- # Ensure instance can be soft rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING})
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'legacy_nwinfo')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ instance = dict(uuid='fake-instance',
+ power_state='unknown')
+ updated_instance1 = dict(uuid='updated-instance1',
+ power_state='fake')
+ updated_instance2 = dict(uuid='updated-instance2',
+ power_state='fake')
+
+ fake_nw_model = network_model.NetworkInfo()
+ self.mox.StubOutWithMock(fake_nw_model, 'legacy')
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 'fake_power_state1'
+ fake_power_state2 = 'fake_power_state2'
+ reboot_type = soft and 'SOFT' or 'HARD'
+
+ # Beginning of calls we expect.
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ self.compute._instance_update(econtext, instance['uuid'],
+ power_state=fake_power_state1,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance1)
+
+ # Reboot should check the driver to see if legacy nwinfo is
+ # needed. If it is, the model's legacy() method should be
+ # called and the result passed to driver.reboot. If the
+ # driver wants the model, we pass the model.
+ self.compute.driver.legacy_nwinfo().AndReturn(legacy_nwinfo_driver)
+ if legacy_nwinfo_driver:
+ expected_nw_info = 'legacy-nwinfo'
+ fake_nw_model.legacy().AndReturn(expected_nw_info)
+ else:
+ expected_nw_info = fake_nw_model
+
+ # Annoying. driver.reboot is wrapped in a try/except, and
+ # doesn't re-raise. It eats exception generated by mox if
+ # this is called with the wrong args, so we have to hack
+ # around it.
+ reboot_call_info = {}
+ expected_call_info = {'args': (updated_instance1, expected_nw_info,
+ reboot_type, fake_block_dev_info),
+ 'kwargs': {}}
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ self.compute._get_power_state(econtext,
+ updated_instance1).AndReturn(fake_power_state2)
+ self.compute._instance_update(econtext, updated_instance1['uuid'],
+ power_state=fake_power_state2,
+ task_state=None,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance2)
+ self.compute._notify_about_instance_usage(econtext,
+ updated_instance2,
+ 'reboot.end')
- reboot_type = "SOFT"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
+ self.mox.ReplayAll()
self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
block_device_info=fake_block_dev_info,
reboot_type=reboot_type)
+ self.assertEqual(expected_call_info, reboot_call_info)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
-
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
+ def test_reboot_soft(self):
+ self._test_reboot(True, False)
def test_reboot_hard(self):
- # Ensure instance can be hard rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- reboot_type = "HARD"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
- block_device_info=fake_block_dev_info,
- reboot_type=reboot_type)
+ self._test_reboot(False, False)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
+ def test_reboot_soft_legacy_nwinfo_driver(self):
+ self._test_reboot(True, True)
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
-
- def test_reboot_nwinfo(self):
- # Ensure instance network info is rehydrated in reboot.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- result = {'was_instance': []}
-
- # NOTE(danms): Beware the dragons ahead:
- # Since the _legacy_nw_info() method in manager runs inside a
- # try..except block, we can't assert from here. Further, this
- # will be run more than once during the operation we're about
- # to fire off, which means we need to make sure that it doesn't
- # fail any of the times it is run. Hence the obscurity below.
- def fake_legacy_nw_info(network_info):
- result['was_instance'].append(
- isinstance(network_info, network_model.NetworkInfo))
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_legacy_nw_info)
-
- fake_net_info = network_model.NetworkInfo([
- fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- fake_net_info_p = jsonutils.to_primitive(fake_net_info)
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info_p,
- block_device_info=fake_block_dev_info,
- reboot_type="SOFT")
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
- self.assertFalse(False in result['was_instance'])
+ def test_reboot_hard_legacy_nwinfo_driver(self):
+ self._test_reboot(False, True)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
@@ -1510,6 +1523,27 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ macs = set(['01:23:45:67:89:ab'])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True))
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(instance).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance=instance)
+
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1520,7 +1554,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False).AndRaise(rpc_common.RemoteError())
+ vpn=False,
+ macs=None).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -3159,7 +3194,6 @@ class ComputeTestCase(BaseTestCase):
self.compute._destroy_evacuated_instances(fake_context)
def test_init_host(self):
-
our_host = self.compute.host
fake_context = 'fake-context'
startup_instances = ['inst1', 'inst2', 'inst3']
@@ -3212,7 +3246,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute.init_host()
- # VerifyCall done by tearDown
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
def test_get_instances_on_driver(self):
fake_context = context.get_admin_context()
@@ -4198,12 +4235,10 @@ class ComputeAPITestCase(BaseTestCase):
def _stub_out_reboot(self, device_name):
def fake_reboot_instance(rpcapi, context, instance,
block_device_info,
- network_info,
reboot_type):
self.assertEqual(
block_device_info['block_device_mapping'][0]['mount_device'],
device_name)
- self.assertEqual(network_info[0]['network']['bridge'], 'fake_br1')
self.stubs.Set(nova.compute.rpcapi.ComputeAPI, 'reboot_instance',
fake_reboot_instance)
@@ -4376,6 +4411,31 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_snapshot_given_image_uuid(self):
+ """Ensure a snapshot of an instance can be created when image UUID
+ is already known.
+ """
+ instance = self._create_fake_instance()
+ name = 'snap1'
+ extra_properties = {'extra_param': 'value1'}
+ recv_meta = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties)
+ image_id = recv_meta['id']
+
+ def fake_show(meh, context, id):
+ return recv_meta
+
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None})
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ image = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties,
+ image_id=image_id)
+ self.assertEqual(image, recv_meta)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_snapshot_minram_mindisk_VHD(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4383,27 +4443,25 @@ class ComputeAPITestCase(BaseTestCase):
and min_disk set to that of the original instances flavor.
"""
- self.fake_image['disk_format'] = 'vhd'
+ self.fake_image.update(disk_format='vhd',
+ min_ram=1, min_disk=1)
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- instance = self._create_fake_instance()
- inst_params = {'root_gb': 2, 'memory_mb': 256}
- instance['instance_type'].update(inst_params)
+ instance = self._create_fake_instance(type_name='m1.small')
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
- self.assertEqual(image['min_ram'], 256)
- self.assertEqual(image['min_disk'], 2)
+ instance_type = instance['instance_type']
+ self.assertEqual(image['min_ram'], instance_type['memory_mb'])
+ self.assertEqual(image['min_disk'], instance_type['root_gb'])
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
- db.instance_destroy(self.context, instance['uuid'])
-
def test_snapshot_minram_mindisk(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4469,7 +4527,10 @@ class ComputeAPITestCase(BaseTestCase):
def fake_show(*args):
raise exception.ImageNotFound(image_id="fake")
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ if not self.__class__.__name__ == "CellsComputeAPITestCase":
+ # Cells tests will call this a 2nd time in child cell with
+ # the newly created image_id, and we want that one to succeed.
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
@@ -6090,81 +6151,6 @@ class ComputePolicyTestCase(BaseTestCase):
availability_zone='1:1')
-class ComputeHostAPITestCase(BaseTestCase):
- def setUp(self):
- super(ComputeHostAPITestCase, self).setUp()
- self.host_api = compute_api.HostAPI()
-
- def _rpc_call_stub(self, call_info):
- def fake_rpc_call(context, topic, msg, timeout=None):
- call_info['context'] = context
- call_info['topic'] = topic
- call_info['msg'] = msg
- self.stubs.Set(rpc, 'call', fake_rpc_call)
-
- def _pretend_fake_host_exists(self, ctxt):
- """Sets it so that the host API always thinks that 'fake_host'
- exists"""
- self.mox.StubOutWithMock(self.host_api, 'does_host_exist')
- self.host_api.does_host_exist(ctxt, 'fake_host').AndReturn(True)
- self.mox.ReplayAll()
-
- def test_set_host_enabled(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self._pretend_fake_host_exists(ctxt)
- self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'set_host_enabled',
- 'args': {'enabled': 'fake_enabled'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_get_host_uptime(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self._pretend_fake_host_exists(ctxt)
- self.host_api.get_host_uptime(ctxt, 'fake_host')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'get_host_uptime',
- 'args': {},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_host_power_action(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
- self._pretend_fake_host_exists(ctxt)
- self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_power_action',
- 'args': {'action': 'fake_action'},
- 'version':
- compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_set_host_maintenance(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
- self._pretend_fake_host_exists(ctxt)
- self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_maintenance_mode',
- 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
-
class KeypairAPITestCase(BaseTestCase):
def setUp(self):
super(KeypairAPITestCase, self).setUp()
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index aa4b448d4..3c25f9b43 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -16,7 +16,11 @@
"""
Tests For Compute w/ Cells
"""
+import functools
+
from nova.compute import cells_api as compute_cells_api
+from nova import db
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.compute import test_compute
@@ -28,17 +32,57 @@ ORIG_COMPUTE_API = None
def stub_call_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
return fn(context, instance, *args, **kwargs)
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
fn(context, instance, *args, **kwargs)
-def deploy_stubs(stubs, api):
- stubs.Set(api, '_call_to_cells', stub_call_to_cells)
- stubs.Set(api, '_cast_to_cells', stub_cast_to_cells)
+def deploy_stubs(stubs, api, original_instance=None):
+ call = stub_call_to_cells
+ cast = stub_cast_to_cells
+
+ if original_instance:
+ kwargs = dict(original_instance=original_instance)
+ call = functools.partial(stub_call_to_cells, **kwargs)
+ cast = functools.partial(stub_cast_to_cells, **kwargs)
+
+ stubs.Set(api, '_call_to_cells', call)
+ stubs.Set(api, '_cast_to_cells', cast)
+
+
+def wrap_create_instance(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ instance = self._create_fake_instance()
+
+ def fake(*args, **kwargs):
+ return instance
+
+ self.stubs.Set(self, '_create_fake_instance', fake)
+ original_instance = jsonutils.to_primitive(instance)
+ deploy_stubs(self.stubs, self.compute_api,
+ original_instance=original_instance)
+ return func(self, *args, **kwargs)
+
+ return wrapper
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
@@ -84,6 +128,42 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def test_get_backdoor_port(self):
self.skipTest("Test is incompatible with cells.")
+ def test_snapshot_given_image_uuid(self):
+ self.skipTest("Test doesn't apply to API cell.")
+
+ @wrap_create_instance
+ def test_snapshot(self):
+ return super(CellsComputeAPITestCase, self).test_snapshot()
+
+ @wrap_create_instance
+ def test_snapshot_image_metadata_inheritance(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_image_metadata_inheritance()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_VHD(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_VHD()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_img_missing_minram(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_img_missing_minram()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_no_image(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_no_image()
+
+ @wrap_create_instance
+ def test_backup(self):
+ return super(CellsComputeAPITestCase, self).test_backup()
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
index f00245d1e..95d3c4926 100644
--- a/nova/tests/compute/test_host_api.py
+++ b/nova/tests/compute/test_host_api.py
@@ -13,93 +13,114 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.compute import api
+from nova import compute
+from nova.compute import rpcapi as compute_rpcapi
from nova import context
-from nova import db
-from nova import exception
+from nova.openstack.common import rpc
from nova import test
-from nova.tests import fake_hosts
-class HostApiTestCase(test.TestCase):
- """
- Tests 'host' subset of the compute api
- """
-
+class ComputeHostAPITestCase(test.TestCase):
def setUp(self):
- super(HostApiTestCase, self).setUp()
- self.compute_rpcapi = api.compute_rpcapi
- self.api = api.HostAPI()
+ super(ComputeHostAPITestCase, self).setUp()
+ self.host_api = compute.HostAPI()
+ self.ctxt = context.get_admin_context()
- def test_bad_host_set_enabled(self):
- """
- Tests that actions on single hosts that don't exist blow up without
- having to reach the host via rpc. Should raise HostNotFound if you
- try to update a host that is not in the DB
+ def _mock_rpc_call(self, expected_message, result=None):
+ if result is None:
+ result = 'fake-result'
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(self.ctxt, 'compute.fake_host',
+ expected_message, None).AndReturn(result)
+
+ def _mock_assert_host_exists(self):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists.
"""
- self.assertRaises(exception.HostNotFound, self.api.set_host_enabled,
- context.get_admin_context(), "bogus_host_name", False)
+ self.mox.StubOutWithMock(self.host_api, '_assert_host_exists')
+ self.host_api._assert_host_exists(self.ctxt, 'fake_host')
+
+ def test_set_host_enabled(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'set_host_enabled',
+ 'args': {'enabled': 'fake_enabled'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+
+ self.mox.ReplayAll()
+ result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
+ 'fake_enabled')
+ self.assertEqual('fake-result', result)
+
+ def test_get_host_uptime(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'get_host_uptime',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
+ self.assertEqual('fake-result', result)
+
+ def test_host_power_action(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_power_action',
+ 'args': {'action': 'fake_action'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.host_power_action(self.ctxt, 'fake_host',
+ 'fake_action')
+ self.assertEqual('fake-result', result)
- def test_list_compute_hosts(self):
- ctx = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'service_get_all')
- db.service_get_all(ctx, False).AndReturn(fake_hosts.SERVICES_LIST)
+ def test_set_host_maintenance(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_maintenance_mode',
+ 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
- compute_hosts = self.api.list_hosts(ctx, service="compute")
+ result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
+ 'fake_mode')
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt)
self.mox.VerifyAll()
- expected = [host for host in fake_hosts.HOST_LIST
- if host["service"] == "compute"]
- self.assertEqual(expected, compute_hosts)
+ self.assertEqual(exp_services, result)
- def test_describe_host(self):
- """
- Makes sure that describe_host returns the correct information
- given our fake input.
- """
- ctx = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
- host_name = 'host_c1'
- db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
- [{'host': 'fake_host',
- 'compute_node': [
- {'vcpus': 4,
- 'vcpus_used': 1,
- 'memory_mb': 8192,
- 'memory_mb_used': 2048,
- 'local_gb': 1024,
- 'local_gb_used': 648}
- ]
- }])
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
- [{'project_id': 42,
- 'vcpus': 1,
- 'memory_mb': 2048,
- 'root_gb': 648,
- 'ephemeral_gb': 0,
- }])
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={})
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
self.mox.ReplayAll()
- result = self.api.describe_host(ctx, host_name)
- self.assertEqual(result,
- [{'resource': {'cpu': 4,
- 'disk_gb': 1024,
- 'host': 'host_c1',
- 'memory_mb': 8192,
- 'project': '(total)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': '(used_now)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': '(used_max)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': 42}}]
- )
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(key1='val2'))
self.mox.VerifyAll()
+ self.assertEqual([exp_services[1]], result)
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 3bfd51461..53d92a13f 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -297,8 +297,8 @@ class MissingComputeNodeTestCase(BaseTestCase):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
@@ -306,10 +306,10 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.created = True
return self._create_compute_node()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
- return [service]
+ return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
@@ -330,8 +330,8 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker = self._tracker()
self._migrations = {}
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'migration_update',
@@ -342,10 +342,10 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
- return [self.service]
+ return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index a31d9a14b..00b90ea65 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -236,9 +236,8 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance,
block_device_info={},
- network_info={},
reboot_type='type',
- version='2.5')
+ version='2.23')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 46fadf4f0..cc3dbfcc0 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -451,12 +451,16 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -476,10 +480,11 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
@@ -547,12 +552,16 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -572,10 +581,11 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
@@ -681,8 +691,8 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host', 'host')
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_create(self):
self._test_stubbed('service_create', {})
diff --git a/nova/tests/fake_imagebackend.py b/nova/tests/fake_imagebackend.py
index 978c879fd..c284a5042 100644
--- a/nova/tests/fake_imagebackend.py
+++ b/nova/tests/fake_imagebackend.py
@@ -28,7 +28,7 @@ class Backend(object):
def image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
- self.path = os.path.join(instance, name)
+ self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index bb789b74a..b3d842468 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -17,6 +17,12 @@
import os
import StringIO
+from nova.openstack.common import cfg
+
+
+CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
files = {'console.log': True}
disk_sizes = {}
@@ -133,3 +139,8 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
pass
+
+
+def get_instance_path(instance):
+ # TODO(mikal): we should really just call the real one here
+ return os.path.join(CONF.instances_path, instance['name'])
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index c5d160209..15890cdcd 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -104,6 +104,7 @@ policy_data = """
"compute_extension:admin_actions:migrate": "",
"compute_extension:aggregates": "",
"compute_extension:agents": "",
+ "compute_extension:cells": "",
"compute_extension:certificates": "",
"compute_extension:cloudpipe": "",
"compute_extension:cloudpipe_update": "",
@@ -136,10 +137,10 @@ policy_data = """
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
- "compute_extension:admin_networks": "",
- "compute_extension:admin_networks:view": "",
+ "compute_extension:networks": "",
+ "compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
- "compute_extension:os-networks": "",
+ "compute_extension:os-tenant-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
index 861c1ee8e..df40b08c0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..b51766f75
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
index acf47a4f6..092a1f933 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
index af57ccc47..77f333c00 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
index b67b1a894..8ab166a60 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
index 24fb6e539..97e96be17 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
index 0634adcba..728464ca9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
Binary files differ
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 0dd777fe2..fe0613646 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -89,6 +89,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-cells",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Cells",
+ "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-certificates",
"description": "%(text)s",
"links": [],
@@ -305,19 +313,19 @@
"updated": "%(timestamp)s"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "%(text)s",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "%(timestamp)s"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "%(text)s",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "%(timestamp)s"
},
{
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index fe34f369b..2051d891a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -33,6 +33,9 @@
<extension alias="os-agents" name="Agents" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" updated="%(timestamp)s">
<description>%(text)s</description>
</extension>
+ <extension alias="os-cells" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-certificates" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
<description>%(text)s</description>
</extension>
@@ -114,10 +117,10 @@
<extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>%(text)s</description>
</extension>
- <extension alias="os-admin-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>%(text)s</description>
</extension>
- <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>%(text)s</description>
</extension>
<extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl
new file mode 100644
index 000000000..2993b1df8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
new file mode 100644
index 000000000..d31a674a2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
new file mode 100644
index 000000000..b16e12cd6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
@@ -0,0 +1,4 @@
+{
+ "cells": []
+}
+
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
new file mode 100644
index 000000000..32fef4f04
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/>
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl
new file mode 100644
index 000000000..3d7a6c207
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "username": "username1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell2",
+ "username": "username2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell4",
+ "username": "username4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell5",
+ "username": "username5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
new file mode 100644
index 000000000..58312201f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <cell name="cell1" username="username1" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell2" username="username2" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell4" username="username4" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell5" username="username5" rpc_port="None" rpc_host="None" type="child"/>
+</cells>
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
index eeb191597..504f66f59 100644
--- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
@@ -21,9 +21,14 @@
"zone": "internal"
},
{
- "host_name": "%(host_name)s",
- "service": "conductor",
- "zone": "internal"
+ "host_name": "%(host_name)s",
+ "service": "conductor",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cells",
+ "zone": "internal"
}
]
}
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
index 25ef5a299..4e9d3195d 100644
--- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
@@ -5,4 +5,5 @@
<host host_name="%(host_name)s" service="network"/>
<host host_name="%(host_name)s" service="scheduler"/>
<host host_name="%(host_name)s" service="conductor"/>
+ <host host_name="%(host_name)s" service="cells"/>
</hosts>
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
index 757084d2f..757084d2f 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
index fb1c2d3d0..fb1c2d3d0 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
index ff9e2273d..ff9e2273d 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index e20d6881b..f17dc025f 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -27,7 +27,7 @@ import nova.image.glance
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova import service
-from nova import test # For the flags
+from nova import test
from nova.tests import fake_crypto
import nova.tests.image.fake
from nova.tests.integrated.api import client
@@ -35,6 +35,8 @@ from nova.tests.integrated.api import client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
def generate_random_alphanumeric(length):
@@ -81,6 +83,7 @@ class _IntegratedTestBase(test.TestCase):
self.scheduler = self.start_service('cert')
self.network = self.start_service('network')
self.scheduler = self.start_service('scheduler')
+ self.cells = self.start_service('cells', manager=CONF.cells.manager)
self._start_api_service()
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 0cbc1352b..7c3157872 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -28,14 +28,13 @@ from lxml import etree
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.api.openstack.compute import extensions
from nova.cloudpipe.pipelib import CloudPipe
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
-from nova.network import api
-from nova.network.manager import NetworkManager
+from nova.network import api as network_api
+from nova.network import manager as network_manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -55,6 +54,8 @@ CONF.import_opt('osapi_compute_extension',
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
LOG = logging.getLogger(__name__)
@@ -143,7 +144,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
template = self._get_template(name)
if self.generate_samples and not os.path.exists(template):
- with open(template, 'w') as outf:
+ with open(template, 'w'):
pass
with open(template) as inf:
return inf.read().strip()
@@ -372,7 +373,7 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-instance_usage_audit_log')
- do_not_approve_additions.append('os-admin-networks')
+ do_not_approve_additions.append('os-networks')
do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
@@ -1502,7 +1503,8 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
'vpn_public_port': 22}
self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(NetworkManager, "get_network", network_api_get)
+ self.stubs.Set(network_manager.NetworkManager, "get_network",
+ network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
@@ -2091,8 +2093,8 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
hypervisor_type='bar',
hypervisor_version='1',
disabled=False)
- return [{'compute_node': [service]}]
- self.stubs.Set(db, "service_get_all_compute_by_host", fake_get_compute)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
@@ -2359,8 +2361,8 @@ class DiskConfigXmlTest(DiskConfigJsonTest):
class OsNetworksJsonTests(ApiSampleTestBase):
- extension_name = ("nova.api.openstack.compute.contrib.os_networks"
- ".Os_networks")
+ extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
+ ".Os_tenant_networks")
def setUp(self):
super(OsNetworksJsonTests, self).setUp()
@@ -2377,21 +2379,22 @@ class OsNetworksJsonTests(ApiSampleTestBase):
self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
def test_list_networks(self):
- response = self._do_get('os-networks')
+ response = self._do_get('os-tenant-networks')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('networks-list-res', subs, response)
def test_create_network(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('networks-post-res', subs, response)
- def test_delete_networK(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
net = json.loads(response.read())
- response = self._do_delete('os-networks/%s' % net["network"]["id"])
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
self.assertEqual(response.status, 202)
@@ -2406,7 +2409,7 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Networks_associate requires Networks to be update
f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.admin_networks.Admin_networks')
+ 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
return f
def setUp(self):
@@ -2417,28 +2420,28 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
project=NetworksAssociateJsonTests._sentinel):
return True
- self.stubs.Set(api.API, "associate", fake_associate)
+ self.stubs.Set(network_api.API, "associate", fake_associate)
def test_disassociate(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_project(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
def test_associate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
@@ -2500,3 +2503,63 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase):
class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
ctype = "xml"
+
+
+class CellsSampleJsonTest(ApiSampleTestBase):
+ extension_name = "nova.api.openstack.compute.contrib.cells.Cells"
+
+ def setUp(self):
+ # db_check_interval < 0 makes cells manager always hit the DB
+ self.flags(enable=True, db_check_interval=-1, group='cells')
+ super(CellsSampleJsonTest, self).setUp()
+ self._stub_cells()
+
+ def _stub_cells(self, num_cells=5):
+ self.cells = []
+ self.cells_next_id = 1
+
+ def _fake_cell_get_all(context):
+ return self.cells
+
+ def _fake_cell_get(context, cell_name):
+ for cell in self.cells:
+ if cell['name'] == cell_name:
+ return cell
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ for x in xrange(num_cells):
+ cell = models.Cell()
+ our_id = self.cells_next_id
+ self.cells_next_id += 1
+ cell.update({'id': our_id,
+ 'name': 'cell%s' % our_id,
+ 'username': 'username%s' % our_id,
+ 'is_parent': our_id % 2 == 0})
+ self.cells.append(cell)
+
+ self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+ self.stubs.Set(db, 'cell_get', _fake_cell_get)
+
+ def test_cells_empty_list(self):
+ # Override this
+ self._stub_cells(num_cells=0)
+ response = self._do_get('os-cells')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('cells-list-empty-resp', subs, response)
+
+ def test_cells_list(self):
+ response = self._do_get('os-cells')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('cells-list-resp', subs, response)
+
+ def test_cells_get(self):
+ response = self._do_get('os-cells/cell3')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('cells-get-resp', subs, response)
+
+
+class CellsSampleXmlTest(CellsSampleJsonTest):
+ ctype = 'xml'
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index b6e1adc73..ca5ff8374 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -16,7 +16,6 @@
# under the License.
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.api.openstack.compute import extensions
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 94cccd9d9..959c5a472 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -17,8 +17,11 @@
"""Tests for network API."""
+import itertools
import random
+import mox
+
from nova import context
from nova import exception
from nova import network
@@ -37,6 +40,25 @@ class ApiTestCase(test.TestCase):
self.context = context.RequestContext('fake-user',
'fake-project')
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'instance_uuid',
+ 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ instance = dict(id='id', uuid='uuid', project_id='project_id',
+ host='host', instance_type={'rxtx_factor': 0})
+ self.network_api.allocate_for_instance(
+ 'context', instance, 'vpn', 'requested_networks', macs=macs)
+
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index d825a86d1..1552630fb 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -27,6 +27,7 @@ from nova import exception
from nova import ipv6
from nova.network import linux_net
from nova.network import manager as network_manager
+from nova.network import model as net_model
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -184,7 +185,8 @@ class FlatNetworkTestCase(test.TestCase):
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
- 'vif_type': None,
+ 'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 004e76071..876bce90d 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -342,15 +342,11 @@ class TestQuantumv2(test.TestCase):
self.assertEquals('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEquals(0, len(nw_inf[0]['network']['subnets']))
- def _allocate_for_instance(self, net_idx=1, **kwargs):
+ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
api = quantumapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
- api.get_instance_nw_info(mox.IgnoreArg(),
- self.instance,
- networks=nets).AndReturn(None)
-
ports = {}
fixed_ips = {}
req_net_ids = []
@@ -359,7 +355,8 @@ class TestQuantumv2(test.TestCase):
if port_id:
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
- 'network_id': 'my_netid1'}})
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1'}})
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
else:
@@ -368,6 +365,9 @@ class TestQuantumv2(test.TestCase):
expected_network_order = req_net_ids
else:
expected_network_order = [n['id'] for n in nets]
+ if kwargs.get('_break_list_networks'):
+ self.mox.ReplayAll()
+ return api
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
mox_list_network_params = dict(tenant_id=self.instance['project_id'],
@@ -409,7 +409,15 @@ class TestQuantumv2(test.TestCase):
res_port = {'port': {'id': 'fake'}}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
+
+ api.get_instance_nw_info(mox.IgnoreArg(),
+ self.instance,
+ networks=nets).AndReturn(None)
self.mox.ReplayAll()
+ return api
+
+ def _allocate_for_instance(self, net_idx=1, **kwargs):
+ api = self._stub_allocate_for_instance(net_idx, **kwargs)
api.allocate_for_instance(self.context, self.instance, **kwargs)
def test_allocate_for_instance_1(self):
@@ -420,6 +428,26 @@ class TestQuantumv2(test.TestCase):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted, as a set.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
+ def test_allocate_for_instance_mac_conflicting_requested_port(self):
+ # specify only first and last network
+ requested_networks = [(None, None, 'my_portid1')]
+ api = self._stub_allocate_for_instance(
+ net_idx=1, requested_networks=requested_networks,
+ macs=set(['unknown:mac']),
+ _break_list_networks=True)
+ self.assertRaises(exception.PortNotUsable,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['unknown:mac']))
+
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
@@ -435,7 +463,6 @@ class TestQuantumv2(test.TestCase):
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
- # specify only first and last network
requested_networks = [(None, None, 'myportid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index ceea74e70..dd5b0ae32 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -111,13 +111,13 @@ class SchedulerManagerTestCase(test.TestCase):
def test_show_host_resources(self):
host = 'fake_host'
- computes = [{'host': host,
- 'compute_node': [{'vcpus': 4,
- 'vcpus_used': 2,
- 'memory_mb': 1024,
- 'memory_mb_used': 512,
- 'local_gb': 1024,
- 'local_gb_used': 512}]}]
+ compute_node = {'host': host,
+ 'compute_node': [{'vcpus': 4,
+ 'vcpus_used': 2,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 512,
+ 'local_gb': 1024,
+ 'local_gb_used': 512}]}
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
@@ -134,11 +134,11 @@ class SchedulerManagerTestCase(test.TestCase):
'root_gb': 256,
'ephemeral_gb': 0}]
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.service_get_all_compute_by_host(self.context, host).AndReturn(
- computes)
+ db.service_get_by_compute_host(self.context, host).AndReturn(
+ compute_node)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
@@ -338,8 +338,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -362,7 +360,7 @@ class SchedulerTestCase(test.TestCase):
# Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
@@ -373,34 +371,32 @@ class SchedulerTestCase(test.TestCase):
block_migration = True
disk_over_commit = True
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
# Source checks
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
# Destination checks (compute is up, enough memory, disk)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
# assert_compute_node_has_enough_memory()
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'memory_mb': 2048,
- 'hypervisor_version': 1}]}])
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'memory_mb': 2048,
+ 'hypervisor_version': 1}]})
db.instance_get_all_by_host(self.context, dest).AndReturn(
[dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1,
- 'cpu_info': 'fake_cpu_info'}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]})
rpc.call(self.context, "compute.fake_host2",
{"method": 'check_can_live_migrate_destination',
@@ -440,7 +436,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -448,9 +444,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context,
instance['host']).AndRaise(
- exception.NotFound())
+ exception.ComputeHostNotFound(host='fake'))
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -463,7 +459,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -471,8 +467,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
self.mox.ReplayAll()
@@ -486,7 +482,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
dest = 'fake_host2'
@@ -495,8 +491,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
# Compute is down
self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
@@ -511,17 +507,16 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
block_migration = False
- disk_over_commit = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
@@ -535,7 +530,7 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
@@ -546,8 +541,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
@@ -569,7 +564,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -579,13 +574,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'not-xen',
- 'hypervisor_version': 1}]}])
+ {'compute_node': [{'hypervisor_type': 'not-xen',
+ 'hypervisor_version': 1}]})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
@@ -601,7 +596,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -611,13 +606,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 2}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 2}]})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index 260ab28c7..28fa423e0 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -67,11 +67,9 @@ class ConfigDriveTestCase(test.TestCase):
utils.mkfs('vfat', mox.IgnoreArg(),
label='config-2').AndReturn(None)
- utils.trycmd('mount', '-o', 'loop', mox.IgnoreArg(),
+ utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
- utils.trycmd('chown', mox.IgnoreArg(), mox.IgnoreArg(),
- run_as_root=True).AndReturn((None, None))
utils.execute('umount', mox.IgnoreArg(),
run_as_root=True).AndReturn(None)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 7df28bfcb..c70e96cdc 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -299,27 +299,6 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
- def test_instance_test_and_set(self):
- ctxt = context.get_admin_context()
- states = [
- (None, [None, 'some'], 'building'),
- (None, [None], 'building'),
- ('building', ['building'], 'ready'),
- ('building', [None, 'building'], 'ready')]
- for st in states:
- inst = db.instance_create(ctxt, {'vm_state': st[0]})
- uuid = inst['uuid']
- db.instance_test_and_set(ctxt, uuid, 'vm_state', st[1], st[2])
- inst = db.instance_get_by_uuid(ctxt, uuid)
- self.assertEqual(inst["vm_state"], st[2])
-
- def test_instance_test_and_set_exception(self):
- ctxt = context.get_admin_context()
- inst = db.instance_create(ctxt, {'vm_state': 'building'})
- self.assertRaises(exception.InstanceInvalidState,
- db.instance_test_and_set, ctxt,
- inst['uuid'], 'vm_state', [None, 'disable'], 'run')
-
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index 9e34f287c..ad67cff26 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -52,23 +52,23 @@ class FakeNotifier(object):
self.provided_context = context
-def good_function():
+def good_function(self, context):
return 99
-def bad_function_exception(blah="a", boo="b", context=None):
+def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None):
raise test.TestingException()
class WrapExceptionTestCase(test.TestCase):
def test_wrap_exception_good_return(self):
wrapped = exception.wrap_exception()
- self.assertEquals(99, wrapped(good_function)())
+ self.assertEquals(99, wrapped(good_function)(1, 2))
def test_wrap_exception_throws_exception(self):
wrapped = exception.wrap_exception()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
def test_wrap_exception_with_notifier(self):
notifier = FakeNotifier()
@@ -76,7 +76,7 @@ class WrapExceptionTestCase(test.TestCase):
"level")
ctxt = context.get_admin_context()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception), context=ctxt)
+ wrapped(bad_function_exception), 1, ctxt, 3, zoo=3)
self.assertEquals(notifier.provided_publisher, "publisher")
self.assertEquals(notifier.provided_event, "event")
self.assertEquals(notifier.provided_priority, "level")
@@ -88,7 +88,7 @@ class WrapExceptionTestCase(test.TestCase):
notifier = FakeNotifier()
wrapped = exception.wrap_exception(notifier)
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
self.assertEquals(notifier.provided_publisher, None)
self.assertEquals(notifier.provided_event, "bad_function_exception")
self.assertEquals(notifier.provided_priority, notifier.ERROR)
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index f5713c457..9fec9d151 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -68,7 +68,8 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
- vswitch_name='external')
+ vswitch_name='external',
+ network_api_class='nova.network.quantumv2.api.API')
self._hypervutils = hypervutils.HyperVUtils()
self._conn = driver_hyperv.HyperVDriver(None)
@@ -119,6 +120,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import snapshotops
+ from nova.virt.hyperv import vif
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
@@ -129,6 +131,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
basevolumeutils,
baseops,
hostops,
+ vif,
vmops,
vmutils,
volumeops,
@@ -240,6 +243,9 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self.assertEquals(len(dvd_paths), 0)
def test_spawn_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
# Set flag to a non existing vswitch
self.flags(vswitch_name=str(uuid.uuid4()))
self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 82b5eb475..a9865cb44 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -22,6 +22,7 @@ from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@@ -38,12 +39,12 @@ class _ImageTestCase(object):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
- self.INSTANCE = 'instance'
+ self.INSTANCE = {'name': 'instance'}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
- self.PATH = os.path.join(CONF.instances_path, self.INSTANCE,
- self.NAME)
+ self.PATH = os.path.join(
+ libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
self.TEMPLATE_DIR = os.path.join(CONF.instances_path,
'_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
@@ -215,7 +216,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
- self.LV = '%s_%s' % (self.INSTANCE, self.NAME)
+ self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
@@ -342,7 +343,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
class BackendTestCase(test.TestCase):
- INSTANCE = 'fake-instance'
+ INSTANCE = {'name': 'fake-instance'}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 53bb1b984..de0745654 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -478,7 +478,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
@@ -488,7 +489,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
@@ -512,7 +514,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
@@ -522,7 +525,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
@@ -904,6 +908,9 @@ class LibvirtConnTestCase(test.TestCase):
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
+
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
@@ -927,6 +934,9 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
+ self.assertEquals(len(conf.cpu.features), 2)
+ self.assertEquals(conf.cpu.features[0].name, "tm2")
+ self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub(self):
@@ -4447,7 +4457,7 @@ class LibvirtDriverTestCase(test.TestCase):
block_device_info=None):
pass
- def fake_create_domain(xml, inst_name=''):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
@@ -4493,7 +4503,7 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_plug_vifs(instance, network_info):
pass
- def fake_create_domain(xml, inst_name=''):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index aeebb5742..11ffa020f 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -47,7 +47,8 @@ class LibvirtVifTestCase(test.TestCase):
'gateway_v6': net['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
- 'vif_uuid': 'vif-xxx-yyy-zzz'
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz'
}
instance = {
@@ -229,7 +230,7 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, CONF.libvirt_ovs_bridge)
+ self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
vp = node.find("virtualport")
@@ -257,7 +258,7 @@ class LibvirtVifTestCase(test.TestCase):
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
br_name = node.find("source").get("bridge")
- self.assertTrue(br_name.startswith("brq"))
+ self.assertEqual(br_name, "br0")
def test_quantum_hybrid_driver(self):
d = vif.LibvirtHybridOVSBridgeDriver()
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 29e63aba7..f15d71633 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -342,7 +342,7 @@ class OpenStackMetadataTestCase(test.TestCase):
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
- self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10"))
+ self.assertFalse('user_data' in mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
@@ -362,6 +362,14 @@ class OpenStackMetadataTestCase(test.TestCase):
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertFalse("random_seed" in json.loads(mdjson))
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
index 5804ea49b..39669967f 100644
--- a/nova/tests/test_periodic_tasks.py
+++ b/nova/tests/test_periodic_tasks.py
@@ -17,6 +17,7 @@
import fixtures
+import time
from nova import manager
from nova import test
@@ -76,6 +77,19 @@ class Manager(test.TestCase):
idle = m.periodic_tasks(None)
self.assertAlmostEqual(60, idle, 1)
+ def test_periodic_tasks_idle_calculation(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=10)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ m.periodic_tasks(None)
+ time.sleep(0.1)
+ idle = m.periodic_tasks(None)
+ self.assertTrue(idle > 9.7)
+ self.assertTrue(idle < 9.9)
+
def test_periodic_tasks_disabled(self):
class Manager(manager.Manager):
@manager.periodic_task(spacing=-1)
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 4873714f3..71beed51e 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -112,7 +112,6 @@ class ServiceTestCase(test.TestCase):
self.host = 'foo'
self.binary = 'nova-fake'
self.topic = 'fake'
- self.mox.StubOutWithMock(service, 'db')
self.mox.StubOutWithMock(db, 'service_create')
self.mox.StubOutWithMock(db, 'service_get_by_args')
self.flags(use_local=True, group='conductor')
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 86b3a5730..577d227ce 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -16,7 +16,7 @@
# under the License.
"""
-Test suite for VMWareAPI.
+Test suite for VMwareAPI.
"""
from nova.compute import power_state
@@ -33,11 +33,11 @@ from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
-class VMWareAPIVMTestCase(test.TestCase):
+class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
+ super(VMwareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
@@ -48,7 +48,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMWareESXDriver(None, False)
+ self.conn = driver.VMwareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
@@ -78,7 +78,7 @@ class VMWareAPIVMTestCase(test.TestCase):
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
+ super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 00b70ceb3..6437f9537 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -20,6 +20,7 @@ import nova.context
import nova.db
from nova.image import glance
from nova.network import minidns
+from nova.network import model as network_model
from nova.openstack.common import cfg
CONF = cfg.CONF
@@ -91,6 +92,8 @@ def get_test_network_info(count=1):
'bridge_interface': fake_bridge_interface,
'injected': False}
mapping = {'mac': fake,
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
'dhcp_server': fake,
'dns': ['fake1', 'fake2'],
'gateway': fake,
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py
index 494b201d0..0330246e2 100644
--- a/nova/tests/vmwareapi/stubs.py
+++ b/nova/tests/vmwareapi/stubs.py
@@ -21,31 +21,31 @@ Stubouts for the test suite
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
- """Stubs out the VMWareAPISession's get_vim_object method."""
+ """Stubs out the VMwareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
- """Stubs out the VMWareAPISession's is_vim_object method."""
+ """Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def set_stubs(stubs):
"""Set the stubs."""
- stubs.Set(vmops.VMWareVMOps, 'plug_vifs', fake.fake_plug_vifs)
- stubs.Set(network_utils, 'get_network_with_the_name',
+ stubs.Set(vmops.VMwareVMOps, 'plug_vifs', fake.fake_plug_vifs)
+ stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
- stubs.Set(driver.VMWareAPISession, "_get_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_get_vim_object",
fake_get_vim_object)
- stubs.Set(driver.VMWareAPISession, "_is_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 462e0c444..9904fdcd4 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -188,13 +188,28 @@ class BareMetalDriver(driver.ComputeDriver):
l.append(inst['name'])
return l
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info=None, block_device_info=None):
+ def _require_node(self, instance):
+ """Get a node_id out of a manager instance dict.
+ The compute manager is meant to know the node id, so a missing node is
+ a significant issue - it may mean we've been passed someone elses data.
+ """
node_id = instance.get('node')
if not node_id:
raise exception.NovaException(_(
- "Baremetal node id not supplied to driver"))
+ "Baremetal node id not supplied to driver for %r")
+ % instance['uuid'])
+ return node_id
+
+ def macs_for_instance(self, instance):
+ context = nova_context.get_admin_context()
+ node_id = self._require_node(instance)
+ return set(iface['address'] for iface in
+ db.bm_interface_get_all_by_bm_node_id(context, node_id))
+
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ node_id = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
@@ -324,10 +339,9 @@ class BareMetalDriver(driver.ComputeDriver):
return self.volume_driver.attach_volume(connection_info,
instance, mountpoint)
- @exception.wrap_exception()
- def detach_volume(self, connection_info, instance, mountpoint):
+ def detach_volume(self, connection_info, instance_name, mountpoint):
return self.volume_driver.detach_volume(connection_info,
- instance, mountpoint)
+ instance_name, mountpoint)
def get_info(self, instance):
# NOTE(deva): compute/manager.py expects to get NotFound exception
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 570cea1d8..2e6f82b93 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -246,7 +246,6 @@ class LibvirtVolumeDriver(VolumeDriver):
# TODO(NTTdocomo): support CHAP
_allow_iscsi_tgtadm(tid, 'ALL')
- @exception.wrap_exception()
def detach_volume(self, connection_info, instance, mountpoint):
mount_device = mountpoint.rpartition("/")[2]
try:
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index d4352c5e6..886136460 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -130,20 +130,16 @@ class ConfigDriveBuilder(object):
try:
mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
- _out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir,
+ _out, err = utils.trycmd('mount', '-o',
+ 'loop,uid=%d,gid=%d' % (os.getuid(),
+ os.getgid()),
+ path, mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
- _out, err = utils.trycmd('chown',
- '%s.%s' % (os.getuid(), os.getgid()),
- mountdir, run_as_root=True)
- if err:
- raise exception.ConfigDriveMountFailed(operation='chown',
- error=err)
-
# NOTE(mikal): I can't just use shutils.copytree here, because the
# destination directory already exists. This is annoying.
for ent in os.listdir(self.tempdir):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index e396de6a0..a8f779e66 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -732,6 +732,35 @@ class ComputeDriver(object):
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
+ def macs_for_instance(self, instance):
+ """What MAC addresses must this instance have?
+
+ Some hypervisors (such as bare metal) cannot do freeform virtualisation
+ of MAC addresses. This method allows drivers to return a set of MAC
+ addresses that the instance is to have. allocate_for_instance will take
+ this into consideration when provisioning networking for the instance.
+
+ Mapping of MAC addresses to actual networks (or permitting them to be
+ freeform) is up to the network implementation layer. For instance,
+ with openflow switches, fixed MAC addresses can still be virtualised
+ onto any L2 domain, with arbitrary VLANs etc, but regular switches
+ require pre-configured MAC->network mappings that will match the
+ actual configuration.
+
+ Most hypervisors can use the default implementation which returns None.
+ Hypervisors with MAC limits should return a set of MAC addresses, which
+ will be supplied to the allocate_for_instance call by the compute
+ manager, and it is up to that call to ensure that all assigned network
+ details are compatible with the set of MAC addresses.
+
+ This is called during spawn_instance by the compute manager.
+
+ :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
+ None means 'no constraints', a set means 'these and only these
+ MAC addresses'.
+ """
+ return None
+
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
new file mode 100644
index 000000000..a898d3ac2
--- /dev/null
+++ b/nova/virt/hyperv/vif.py
@@ -0,0 +1,133 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Cloudbase Solutions Srl
+# Copyright 2013 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+import uuid
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+from abc import abstractmethod
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import vmutils
+
+hyperv_opts = [
+ cfg.StrOpt('vswitch_name',
+ default=None,
+ help='External virtual switch Name, '
+ 'if not provided, the first external virtual '
+ 'switch is used'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(hyperv_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVBaseVIFDriver(object):
+ @abstractmethod
+ def plug(self, instance, vif):
+ pass
+
+ @abstractmethod
+ def unplug(self, instance, vif):
+ pass
+
+
+class HyperVQuantumVIFDriver(HyperVBaseVIFDriver):
+ """Quantum VIF driver."""
+
+ def plug(self, instance, vif):
+ # Quantum takes care of plugging the port
+ pass
+
+ def unplug(self, instance, vif):
+ # Quantum takes care of unplugging the port
+ pass
+
+
+class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
+ """Nova network VIF driver."""
+
+ def __init__(self):
+ self._vmutils = vmutils.VMUtils()
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+
+    def _find_external_network(self):
+        """Find the vswitch that is connected to the physical nic.
+            Assumes only one physical nic on the host
+        """
+        #If there are no physical nics connected to networks, return.
+        if CONF.vswitch_name:
+            LOG.debug(_("Attempting to bind NIC to %s ")
+                      % CONF.vswitch_name)
+            bound = self._conn.Msvm_VirtualSwitch(
+                ElementName=CONF.vswitch_name)
+        else:
+            LOG.debug(_("No vSwitch specified, attaching to default"))
+            # Assign the query result so the emptiness check below works.
+            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
+        if len(bound) == 0:
+            return None
+        if CONF.vswitch_name:
+            return self._conn.Msvm_VirtualSwitch(
+                ElementName=CONF.vswitch_name)[0]\
+                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+        else:
+            # WMI queries return lists; take the first port, as above.
+            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
+                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+
+    def plug(self, instance, vif):
+        extswitch = self._find_external_network()
+        if extswitch is None:
+            raise vmutils.HyperVException(_('Cannot find vSwitch'))
+
+        vm_name = instance['name']
+
+        # NOTE(review): the NIC is looked up by ElementName == vif['id'];
+        nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData(
+            ElementName=vif['id'])[0]
+
+        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+        #Create a port on the vswitch for this VM's NIC.
+        (new_port, ret_val) = switch_svc.CreateSwitchPort(
+            Name=str(uuid.uuid4()),
+            FriendlyName=vm_name,
+            ScopeOfResidence="",
+            VirtualSwitch=extswitch.path_())
+        if ret_val != 0:
+            LOG.error(_('Failed creating a port on the external vswitch'))
+            raise vmutils.HyperVException(_('Failed creating port for %s') %
+                                          vm_name)
+        ext_path = extswitch.path_()
+        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+                  % locals())
+
+        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+        vm = vms[0]
+
+        # Connect the freshly created switch port to the VM's NIC.
+        self._vmutils.modify_virt_resource(self._conn, nic_data, vm)
+
+    def unplug(self, instance, vif):
+        #TODO(alepilotti): presumably should tear down the port from plug()
+        pass
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 1fba15506..3d8958266 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -24,6 +24,7 @@ import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
@@ -35,10 +36,6 @@ from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
- cfg.StrOpt('vswitch_name',
- default=None,
- help='Default vSwitch Name, '
- 'if none provided first external is used'),
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
@@ -59,14 +56,32 @@ hyperv_opts = [
CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
CONF.import_opt('use_cow_images', 'nova.virt.driver')
+CONF.import_opt('network_api_class', 'nova.network')
class VMOps(baseops.BaseOps):
+ _vif_driver_class_map = {
+ 'nova.network.quantumv2.api.API':
+ 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver',
+ 'nova.network.api.API':
+ 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
+ }
+
def __init__(self, volumeops):
super(VMOps, self).__init__()
self._vmutils = vmutils.VMUtils()
self._volumeops = volumeops
+ self._load_vif_driver_class()
+
+ def _load_vif_driver_class(self):
+ try:
+ class_name = self._vif_driver_class_map[CONF.network_api_class]
+ self._vif_driver = importutils.import_object(class_name)
+ except KeyError:
+ raise TypeError(_("VIF driver not found for "
+ "network_api_class: %s") %
+ CONF.network_api_class)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
@@ -158,8 +173,8 @@ class VMOps(baseops.BaseOps):
self._create_scsi_controller(instance['name'])
for vif in network_info:
- mac_address = vif['address'].replace(':', '')
- self._create_nic(instance['name'], mac_address)
+ self._create_nic(instance['name'], vif)
+ self._vif_driver.plug(instance, vif)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
@@ -367,46 +382,28 @@ class VMOps(baseops.BaseOps):
LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
locals())
- def _create_nic(self, vm_name, mac):
+ def _create_nic(self, vm_name, vif):
"""Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
- #Find the vswitch that is connected to the physical nic.
- vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
- extswitch = self._find_external_network()
- if extswitch is None:
- raise vmutils.HyperVException(_('Cannot find vSwitch'))
- vm = vms[0]
- switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
- #Find the default nic and clone it to create a new nic for the vm.
- #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
- #Linux Integration Components installed.
+ #Create a new nic
syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
default_nic_data = [n for n in syntheticnics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_SyntheticEthernetPortSettingData',
default_nic_data[0])
- #Create a port on the vswitch.
- (new_port, ret_val) = switch_svc.CreateSwitchPort(
- Name=str(uuid.uuid4()),
- FriendlyName=vm_name,
- ScopeOfResidence="",
- VirtualSwitch=extswitch.path_())
- if ret_val != 0:
- LOG.error(_('Failed creating a port on the external vswitch'))
- raise vmutils.HyperVException(_('Failed creating port for %s') %
- vm_name)
- ext_path = extswitch.path_()
- LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
- % locals())
- #Connect the new nic to the new port.
- new_nic_data.Connection = [new_port]
- new_nic_data.ElementName = vm_name + ' nic'
- new_nic_data.Address = mac
+
+ #Configure the nic
+ new_nic_data.ElementName = vif['id']
+ new_nic_data.Address = vif['address'].replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- #Add the new nic to the vm.
+
+ #Add the new nic to the vm
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
new_resources = self._vmutils.add_virt_resource(self._conn,
new_nic_data, vm)
if new_resources is None:
@@ -414,33 +411,6 @@ class VMOps(baseops.BaseOps):
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
- def _find_external_network(self):
- """Find the vswitch that is connected to the physical nic.
- Assumes only one physical nic on the host
- """
- #If there are no physical nics connected to networks, return.
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- if CONF.vswitch_name:
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- bound = self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)
- else:
- LOG.debug(_("No vSwitch specified, attaching to default"))
- self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
- if len(bound) == 0:
- return None
- if CONF.vswitch_name:
- return self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)[0]\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
- else:
- return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
-
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
vm = self._vmutils.lookup(self._conn, instance['name'])
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index bae8a1f1a..d899f977d 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -130,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM."""
+ """Adds a new resource to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -145,8 +145,20 @@ class VMUtils(object):
else:
return None
+ def modify_virt_resource(self, conn, res_setting_data, target_vm):
+ """Updates a VM resource."""
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ ResourceSettingData=[res_setting_data.GetText_(1)],
+ ComputerSystem=target_vm.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self.check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ return success
+
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM."""
+ """Removes a VM resource."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 222e6d52d..6785c8823 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 Red Hat, Inc.
+# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -18,7 +18,11 @@
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
-and support conversion to/from XML
+and support conversion to/from XML. These classes are solely concerned
+by providing direct Object <-> XML document conversions. No policy or
+operational decisions should be made by code in these classes. Such
+policy belongs in the 'designer.py' module which provides simplified
+helpers for populating config object instances.
"""
from nova import exception
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
new file mode 100644
index 000000000..b832db4fa
--- /dev/null
+++ b/nova/virt/libvirt/designer.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Policy based configuration of libvirt objects
+
+This module provides helper APIs for populating the config.py
+classes based on common operational needs / policies
+"""
+
+from nova.virt import netutils
+
+
+def set_vif_guest_frontend_config(conf, mac, model, driver):
+ """Populate a LibvirtConfigGuestInterface instance
+ with guest frontend details"""
+ conf.mac_addr = mac
+ if model is not None:
+ conf.model = model
+ if driver is not None:
+ conf.driver_name = driver
+
+
+def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for a software bridge"""
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ethernet_config(conf, tapname):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an externally configured
+ host device.
+
+ NB use of this configuration is discouraged by
+ libvirt project and will mark domains as 'tainted'"""
+
+ conf.net_type = "ethernet"
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an OpenVSwitch bridge"""
+
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ conf.vporttype = "openvswitch"
+ conf.add_vport_param("interfaceid", interfaceid)
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_filter_config(conf, name,
+ primary_addr,
+ dhcp_server=None,
+ ra_server=None,
+ allow_same_net=False,
+ ipv4_cidr=None,
+ ipv6_cidr=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for traffic filtering"""
+
+ conf.filtername = name
+ conf.add_filter_param("IP", primary_addr)
+
+ if dhcp_server:
+ conf.add_filter_param("DHCPSERVER", dhcp_server)
+
+ if ra_server:
+ conf.add_filter_param("RASERVER", ra_server)
+
+ if allow_same_net:
+ if ipv4_cidr:
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ conf.add_filter_param("PROJNET", net)
+ conf.add_filter_param("PROJMASK", mask)
+
+ if ipv6_cidr:
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ conf.add_filter_param("PROJNET6", net)
+ conf.add_filter_param("PROJMASK6", prefix)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 42d9dd99b..4312086a8 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -586,7 +586,7 @@ class LibvirtDriver(driver.ComputeDriver):
mount_device)
if destroy_disks:
- target = os.path.join(CONF.instances_path, instance['name'])
+ target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
@@ -642,8 +642,7 @@ class LibvirtDriver(driver.ComputeDriver):
}
def _cleanup_resize(self, instance, network_info):
- target = os.path.join(CONF.instances_path,
- instance['name'] + "_resize")
+ target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
@@ -661,7 +660,6 @@ class LibvirtDriver(driver.ComputeDriver):
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
- @exception.wrap_exception()
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
@@ -716,7 +714,6 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info)
return xml
- @exception.wrap_exception()
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
mount_device = mountpoint.rpartition("/")[2]
@@ -749,7 +746,6 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- @exception.wrap_exception()
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
@@ -845,7 +841,6 @@ class LibvirtDriver(driver.ComputeDriver):
metadata,
image_file)
- @exception.wrap_exception()
def reboot(self, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
@@ -932,24 +927,20 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
- @exception.wrap_exception()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
- @exception.wrap_exception()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
- @exception.wrap_exception()
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
@@ -958,20 +949,17 @@ class LibvirtDriver(driver.ComputeDriver):
instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
- @exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- @exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
@@ -979,7 +967,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- @exception.wrap_exception()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
@@ -990,11 +977,9 @@ class LibvirtDriver(driver.ComputeDriver):
data recovery.
"""
-
+ instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_domain_xml(instance, network_info)
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
@@ -1010,24 +995,20 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
self._create_domain(xml)
- @exception.wrap_exception()
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
- rescue_files = os.path.join(CONF.instances_path, instance['name'],
- "*.rescue")
+ rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
- @exception.wrap_exception()
def poll_rebooting_instances(self, timeout, instances):
pass
@@ -1042,7 +1023,6 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
- @exception.wrap_exception()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
xml = self.to_xml(instance, network_info, image_meta,
@@ -1083,7 +1063,6 @@ class LibvirtDriver(driver.ComputeDriver):
fp.write(data)
return fpath
- @exception.wrap_exception()
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
@@ -1134,9 +1113,9 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
- console_log = self._get_console_log_path(instance['name'])
+ console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
@@ -1150,7 +1129,6 @@ class LibvirtDriver(driver.ComputeDriver):
def get_host_ip_addr():
return CONF.my_ip
- @exception.wrap_exception()
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
@@ -1227,11 +1205,12 @@ class LibvirtDriver(driver.ComputeDriver):
utils.mkfs('swap', target)
@staticmethod
- def _get_console_log_path(instance_name):
- return os.path.join(CONF.instances_path, instance_name, 'console.log')
+ def _get_console_log_path(instance):
+ return os.path.join(libvirt_utils.get_instance_path(instance),
+ 'console.log')
- def _chown_console_log_for_instance(self, instance_name):
- console_log = self._get_console_log_path(instance_name)
+ def _chown_console_log_for_instance(self, instance):
+ console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
@@ -1243,12 +1222,11 @@ class LibvirtDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(CONF.instances_path,
- instance['name'],
+ return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
- return self.image_backend.image(instance['name'],
+ return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
@@ -1261,11 +1239,11 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
# NOTE(dprince): for rescue console.log may already exist... chown it.
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
# NOTE(vish): No need add the suffix to console.log
libvirt_utils.write_to_file(
- self._get_console_log_path(instance['name']), '', 007)
+ self._get_console_log_path(instance), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
@@ -1472,6 +1450,7 @@ class LibvirtDriver(driver.ComputeDriver):
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
+ guestcpu.features.append(guestfeat)
return guestcpu
@@ -1537,9 +1516,8 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
- fs.source_dir = os.path.join(CONF.instances_path,
- instance['name'],
- 'rootfs')
+ fs.source_dir = os.path.join(
+ libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if image_meta and image_meta.get('disk_format') == 'iso':
@@ -1557,8 +1535,7 @@ class LibvirtDriver(driver.ComputeDriver):
def disk_info(name, disk_dev, disk_bus=default_disk_bus,
device_type="disk"):
- image = self.image_backend.image(instance['name'],
- name)
+ image = self.image_backend.image(instance, name)
return image.libvirt_info(disk_bus,
disk_dev,
device_type,
@@ -1645,9 +1622,8 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(CONF.instances_path,
- instance['name'],
- "disk.config")
+ diskconfig.source_path = os.path.join(
+                libvirt_utils.get_instance_path(instance), "disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
devices.append(diskconfig)
@@ -1675,6 +1651,7 @@ class LibvirtDriver(driver.ComputeDriver):
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = instance['instance_type']
+ inst_path = libvirt_utils.get_instance_path(instance)
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
@@ -1733,9 +1710,7 @@ class LibvirtDriver(driver.ComputeDriver):
if rescue:
if rescue.get('kernel_id'):
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel.rescue")
+ guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
@@ -1743,22 +1718,16 @@ class LibvirtDriver(driver.ComputeDriver):
(root_device_name or "/dev/vda",))
if rescue.get('ramdisk_id'):
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk.rescue")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel")
+ guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
(root_device_name or "/dev/vda",))
if instance['ramdisk_id']:
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
@@ -1806,8 +1775,7 @@ class LibvirtDriver(driver.ComputeDriver):
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = self._get_console_log_path(
- instance['name'])
+ consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
@@ -1877,18 +1845,23 @@ class LibvirtDriver(driver.ComputeDriver):
'cpu_time': cpu_time}
def _create_domain(self, xml=None, domain=None,
- inst_name='', launch_flags=0):
+ instance=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
+ inst_path = None
+ if instance:
+ inst_path = libvirt_utils.get_instance_path(instance)
+
if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- inst_name,
- 'rootfs')
+ if not inst_path:
+ inst_path = None
+
+ container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
- image = self.image_backend.image(inst_name, 'disk')
+ image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
@@ -1902,9 +1875,7 @@ class LibvirtDriver(driver.ComputeDriver):
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- inst_name,
- 'rootfs')
+ container_dir = os.path.join(inst_path, 'rootfs')
disk.teardown_container(container_dir=container_dir)
return domain
@@ -1926,7 +1897,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- domain = self._create_domain(xml, inst_name=instance['name'])
+ domain = self._create_domain(xml, instance=instance)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
@@ -2615,7 +2586,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
- instance_dir = os.path.join(CONF.instances_path, instance['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
@@ -2645,14 +2616,13 @@ class LibvirtDriver(driver.ComputeDriver):
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Create the instance directory on destination compute node.
- instance_dir = os.path.join(CONF.instances_path,
- instance_ref['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Touch the console.log file, required by libvirt.
- console_file = self._get_console_log_path(instance_ref['name'])
+ console_file = self._get_console_log_path(instance_ref)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
@@ -2701,7 +2671,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
- instance_dir = os.path.join(CONF.instances_path, instance['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
@@ -2720,7 +2690,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
- image = self.image_backend.image(instance['name'],
+ image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
@@ -2751,8 +2721,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = os.path.join(CONF.instances_path,
- instance_ref["name"])
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
# libvirt.xml
@@ -2891,7 +2860,6 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
pass
- @exception.wrap_exception()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
@@ -2915,7 +2883,7 @@ class LibvirtDriver(driver.ComputeDriver):
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
same_host = (dest == self.get_host_ip_addr())
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
@@ -2957,7 +2925,6 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
- @exception.wrap_exception()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
@@ -3010,13 +2977,12 @@ class LibvirtDriver(driver.ComputeDriver):
instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
@@ -3122,12 +3088,10 @@ class LibvirtDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
# ensure directories exist and are writable
- instance_path = os.path.join(CONF.instances_path, instance["name"])
-
+ instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessability'
'%(instance_path)s')
% locals())
-
return os.access(instance_path, os.W_OK)
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index f4c41f539..d272e408c 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -142,8 +142,9 @@ class Raw(Image):
def __init__(self, instance=None, name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -170,8 +171,9 @@ class Qcow2(Image):
def __init__(self, instance=None, name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -208,7 +210,7 @@ class Lvm(Image):
' libvirt_images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
- self.lv = '%s_%s' % (self.escape(instance),
+ self.lv = '%s_%s' % (self.escape(instance['name']),
self.escape(name))
self.path = os.path.join('/dev', self.vg, self.lv)
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 9c8d192c7..4b3517da7 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -30,6 +30,7 @@ from nova import utils
from nova.virt import images
CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
@@ -498,3 +499,19 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
"""Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
+
+
+def get_instance_path(instance):
+ """Determine the correct path for instance storage.
+
+ This used to be calculated all over the place. This method centralizes
+ this into one location, which will make it easier to change the
+ algorithm used to name instance storage directories.
+
+ :param instance: the instance we want a path for
+
+ :returns: a path to store information about that instance
+ """
+ # TODO(mikal): we should use UUID instead of name, as name isn't
+    # necessarily unique
+ return os.path.join(CONF.instances_path, instance['name'])
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 1dc30f73e..54de9da2d 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -21,19 +21,17 @@
from nova import exception
from nova.network import linux_net
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-from nova.virt import netutils
from nova.virt.libvirt import config as vconfig
-
+from nova.virt.libvirt import designer
+from nova.virt import netutils
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
- cfg.StrOpt('libvirt_ovs_bridge',
- default='br-int',
- help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
@@ -44,21 +42,28 @@ CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
-LINUX_DEV_LEN = 14
-
class LibvirtBaseVIFDriver(object):
+ def get_vif_devname(self, mapping):
+ if 'vif_devname' in mapping:
+ return mapping['vif_devname']
+ return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
+
def get_config(self, instance, network, mapping):
conf = vconfig.LibvirtConfigGuestInterface()
- conf.mac_addr = mapping['mac']
- if CONF.libvirt_type in ('kvm', 'qemu') and \
- CONF.libvirt_use_virtio_for_bridges:
- conf.model = "virtio"
+ model = None
+ driver = None
+ if (CONF.libvirt_type in ('kvm', 'qemu') and
+ CONF.libvirt_use_virtio_for_bridges):
+ model = "virtio"
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if CONF.libvirt_type == "qemu":
- conf.driver_name = "qemu"
+ driver = "qemu"
+
+ designer.set_vif_guest_frontend_config(
+ conf, mapping['mac'], model, driver)
return conf
@@ -75,28 +80,26 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
self).get_config(instance,
network,
mapping)
- conf.net_type = "bridge"
- conf.source_dev = network['bridge']
- conf.script = ""
- conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
- conf.add_filter_param("IP", mapping['ips'][0]['ip'])
- if mapping['dhcp_server']:
- conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
- if CONF.use_ipv6:
- conf.add_filter_param("RASERVER",
- mapping.get('gateway_v6') + "/128")
+ name = "nova-instance-" + instance['name'] + "-" + mac_id
+ primary_addr = mapping['ips'][0]['ip']
+ dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
+ if mapping['dhcp_server']:
+ dhcp_server = mapping['dhcp_server']
+ if CONF.use_ipv6:
+ ra_server = mapping.get('gateway_v6') + "/128"
if CONF.allow_same_net_traffic:
- net, mask = netutils.get_net_and_mask(network['cidr'])
- conf.add_filter_param("PROJNET", net)
- conf.add_filter_param("PROJMASK", mask)
+ ipv4_cidr = network['cidr']
if CONF.use_ipv6:
- net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
- network['cidr_v6'])
- conf.add_filter_param("PROJNET6", net_v6)
- conf.add_filter_param("PROJMASK6", prefixlen_v6)
+ ipv6_cidr = network['cidr_v6']
+
+ designer.set_vif_host_backend_filter_config(
+ conf, name, primary_addr, dhcp_server,
+ ra_server, ipv4_cidr, ipv6_cidr)
return conf
@@ -135,42 +138,37 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (0.9.10 or earlier).
"""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- dev = self.get_dev_name(mapping['vif_uuid'])
+ dev = self.get_vif_devname(mapping)
conf = super(LibvirtOpenVswitchDriver,
self).get_config(instance,
network,
mapping)
- conf.net_type = "ethernet"
- conf.target_dev = dev
- conf.script = ""
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
- def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
+ def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
- CONF.libvirt_ovs_bridge, dev,
- '--', 'set', 'Interface', dev,
- 'external-ids:iface-id=%s' % iface_id,
- 'external-ids:iface-status=active',
- 'external-ids:attached-mac=%s' % mac,
- 'external-ids:vm-uuid=%s' % instance_id,
- run_as_root=True)
-
- def delete_ovs_vif_port(self, dev):
- utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge,
- dev, run_as_root=True)
+ bridge, dev,
+ '--', 'set', 'Interface', dev,
+ 'external-ids:iface-id=%s' % iface_id,
+ 'external-ids:iface-status=active',
+ 'external-ids:attached-mac=%s' % mac,
+ 'external-ids:vm-uuid=%s' % instance_id,
+ run_as_root=True)
+
+ def delete_ovs_vif_port(self, bridge, dev):
+ utils.execute('ovs-vsctl', 'del-port', bridge, dev,
+ run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
+ dev = self.get_vif_devname(mapping)
if not linux_net.device_exists(dev):
# Older version of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
@@ -185,14 +183,16 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
- self.create_ovs_vif_port(dev, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ dev, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
- self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid']))
+ self.delete_ovs_vif_port(network['bridge'],
+ self.get_vif_devname(mapping))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -208,11 +208,11 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
"""
def get_br_name(self, iface_id):
- return ("qbr" + iface_id)[:LINUX_DEV_LEN]
+ return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
- return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
- ("qvo%s" % iface_id)[:LINUX_DEV_LEN])
+ return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
+ ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_config(self, instance, network, mapping):
br_name = self.get_br_name(mapping['vif_uuid'])
@@ -243,7 +243,8 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
- self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ v2_name, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
@@ -263,7 +264,7 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
- self.delete_ovs_vif_port(v2_name)
+ self.delete_ovs_vif_port(network['bridge'], v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -279,10 +280,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
network,
mapping)
- conf.net_type = "bridge"
- conf.source_dev = CONF.libvirt_ovs_bridge
- conf.vporttype = "openvswitch"
- conf.add_vport_param("interfaceid", mapping['vif_uuid'])
+ designer.set_vif_host_backend_ovs_config(
+ conf, network['bridge'], mapping['vif_uuid'],
+ self.get_vif_devname(mapping))
return conf
@@ -297,18 +297,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
- def get_bridge_name(self, network_id):
- return ("brq" + network_id)[:LINUX_DEV_LEN]
-
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
-
- bridge = self.get_bridge_name(network['id'])
- linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None,
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(network['bridge'],
+ None,
filtering=False)
conf = super(QuantumLinuxBridgeVIFDriver,
@@ -316,9 +307,8 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
network,
mapping)
- conf.target_dev = dev
- conf.net_type = "bridge"
- conf.source_dev = bridge
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
return conf
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index fa6f6ceb5..66e7d9b02 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -18,4 +18,4 @@
:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
-from nova.virt.vmwareapi.driver import VMWareESXDriver
+from nova.virt.vmwareapi.driver import VMwareESXDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 8734df1f6..986c4ef28 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -52,29 +52,29 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMWare ESX host.Required if '
- 'compute_driver is vmwareapi.VMWareESXDriver.'),
+ help='URL for connection to VMware ESX host. Required if '
+ 'compute_driver is vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
- help='Username for connection to VMWare ESX host. '
+ help='Username for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
- help='Password for connection to VMWare ESX host. '
+ help='Password for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
]
CONF = cfg.CONF
@@ -93,11 +93,11 @@ class Failure(Exception):
return str(self.details)
-class VMWareESXDriver(driver.ComputeDriver):
+class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, virtapi, read_only=False, scheme="https"):
- super(VMWareESXDriver, self).__init__(virtapi)
+ super(VMwareESXDriver, self).__init__(virtapi)
host_ip = CONF.vmwareapi_host_ip
host_username = CONF.vmwareapi_host_username
@@ -107,11 +107,11 @@ class VMWareESXDriver(driver.ComputeDriver):
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
- "compute_driver=vmwareapi.VMWareESXDriver"))
+ " compute_driver=vmwareapi.VMwareESXDriver"))
- session = VMWareAPISession(host_ip, host_username, host_password,
+ session = VMwareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
- self._vmops = vmops.VMWareVMOps(session)
+ self._vmops = vmops.VMwareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
@@ -209,7 +209,7 @@ class VMWareESXDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance, network_info)
-class VMWareAPISession(object):
+class VMwareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index fdf85dc8b..3f5041c22 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -16,7 +16,7 @@
# under the License.
"""
-A fake VMWare VI API implementation.
+A fake VMware VI API implementation.
"""
import pprint
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_util.py
index a3b20137d..a3b20137d 100644
--- a/nova/virt/vmwareapi/network_utils.py
+++ b/nova/virt/vmwareapi/network_util.py
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index 52d095ad3..39ea8e2e8 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -108,8 +108,8 @@ class VMwareHTTPFile(object):
raise NotImplementedError
-class VMWareHTTPWriteFile(VMwareHTTPFile):
- """VMWare file write handler class."""
+class VMwareHTTPWriteFile(VMwareHTTPFile):
+ """VMware file write handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, file_size, scheme="https"):
@@ -140,12 +140,12 @@ class VMWareHTTPWriteFile(VMwareHTTPFile):
self.conn.getresponse()
except Exception, excep:
LOG.debug(_("Exception during HTTP connection close in "
- "VMWareHTTpWrite. Exception is %s") % excep)
- super(VMWareHTTPWriteFile, self).close()
+ "VMwareHTTPWrite. Exception is %s") % excep)
+ super(VMwareHTTPWriteFile, self).close()
-class VmWareHTTPReadFile(VMwareHTTPFile):
- """VMWare file read handler class."""
+class VMwareHTTPReadFile(VMwareHTTPFile):
+ """VMware file read handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, scheme="https"):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index 4d53e266d..c5b524186 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -15,12 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""VIF drivers for VMWare."""
+"""VIF drivers for VMware."""
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
LOG = logging.getLogger(__name__)
@@ -44,28 +44,28 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
- if not network_utils.check_if_vlan_interface_exists(session,
+ if not network_util.check_if_vlan_interface_exists(session,
vlan_interface):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
- vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
+ vswitch_associated = network_util.get_vswitch_for_vlan_interface(
session, vlan_interface)
if vswitch_associated is None:
raise exception.SwitchNotFoundForNetworkAdapter(
adapter=vlan_interface)
# Check whether bridge already exists and retrieve the the ref of the
# network whose name_label is "bridge"
- network_ref = network_utils.get_network_with_the_name(session, bridge)
+ network_ref = network_util.get_network_with_the_name(session, bridge)
if network_ref is None:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
- network_utils.create_port_group(session, bridge,
+ network_util.create_port_group(session, bridge,
vswitch_associated, vlan_num)
else:
# Get the vlan id and vswitch corresponding to the port group
- _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup
+ _get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
# Check if the vswitch associated is proper
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 194b78a1d..83d120df5 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -101,69 +102,65 @@ class Vim:
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
- try:
- return getattr(self, attr_name)
- except AttributeError:
-
- def vim_request_handler(managed_object, **kwargs):
- """
- Builds the SOAP message and parses the response for fault
- checking and other errors.
-
- managed_object : Managed Object Reference or Managed
- Object Name
- **kwargs : Keyword arguments of the call
- """
- # Dynamic handler for VI SDK Calls
- try:
- request_mo = self._request_managed_object_builder(
- managed_object)
- request = getattr(self.client.service, attr_name)
- response = request(request_mo, **kwargs)
- # To check for the faults that are part of the message body
- # and not returned as Fault object response from the ESX
- # SOAP server
- if hasattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker"):
- fault_checker = getattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker")
- fault_checker(response)
- return response
- # Catch the VimFaultException that is raised by the fault
- # check of the SOAP response
- except error_util.VimFaultException, excep:
- raise
- except suds.WebFault, excep:
- doc = excep.document
- detail = doc.childAtPath("/Envelope/Body/Fault/detail")
- fault_list = []
- for child in detail.getChildren():
- fault_list.append(child.get("type"))
- raise error_util.VimFaultException(fault_list, excep)
- except AttributeError, excep:
- raise error_util.VimAttributeError(_("No such SOAP method "
- "'%s' provided by VI SDK") % (attr_name), excep)
- except (httplib.CannotSendRequest,
- httplib.ResponseNotReady,
- httplib.CannotSendHeader), excep:
- raise error_util.SessionOverLoadException(_("httplib "
- "error in %s: ") % (attr_name), excep)
- except Exception, excep:
- # Socket errors which need special handling for they
- # might be caused by ESX API call overload
- if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
- str(excep).find(CONN_ABORT_ERROR)) != -1:
- raise error_util.SessionOverLoadException(_("Socket "
- "error in %s: ") % (attr_name), excep)
- # Type error that needs special handling for it might be
- # caused by ESX host API call overload
- elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
- raise error_util.SessionOverLoadException(_("Type "
- "error in %s: ") % (attr_name), excep)
- else:
- raise error_util.VimException(
- _("Exception in %s ") % (attr_name), excep)
- return vim_request_handler
+ def vim_request_handler(managed_object, **kwargs):
+ """
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ managed_object : Managed Object Reference or Managed
+ Object Name
+ **kwargs : Keyword arguments of the call
+ """
+ # Dynamic handler for VI SDK Calls
+ try:
+ request_mo = self._request_managed_object_builder(
+ managed_object)
+ request = getattr(self.client.service, attr_name)
+ response = request(request_mo, **kwargs)
+ # To check for the faults that are part of the message body
+ # and not returned as Fault object response from the ESX
+ # SOAP server
+ if hasattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker"):
+ fault_checker = getattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker")
+ fault_checker(response)
+ return response
+ # Catch the VimFaultException that is raised by the fault
+ # check of the SOAP response
+ except error_util.VimFaultException, excep:
+ raise
+ except suds.WebFault, excep:
+ doc = excep.document
+ detail = doc.childAtPath("/Envelope/Body/Fault/detail")
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get("type"))
+ raise error_util.VimFaultException(fault_list, excep)
+ except AttributeError, excep:
+ raise error_util.VimAttributeError(_("No such SOAP method "
+ "'%s' provided by VI SDK") % (attr_name), excep)
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader), excep:
+ raise error_util.SessionOverLoadException(_("httplib "
+ "error in %s: ") % (attr_name), excep)
+ except Exception, excep:
+ # Socket errors which need special handling for they
+ # might be caused by ESX API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR)) != -1:
+ raise error_util.SessionOverLoadException(_("Socket "
+ "error in %s: ") % (attr_name), excep)
+ # Type error that needs special handling for it might be
+ # caused by ESX host API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type "
+ "error in %s: ") % (attr_name), excep)
+ else:
+ raise error_util.VimException(
+ _("Exception in %s ") % (attr_name), excep)
+ return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 740355679..e03b88804 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -26,7 +26,7 @@ def build_datastore_path(datastore_name, path):
def split_datastore_path(datastore_path):
"""
- Split the VMWare style datastore path to get the Datastore
+ Split the VMware style datastore path to get the Datastore
name and the entity path.
"""
spl = datastore_path.split('[', 1)[1].split(']', 1)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index e591245e2..883e751a8 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -32,7 +32,7 @@ from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -49,7 +49,7 @@ VMWARE_POWER_STATES = {
'suspended': power_state.PAUSED}
-class VMWareVMOps(object):
+class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session):
@@ -157,7 +157,7 @@ class VMWareVMOps(object):
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
def _check_if_network_bridge_exists(network_name):
- network_ref = network_utils.get_network_with_the_name(
+ network_ref = network_util.get_network_with_the_name(
self._session, network_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 15237fd5b..7c4480ea0 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -50,11 +50,11 @@ def start_transfer(context, read_file_handle, data_size,
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
- # In case of Glance - VMWare transfer, we just need a handle to the
- # HTTP Connection that is to send transfer data to the VMWare datastore.
+ # In case of Glance - VMware transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
- # In case of VMWare - Glance transfer, we relinquish VMWare HTTP file read
+ # In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glnace changing to active.
# The GlanceWriteThread handles the same for us.
@@ -96,7 +96,7 @@ def fetch_image(context, image, instance, **kwargs):
f = StringIO.StringIO()
image_service.download(context, image_id, f)
read_file_handle = read_write_util.GlanceFileRead(f)
- write_file_handle = read_write_util.VMWareHTTPWriteFile(
+ write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
@@ -113,7 +113,7 @@ def upload_image(context, image, instance, **kwargs):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug(_("Uploading image %s to the Glance image server") % image,
instance=instance)
- read_file_handle = read_write_util.VmWareHTTPReadFile(
+ read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6a0116098..debba4f02 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -883,11 +883,6 @@ def generate_configdrive(session, instance, vm_ref, userdevice,
try:
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- dev_path = utils.make_dev_path(dev)
-
- # NOTE(mikal): libvirt supports injecting the admin password as
- # well. This is not currently implemented for xenapi as it is not
- # supported by the existing file injection
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
@@ -899,6 +894,7 @@ def generate_configdrive(session, instance, vm_ref, userdevice,
tmp_file = os.path.join(tmp_path, 'configdrive')
cdb.make_drive(tmp_file)
+ dev_path = utils.make_dev_path(dev)
utils.execute('dd',
'if=%s' % tmp_file,
'of=%s' % dev_path,
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 514295605..fccdedac8 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -42,6 +42,9 @@ cinder_opts = [
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
+ cfg.StrOpt('os_region_name',
+ default=None,
+ help='region name of this node'),
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
@@ -66,7 +69,16 @@ def cinderclient(context):
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
- url = sc.url_for(service_type=service_type,
+ # extract the region if set in configuration
+ if CONF.os_region_name:
+ attr = 'region'
+ filter_value = CONF.os_region_name
+ else:
+ attr = None
+ filter_value = None
+ url = sc.url_for(attr=attr,
+ filter_value=filter_value,
+ service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c103526da..16851dba8 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -83,13 +83,21 @@ class Server(object):
raise exception.InvalidInput(
reason='The backlog must be more than 1')
+ bind_addr = (host, port)
+ # TODO(dims): eventlet's green dns/socket module does not actually
+ # support IPv6 in getaddrinfo(). We need to get around this in the
+ # future or monitor upstream for a fix
try:
- socket.inet_pton(socket.AF_INET6, host)
- family = socket.AF_INET6
+ info = socket.getaddrinfo(bind_addr[0],
+ bind_addr[1],
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0]
+ family = info[0]
+ bind_addr = info[-1]
except Exception:
family = socket.AF_INET
- self._socket = eventlet.listen((host, port), family, backlog=backlog)
+ self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)