-rwxr-xr-x  bin/nova-compute  3
-rwxr-xr-x  bin/nova-dhcpbridge  2
-rwxr-xr-x  bin/nova-manage  2
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.json  12
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.xml  4
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json  20
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml  5
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json  94
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml  23
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-req.json  10
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml  9
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json  20
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml  5
-rw-r--r--  doc/api_samples/os-tenant-networks/networks-list-res.json (renamed from doc/api_samples/os-networks/networks-list-res.json)  0
-rw-r--r--  doc/api_samples/os-tenant-networks/networks-post-res.json (renamed from doc/api_samples/os-networks/networks-post-res.json)  0
-rw-r--r--  nova/api/ec2/cloud.py  2
-rw-r--r--  nova/api/openstack/compute/contrib/admin_networks.py  170
-rw-r--r--  nova/api/openstack/compute/contrib/coverage_ext.py  3
-rw-r--r--  nova/api/openstack/compute/contrib/hosts.py  78
-rw-r--r--  nova/api/openstack/compute/contrib/networks_associate.py  2
-rw-r--r--  nova/api/openstack/compute/contrib/os_networks.py  261
-rw-r--r--  nova/api/openstack/compute/contrib/os_tenant_networks.py  214
-rw-r--r--  nova/api/openstack/compute/contrib/services.py  2
-rw-r--r--  nova/api/openstack/compute/servers.py  24
-rw-r--r--  nova/api/sizelimit.py  2
-rw-r--r--  nova/compute/api.py  377
-rw-r--r--  nova/compute/cells_api.py  77
-rw-r--r--  nova/compute/manager.py  269
-rw-r--r--  nova/compute/resource_tracker.py  13
-rw-r--r--  nova/compute/rpcapi.py  10
-rw-r--r--  nova/conductor/__init__.py  3
-rw-r--r--  nova/conductor/api.py  79
-rw-r--r--  nova/conductor/manager.py  35
-rw-r--r--  nova/conductor/rpcapi.py  25
-rw-r--r--  nova/console/manager.py  2
-rw-r--r--  nova/console/vmrc_manager.py  4
-rw-r--r--  nova/db/api.py  23
-rw-r--r--  nova/db/sqlalchemy/api.py  77
-rw-r--r--  nova/db/sqlalchemy/models.py  13
-rw-r--r--  nova/exception.py  27
-rw-r--r--  nova/locale/nova.pot  4
-rw-r--r--  nova/manager.py  5
-rw-r--r--  nova/network/api.py  14
-rw-r--r--  nova/network/linux_net.py  14
-rw-r--r--  nova/network/manager.py  1
-rw-r--r--  nova/network/model.py  16
-rw-r--r--  nova/network/quantumv2/api.py  35
-rw-r--r--  nova/quota.py  83
-rw-r--r--  nova/scheduler/driver.py  13
-rw-r--r--  nova/scheduler/manager.py  7
-rw-r--r--  nova/service.py  36
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_hosts.py  10
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_networks.py  4
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_services.py  18
-rw-r--r--  nova/tests/api/openstack/compute/test_extensions.py  1
-rw-r--r--  nova/tests/baremetal/test_driver.py  13
-rw-r--r--  nova/tests/compute/test_compute.py  497
-rw-r--r--  nova/tests/compute/test_compute_cells.py  86
-rw-r--r--  nova/tests/compute/test_host_api.py  175
-rw-r--r--  nova/tests/compute/test_resource_tracker.py  36
-rw-r--r--  nova/tests/compute/test_rpcapi.py  3
-rw-r--r--  nova/tests/conductor/test_conductor.py  108
-rw-r--r--  nova/tests/fake_imagebackend.py  2
-rw-r--r--  nova/tests/fake_libvirt_utils.py  11
-rw-r--r--  nova/tests/fake_policy.py  6
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz  bin 291 -> 291 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz  bin 0 -> 618 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz  bin 735 -> 734 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz  bin 313 -> 313 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz  bin 433 -> 430 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz  bin 645 -> 725 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz  bin 17580 -> 21340 bytes
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl  12
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl  4
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl  20
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl  5
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl  94
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl  23
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl  10
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl  9
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl  20
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl)  0
-rw-r--r--  nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl)  0
-rw-r--r--  nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl)  0
-rw-r--r--  nova/tests/integrated/test_api_samples.py  93
-rw-r--r--  nova/tests/integrated/test_extensions.py  1
-rw-r--r--  nova/tests/network/test_api.py  22
-rw-r--r--  nova/tests/network/test_manager.py  4
-rw-r--r--  nova/tests/network/test_quantumv2.py  8
-rw-r--r--  nova/tests/scheduler/test_scheduler.py  113
-rw-r--r--  nova/tests/test_configdrive2.py  8
-rw-r--r--  nova/tests/test_db_api.py  21
-rw-r--r--  nova/tests/test_exception.py  12
-rw-r--r--  nova/tests/test_hypervapi.py  8
-rw-r--r--  nova/tests/test_imagebackend.py  11
-rw-r--r--  nova/tests/test_libvirt.py  22
-rw-r--r--  nova/tests/test_libvirt_vif.py  7
-rw-r--r--  nova/tests/test_metadata.py  10
-rw-r--r--  nova/tests/test_periodic_tasks.py  14
-rw-r--r--  nova/tests/test_quota.py  47
-rw-r--r--  nova/tests/test_service.py  7
-rw-r--r--  nova/tests/test_virt_drivers.py  2
-rw-r--r--  nova/tests/test_vmwareapi.py  10
-rw-r--r--  nova/tests/utils.py  3
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py  89
-rw-r--r--  nova/tests/virt/xenapi/test_volumeops.py  54
-rw-r--r--  nova/tests/vmwareapi/stubs.py  14
-rw-r--r--  nova/virt/baremetal/driver.py  26
-rw-r--r--  nova/virt/baremetal/volume_driver.py  1
-rw-r--r--  nova/virt/configdrive.py  43
-rw-r--r--  nova/virt/driver.py  29
-rw-r--r--  nova/virt/hyperv/vif.py  133
-rw-r--r--  nova/virt/hyperv/vmops.py  96
-rw-r--r--  nova/virt/hyperv/vmutils.py  16
-rw-r--r--  nova/virt/libvirt/config.py  8
-rw-r--r--  nova/virt/libvirt/designer.py  101
-rw-r--r--  nova/virt/libvirt/driver.py  187
-rw-r--r--  nova/virt/libvirt/imagebackend.py  12
-rw-r--r--  nova/virt/libvirt/utils.py  47
-rw-r--r--  nova/virt/libvirt/vif.py  138
-rw-r--r--  nova/virt/vmwareapi/__init__.py  2
-rw-r--r--  nova/virt/vmwareapi/driver.py  28
-rw-r--r--  nova/virt/vmwareapi/fake.py  2
-rw-r--r--  nova/virt/vmwareapi/network_util.py (renamed from nova/virt/vmwareapi/network_utils.py)  0
-rw-r--r--  nova/virt/vmwareapi/read_write_util.py  12
-rw-r--r--  nova/virt/vmwareapi/vif.py  14
-rw-r--r--  nova/virt/vmwareapi/vim.py  123
-rw-r--r--  nova/virt/vmwareapi/vm_util.py  2
-rw-r--r--  nova/virt/vmwareapi/vmops.py  6
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py  10
-rw-r--r--  nova/virt/xenapi/vm_utils.py  47
-rw-r--r--  nova/virt/xenapi/vmops.py  21
-rw-r--r--  nova/virt/xenapi/volume_utils.py  26
-rw-r--r--  nova/virt/xenapi/volumeops.py  90
-rw-r--r--  nova/volume/cinder.py  14
-rw-r--r--  nova/wsgi.py  14
-rwxr-xr-x  tools/lintstack.sh  26
-rw-r--r--  tools/pip-requires  2
139 files changed, 3294 insertions, 1963 deletions
diff --git a/bin/nova-compute b/bin/nova-compute
index d93ddb5bd..8826015d4 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -55,6 +55,7 @@ if __name__ == '__main__':
logging.setup('nova')
utils.monkey_patch()
server = service.Service.create(binary='nova-compute',
- topic=CONF.compute_topic)
+ topic=CONF.compute_topic,
+ db_allowed=False)
service.serve(server)
service.wait()
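
Note: the new db_allowed keyword is consumed by nova.service.Service (see nova/service.py in the stat list above; its hunks are not shown in this excerpt) and presumably signals that this service should not talk to the database directly, which fits the conductor changes further down. A minimal sketch of the resulting startup call in isolation; the surrounding bootstrapping (config parsing, logging.setup, monkey_patch) is elided, so treat it as illustration rather than the full binary:

    # Sketch only: mirrors the hunk above in isolation.
    from nova import service
    from nova.openstack.common import cfg

    CONF = cfg.CONF
    # Option location as imported in nova/compute/api.py later in this diff.
    CONF.import_opt('compute_topic', 'nova.compute.rpcapi')

    server = service.Service.create(binary='nova-compute',
                                    topic=CONF.compute_topic,
                                    db_allowed=False)  # assumed: opt out of direct DB access
    service.serve(server)
    service.wait()
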
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 6187e052d..ee7bf2da9 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -111,7 +111,7 @@ CONF.register_cli_opt(
def main():
- """Parse environment and arguments and call the approproate action."""
+ """Parse environment and arguments and call the appropriate action."""
try:
config_file = os.environ['CONFIG_FILE']
except KeyError:
diff --git a/bin/nova-manage b/bin/nova-manage
index 67212a198..c783c304b 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -205,7 +205,7 @@ class ShellCommands(object):
@args('--path', dest='path', metavar='<path>', help='Script path')
def script(self, path):
- """Runs the script from the specifed path with flags set properly.
+ """Runs the script from the specified path with flags set properly.
arguments: path"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 42e86eadd..25d077f27 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -297,19 +297,19 @@
"updated": "2012-08-07T00:00:00+00:00"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "Admin-only Network Management Extension.",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "2011-12-23T00:00:00+00:00"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "Tenant-based Network Management Extension.",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "2011-12-23T00:00:00+00:00"
},
{
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index ea0b45a12..b66c3dbe7 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -125,13 +125,13 @@
<extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>Allow multiple create in the Create Server v1.1 API.</description>
</extension>
- <extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>Admin-only Network Management Extension.</description>
</extension>
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
- <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>Tenant-based Network Management Extension.</description>
</extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
new file mode 100644
index 000000000..15604fe2b
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
new file mode 100644
index 000000000..5357967f3
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
new file mode 100644
index 000000000..5bb94f348
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
new file mode 100644
index 000000000..55b54f700
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
new file mode 100644
index 000000000..83b94cea0
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "flavortest",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "100",
+ "swap": 5
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
new file mode 100644
index 000000000..b604f9bdf
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="flavortest"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="100"
+ swap="5" /> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
new file mode 100644
index 000000000..d8e75d381
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "100",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/100",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/100",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "flavortest",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
new file mode 100644
index 000000000..7b779cf3f
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="100" swap="5">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/100" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/100" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json
index b857e8112..b857e8112 100644
--- a/doc/api_samples/os-networks/networks-list-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-list-res.json
diff --git a/doc/api_samples/os-networks/networks-post-res.json b/doc/api_samples/os-tenant-networks/networks-post-res.json
index 536a9a0a4..536a9a0a4 100644
--- a/doc/api_samples/os-networks/networks-post-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-post-res.json
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 8a3fde834..414b2e969 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -149,7 +149,7 @@ def _properties_get_mappings(properties):
def _format_block_device_mapping(bdm):
- """Contruct BlockDeviceMappingItemType
+ """Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
diff --git a/nova/api/openstack/compute/contrib/admin_networks.py b/nova/api/openstack/compute/contrib/admin_networks.py
deleted file mode 100644
index f5facd601..000000000
--- a/nova/api/openstack/compute/contrib/admin_networks.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Grid Dynamics
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-import webob
-from webob import exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import exception
-from nova import network
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'admin_networks')
-authorize_view = extensions.extension_authorizer('compute',
- 'admin_networks:view')
-
-
-def network_dict(context, network):
- fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
- 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
- admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
- 'injected', 'bridge', 'vlan', 'vpn_public_address',
- 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
- 'project_id', 'host', 'bridge_interface', 'multi_host',
- 'priority', 'rxtx_base')
- if network:
- # NOTE(mnaser): We display a limited set of fields so users can know
- # what networks are available, extra system-only fields
- # are only visible if they are an admin.
- if context.is_admin:
- fields += admin_fields
- result = dict((field, network[field]) for field in fields)
- if 'uuid' in network:
- result['id'] = network['uuid']
- return result
- else:
- return {}
-
-
-class AdminNetworkController(wsgi.Controller):
-
- def __init__(self, network_api=None):
- self.network_api = network_api or network.API()
-
- def index(self, req):
- context = req.environ['nova.context']
- authorize_view(context)
- networks = self.network_api.get_all(context)
- result = [network_dict(context, net_ref) for net_ref in networks]
- return {'networks': result}
-
- @wsgi.action("disassociate")
- def _disassociate_host_and_project(self, req, id, body):
- context = req.environ['nova.context']
- authorize(context)
- LOG.debug(_("Disassociating network with id %s"), id)
-
- try:
- self.network_api.associate(context, id, host=None, project=None)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def show(self, req, id):
- context = req.environ['nova.context']
- authorize_view(context)
- LOG.debug(_("Showing network with id %s") % id)
- try:
- network = self.network_api.get(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return {'network': network_dict(context, network)}
-
- def delete(self, req, id):
- context = req.environ['nova.context']
- authorize(context)
- LOG.info(_("Deleting network with id %s") % id)
- try:
- self.network_api.delete(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def create(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
-
- def bad(e):
- return exc.HTTPUnprocessableEntity(explanation=e)
-
- if not (body and body.get("network")):
- raise bad(_("Missing network in body"))
-
- params = body["network"]
- if not params.get("label"):
- raise bad(_("Network label is required"))
-
- cidr = params.get("cidr") or params.get("cidr_v6")
- if not cidr:
- raise bad(_("Network cidr or cidr_v6 is required"))
-
- LOG.debug(_("Creating network with label %s") % params["label"])
-
- params["num_networks"] = 1
- params["network_size"] = netaddr.IPNetwork(cidr).size
-
- network = self.network_api.create(context, **params)[0]
- return {"network": network_dict(context, network)}
-
- def add(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- network_id = body.get('id', None)
- project_id = context.project_id
- LOG.debug(_("Associating network %(network)s"
- " with project %(project)s") %
- {"network": network_id or "",
- "project": project_id})
- try:
- self.network_api.add_network_to_project(
- context, project_id, network_id)
- except Exception as ex:
- msg = (_("Cannot associate network %(network)s"
- " with project %(project)s: %(message)s") %
- {"network": network_id or "",
- "project": project_id,
- "message": getattr(ex, "value", str(ex))})
- raise exc.HTTPBadRequest(explanation=msg)
-
- return webob.Response(status_int=202)
-
-
-class Admin_networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension."""
-
- name = "AdminNetworks"
- alias = "os-admin-networks"
- namespace = ("http://docs.openstack.org/compute/"
- "ext/os-admin-networks/api/v1.1")
- updated = "2011-12-23T00:00:00+00:00"
-
- def get_resources(self):
- member_actions = {'action': 'POST'}
- collection_actions = {'add': 'POST'}
- res = extensions.ResourceExtension(
- 'os-admin-networks',
- AdminNetworkController(),
- member_actions=member_actions,
- collection_actions=collection_actions)
- return [res]
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index 875fa6051..4b7d4e57f 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -203,6 +203,9 @@ class CoverageController(object):
if xml:
self.coverInst.xml_report(outfile=path)
elif html:
+ if os.path.isdir(path):
+ msg = _("Directory conflict: %s already exists")
+ raise exc.HTTPBadRequest(explanation=msg)
self.coverInst.html_report(directory=path)
else:
output = open(path, 'w')
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 52487c305..d1b39d6db 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -124,10 +124,17 @@ class HostController(object):
"""
context = req.environ['nova.context']
authorize(context)
+ filters = {}
zone = req.GET.get('zone', None)
- data = self.api.list_hosts(context, zone)
-
- return {'hosts': data}
+ if zone:
+ filters['availability_zone'] = zone
+ services = self.api.service_get_all(context, filters=filters)
+ hosts = []
+ for service in services:
+ hosts.append({'host_name': service['host'],
+ 'service': service['topic'],
+ 'zone': service['availability_zone']})
+ return {'hosts': hosts}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostUpdateDeserializer)
@@ -243,6 +250,55 @@ class HostController(object):
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
+ @staticmethod
+ def _get_total_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(total)',
+ 'cpu': compute_node['vcpus'],
+ 'memory_mb': compute_node['memory_mb'],
+ 'disk_gb': compute_node['local_gb']}}
+
+ @staticmethod
+ def _get_used_now_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(used_now)',
+ 'cpu': compute_node['vcpus_used'],
+ 'memory_mb': compute_node['memory_mb_used'],
+ 'disk_gb': compute_node['local_gb_used']}}
+
+ @staticmethod
+ def _get_resource_totals_from_instances(host_name, instances):
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
+ for instance in instances:
+ cpu_sum += instance['vcpus']
+ mem_sum += instance['memory_mb']
+ hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
+
+ return {'resource': {'host': host_name,
+ 'project': '(used_max)',
+ 'cpu': cpu_sum,
+ 'memory_mb': mem_sum,
+ 'disk_gb': hdd_sum}}
+
+ @staticmethod
+ def _get_resources_by_project(host_name, instances):
+ # Getting usage resource per project
+ project_map = {}
+ for instance in instances:
+ resource = project_map.setdefault(instance['project_id'],
+ {'host': host_name,
+ 'project': instance['project_id'],
+ 'cpu': 0,
+ 'memory_mb': 0,
+ 'disk_gb': 0})
+ resource['cpu'] += instance['vcpus']
+ resource['memory_mb'] += instance['memory_mb']
+ resource['disk_gb'] += (instance['root_gb'] +
+ instance['ephemeral_gb'])
+ return project_map
+
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
@@ -256,14 +312,26 @@ class HostController(object):
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
+ host_name = id
try:
- data = self.api.describe_host(context, id)
+ service = self.api.service_get_by_compute_host(context, host_name)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.message)
except exception.AdminRequired:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
- return {'host': data}
+ compute_node = service['compute_node'][0]
+ instances = self.api.instance_get_all_by_host(context, host_name)
+ resources = [self._get_total_resources(host_name, compute_node)]
+ resources.append(self._get_used_now_resources(host_name,
+ compute_node))
+ resources.append(self._get_resource_totals_from_instances(host_name,
+ instances))
+ by_proj_resources = self._get_resources_by_project(host_name,
+ instances)
+ for resource in by_proj_resources.itervalues():
+ resources.append({'resource': resource})
+ return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
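
Note: the reworked show() now assembles the usage report in the API layer from the service, compute_node and instance records instead of calling describe_host(). A small self-contained sketch of the per-project roll-up performed by _get_resources_by_project() above, using invented instance data:

    # Invented sample data; field names match the helper above.
    host_name = 'host1'
    instances = [
        {'project_id': 'demo', 'vcpus': 1, 'memory_mb': 512,
         'root_gb': 10, 'ephemeral_gb': 0},
        {'project_id': 'demo', 'vcpus': 2, 'memory_mb': 2048,
         'root_gb': 20, 'ephemeral_gb': 40},
    ]

    project_map = {}
    for instance in instances:
        resource = project_map.setdefault(instance['project_id'],
                                          {'host': host_name,
                                           'project': instance['project_id'],
                                           'cpu': 0,
                                           'memory_mb': 0,
                                           'disk_gb': 0})
        resource['cpu'] += instance['vcpus']
        resource['memory_mb'] += instance['memory_mb']
        resource['disk_gb'] += instance['root_gb'] + instance['ephemeral_gb']

    # project_map['demo'] -> {'host': 'host1', 'project': 'demo',
    #                         'cpu': 3, 'memory_mb': 2560, 'disk_gb': 70}
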
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
index 4990c1b5e..3cdda1d76 100644
--- a/nova/api/openstack/compute/contrib/networks_associate.py
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -62,6 +62,6 @@ class Networks_associate(extensions.ExtensionDescriptor):
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
- self, 'os-admin-networks', NetworkAssociateActionController())
+ self, 'os-networks', NetworkAssociateActionController())
return [extension]
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index 4be0bd100..d1d172686 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,199 +16,155 @@
# License for the specific language governing permissions and limitations
# under the License.
-
import netaddr
-import netaddr.core as netexc
+import webob
from webob import exc
from nova.api.openstack import extensions
-from nova import context as nova_context
+from nova.api.openstack import wsgi
from nova import exception
-import nova.network
-from nova.openstack.common import cfg
+from nova import network
from nova.openstack.common import log as logging
-from nova import quota
-
-
-CONF = cfg.CONF
-
-try:
- os_network_opts = [
- cfg.BoolOpt("enable_network_quota",
- default=False,
- help="Enables or disables quotaing of tenant networks"),
- cfg.StrOpt('use_quantum_default_nets',
- default="False",
- help=('Control for checking for default networks')),
- cfg.StrOpt('quantum_default_tenant_id',
- default="default",
- help=('Default tenant id when creating quantum '
- 'networks'))
- ]
- CONF.register_opts(os_network_opts)
-except cfg.DuplicateOptError:
- # NOTE(jkoelker) These options are verbatim elsewhere this is here
- # to make sure they are registered for our use.
- pass
-
-if CONF.enable_network_quota:
- opts = [
- cfg.IntOpt('quota_networks',
- default=3,
- help='number of private networks allowed per project'),
- ]
- CONF.register_opts(opts)
-
-QUOTAS = quota.QUOTAS
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'os-networks')
-
-
-def network_dict(network):
- return {"id": network.get("uuid") or network["id"],
- "cidr": network["cidr"],
- "label": network["label"]}
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'networks')
+authorize_view = extensions.extension_authorizer('compute',
+ 'networks:view')
+
+
+def network_dict(context, network):
+ fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
+ 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
+ admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
+ 'injected', 'bridge', 'vlan', 'vpn_public_address',
+ 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
+ 'project_id', 'host', 'bridge_interface', 'multi_host',
+ 'priority', 'rxtx_base')
+ if network:
+ # NOTE(mnaser): We display a limited set of fields so users can know
+ # what networks are available, extra system-only fields
+ # are only visible if they are an admin.
+ if context.is_admin:
+ fields += admin_fields
+ result = dict((field, network[field]) for field in fields)
+ if 'uuid' in network:
+ result['id'] = network['uuid']
+ return result
+ else:
+ return {}
+
+
+class NetworkController(wsgi.Controller):
-class NetworkController(object):
def __init__(self, network_api=None):
- self.network_api = nova.network.API()
- self._default_networks = []
-
- def _refresh_default_networks(self):
- self._default_networks = []
- if CONF.use_quantum_default_nets == "True":
- try:
- self._default_networks = self._get_default_networks()
- except Exception:
- LOG.exception("Failed to get default networks")
-
- def _get_default_networks(self):
- project_id = CONF.quantum_default_tenant_id
- ctx = nova_context.RequestContext(user_id=None,
- project_id=project_id)
- networks = {}
- for n in self.network_api.get_all(ctx):
- networks[n['id']] = n['label']
- return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+ self.network_api = network_api or network.API()
def index(self, req):
context = req.environ['nova.context']
- authorize(context)
+ authorize_view(context)
networks = self.network_api.get_all(context)
- if not self._default_networks:
- self._refresh_default_networks()
- networks.extend(self._default_networks)
- return {'networks': [network_dict(n) for n in networks]}
+ result = [network_dict(context, net_ref) for net_ref in networks]
+ return {'networks': result}
- def show(self, req, id):
+ @wsgi.action("disassociate")
+ def _disassociate_host_and_project(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
+ LOG.debug(_("Disassociating network with id %s"), id)
+
+ try:
+ self.network_api.associate(context, id, host=None, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize_view(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
- return network_dict(network)
+ return {'network': network_dict(context, network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
- try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=-1)
- except Exception:
- reservation = None
- LOG.exception(_("Failed to update usages deallocating "
- "network."))
-
LOG.info(_("Deleting network with id %s") % id)
-
try:
self.network_api.delete(context, id)
- if CONF.enable_network_quota and reservation:
- QUOTAS.commit(context, reservation)
- response = exc.HTTPAccepted()
except exception.NetworkNotFound:
- response = exc.HTTPNotFound(_("Network not found"))
-
- return response
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
def create(self, req, body):
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- context = req.environ["nova.context"]
+ context = req.environ['nova.context']
authorize(context)
- network = body["network"]
- keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
- "num_networks"]
- kwargs = dict((k, network.get(k)) for k in keys)
+ def bad(e):
+ return exc.HTTPUnprocessableEntity(explanation=e)
- label = network["label"]
+ if not (body and body.get("network")):
+ raise bad(_("Missing network in body"))
- if not (kwargs["cidr"] or kwargs["cidr_v6"]):
- msg = _("No CIDR requested")
- raise exc.HTTPBadRequest(explanation=msg)
- if kwargs["cidr"]:
- try:
- net = netaddr.IPNetwork(kwargs["cidr"])
- if net.size < 4:
- msg = _("Requested network does not contain "
- "enough (2+) usable hosts")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrFormatError:
- msg = _("CIDR is malformed.")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrConversionError:
- msg = _("Address could not be converted.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- networks = []
+ params = body["network"]
+ if not params.get("label"):
+ raise bad(_("Network label is required"))
+
+ cidr = params.get("cidr") or params.get("cidr_v6")
+ if not cidr:
+ raise bad(_("Network cidr or cidr_v6 is required"))
+
+ LOG.debug(_("Creating network with label %s") % params["label"])
+
+ params["num_networks"] = 1
+ params["network_size"] = netaddr.IPNetwork(cidr).size
+
+ network = self.network_api.create(context, **params)[0]
+ return {"network": network_dict(context, network)}
+
+ def add(self, req, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ network_id = body.get('id', None)
+ project_id = context.project_id
+ LOG.debug(_("Associating network %(network)s"
+ " with project %(project)s") %
+ {"network": network_id or "",
+ "project": project_id})
try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=1)
- except exception.OverQuota:
- msg = _("Quota exceeded, too many networks.")
+ self.network_api.add_network_to_project(
+ context, project_id, network_id)
+ except Exception as ex:
+ msg = (_("Cannot associate network %(network)s"
+ " with project %(project)s: %(message)s") %
+ {"network": network_id or "",
+ "project": project_id,
+ "message": getattr(ex, "value", str(ex))})
raise exc.HTTPBadRequest(explanation=msg)
- try:
- networks = self.network_api.create(context,
- label=label, **kwargs)
- if CONF.enable_network_quota:
- QUOTAS.commit(context, reservation)
- except Exception:
- if CONF.enable_network_quota:
- QUOTAS.rollback(context, reservation)
- msg = _("Create networks failed")
- LOG.exception(msg, extra=network)
- raise exc.HTTPServiceUnavailable(explanation=msg)
- return {"network": network_dict(networks[0])}
+ return webob.Response(status_int=202)
class Os_networks(extensions.ExtensionDescriptor):
- """Tenant-based Network Management Extension."""
+ """Admin-only Network Management Extension."""
- name = "OSNetworks"
+ name = "Networks"
alias = "os-networks"
- namespace = "http://docs.openstack.org/compute/ext/os-networks/api/v1.1"
- updated = "2012-03-07T09:46:43-05:00"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-networks/api/v1.1")
+ updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
- ext = extensions.ResourceExtension('os-networks',
- NetworkController())
- return [ext]
-
-
-def _sync_networks(context, project_id, session):
- ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
- ctx = ctx.elevated()
- networks = nova.network.api.API().get_all(ctx)
- return dict(networks=len(networks))
-
-
-if CONF.enable_network_quota:
- QUOTAS.register_resource(quota.ReservableResource('networks',
- _sync_networks,
- 'quota_networks'))
+ member_actions = {'action': 'POST'}
+ collection_actions = {'add': 'POST'}
+ res = extensions.ResourceExtension(
+ 'os-networks',
+ NetworkController(),
+ member_actions=member_actions,
+ collection_actions=collection_actions)
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
new file mode 100644
index 000000000..03178ab65
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -0,0 +1,214 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import netaddr
+import netaddr.core as netexc
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import context as nova_context
+from nova import exception
+import nova.network
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import quota
+
+
+CONF = cfg.CONF
+
+try:
+ os_network_opts = [
+ cfg.BoolOpt("enable_network_quota",
+ default=False,
+ help="Enables or disables quotaing of tenant networks"),
+ cfg.StrOpt('use_quantum_default_nets',
+ default="False",
+ help=('Control for checking for default networks')),
+ cfg.StrOpt('quantum_default_tenant_id',
+ default="default",
+ help=('Default tenant id when creating quantum '
+ 'networks'))
+ ]
+ CONF.register_opts(os_network_opts)
+except cfg.DuplicateOptError:
+ # NOTE(jkoelker) These options are verbatim elsewhere this is here
+ # to make sure they are registered for our use.
+ pass
+
+if CONF.enable_network_quota:
+ opts = [
+ cfg.IntOpt('quota_networks',
+ default=3,
+ help='number of private networks allowed per project'),
+ ]
+ CONF.register_opts(opts)
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
+
+
+def network_dict(network):
+ return {"id": network.get("uuid") or network["id"],
+ "cidr": network["cidr"],
+ "label": network["label"]}
+
+
+class NetworkController(object):
+ def __init__(self, network_api=None):
+ self.network_api = nova.network.API()
+ self._default_networks = []
+
+ def _refresh_default_networks(self):
+ self._default_networks = []
+ if CONF.use_quantum_default_nets == "True":
+ try:
+ self._default_networks = self._get_default_networks()
+ except Exception:
+ LOG.exception("Failed to get default networks")
+
+ def _get_default_networks(self):
+ project_id = CONF.quantum_default_tenant_id
+ ctx = nova_context.RequestContext(user_id=None,
+ project_id=project_id)
+ networks = {}
+ for n in self.network_api.get_all(ctx):
+ networks[n['id']] = n['label']
+ return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ networks = self.network_api.get_all(context)
+ if not self._default_networks:
+ self._refresh_default_networks()
+ networks.extend(self._default_networks)
+ return {'networks': [network_dict(n) for n in networks]}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Showing network with id %s") % id)
+ try:
+ network = self.network_api.get(context, id)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return network_dict(network)
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=-1)
+ except Exception:
+ reservation = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "network."))
+
+ LOG.info(_("Deleting network with id %s") % id)
+
+ try:
+ self.network_api.delete(context, id)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = exc.HTTPAccepted()
+ except exception.NetworkNotFound:
+ response = exc.HTTPNotFound(_("Network not found"))
+
+ return response
+
+ def create(self, req, body):
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ network = body["network"]
+ keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
+ "num_networks"]
+ kwargs = dict((k, network.get(k)) for k in keys)
+
+ label = network["label"]
+
+ if not (kwargs["cidr"] or kwargs["cidr_v6"]):
+ msg = _("No CIDR requested")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if kwargs["cidr"]:
+ try:
+ net = netaddr.IPNetwork(kwargs["cidr"])
+ if net.size < 4:
+ msg = _("Requested network does not contain "
+ "enough (2+) usable hosts")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrFormatError:
+ msg = _("CIDR is malformed.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrConversionError:
+ msg = _("Address could not be converted.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ networks = []
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=1)
+ except exception.OverQuota:
+ msg = _("Quota exceeded, too many networks.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ networks = self.network_api.create(context,
+ label=label, **kwargs)
+ if CONF.enable_network_quota:
+ QUOTAS.commit(context, reservation)
+ except Exception:
+ if CONF.enable_network_quota:
+ QUOTAS.rollback(context, reservation)
+ msg = _("Create networks failed")
+ LOG.exception(msg, extra=network)
+ raise exc.HTTPServiceUnavailable(explanation=msg)
+ return {"network": network_dict(networks[0])}
+
+
+class Os_tenant_networks(extensions.ExtensionDescriptor):
+ """Tenant-based Network Management Extension."""
+
+ name = "OSTenantNetworks"
+ alias = "os-tenant-networks"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-tenant-networks/api/v2")
+ updated = "2012-03-07T09:46:43-05:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-tenant-networks',
+ NetworkController())
+ return [ext]
+
+
+def _sync_networks(context, project_id, session):
+ ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
+ ctx = ctx.elevated()
+ networks = nova.network.api.API().get_all(ctx)
+ return dict(networks=len(networks))
+
+
+if CONF.enable_network_quota:
+ QUOTAS.register_resource(quota.ReservableResource('networks',
+ _sync_networks,
+ 'quota_networks'))
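
Note: after this split, the tenant-facing os-tenant-networks controller returns only the reduced dict built by its network_dict() helper, while the richer admin-only fields stay behind os-networks. A minimal sketch of the tenant-visible response shape, using an invented network record:

    # network_dict() copied from the new module above; the record is invented.
    def network_dict(network):
        return {"id": network.get("uuid") or network["id"],
                "cidr": network["cidr"],
                "label": network["label"]}

    net = {'id': 42,
           'uuid': 'a1f1e9cc-0000-4000-8000-000000000001',
           'cidr': '10.0.0.0/24',
           'label': 'private',
           'vlan': 100}  # admin-only detail, dropped from the tenant view

    print(network_dict(net))
    # {'id': 'a1f1e9cc-0000-4000-8000-000000000001',
    #  'cidr': '10.0.0.0/24', 'label': 'private'}
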
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index c792c72da..2786ad814 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -21,6 +21,7 @@ import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+from nova import availability_zones
from nova import db
from nova import exception
from nova.openstack.common import cfg
@@ -69,6 +70,7 @@ class ServiceController(object):
authorize(context)
now = timeutils.utcnow()
services = db.service_get_all(context)
+ services = availability_zones.set_availability_zones(context, services)
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index d3a6fc8a9..f0fdb5a15 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -538,10 +538,10 @@ class Controller(wsgi.Controller):
marker=marker)
except exception.MarkerNotFound as e:
msg = _('marker [%s] not found') % marker
- raise webob.exc.HTTPBadRequest(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as e:
msg = _("Flavor could not be found")
- raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
+ raise exc.HTTPUnprocessableEntity(explanation=msg)
if is_detail:
self._add_instance_faults(context, instance_list)
@@ -828,21 +828,24 @@ class Controller(wsgi.Controller):
try:
min_count = int(min_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('min_count must be an '
- 'integer value'))
+ msg = _('min_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count < 1:
- raise webob.exc.HTTPBadRequest(_('min_count must be > 0'))
+ msg = _('min_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
max_count = int(max_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('max_count must be an '
- 'integer value'))
+ msg = _('max_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if max_count < 1:
- raise webob.exc.HTTPBadRequest(_('max_count must be > 0'))
+ msg = _('max_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count > max_count:
- raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count'))
+ msg = _('min_count must be <= max_count')
+ raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
@@ -1202,7 +1205,8 @@ class Controller(wsgi.Controller):
try:
body = body['rebuild']
except (KeyError, TypeError):
- raise exc.HTTPBadRequest(_("Invalid request body"))
+ msg = _('Invalid request body')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = body["imageRef"]
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 70ff73b2b..77ab4415c 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
- """Add a 'nova.context' to WSGI environ."""
+ """Limit the size of incoming requests."""
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 22d0fc015..7770bc9e6 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,6 +92,7 @@ CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
@@ -404,20 +405,20 @@ class API(base.Base):
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
- def _create_instance(self, context, instance_type,
- image_href, kernel_id, ramdisk_id,
- min_count, max_count,
- display_name, display_description,
- key_name, key_data, security_group,
- availability_zone, user_data, metadata,
- injected_files, admin_password,
- access_ip_v4, access_ip_v6,
- requested_networks, config_drive,
- block_device_mapping, auto_disk_config,
- reservation_id=None, scheduler_hints=None):
+ def _validate_and_provision_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data,
+ metadata, injected_files,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping,
+ auto_disk_config, reservation_id,
+ scheduler_hints):
"""Verify all the input parameters regardless of the provisioning
- strategy being performed and schedule the instance(s) for
- creation."""
+ strategy being performed."""
if not metadata:
metadata = {}
@@ -437,6 +438,19 @@ class API(base.Base):
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
+ if user_data:
+ l = len(user_data)
+ if l > MAX_USERDATA_SIZE:
+ # NOTE(mikal): user_data is stored in a text column, and
+ # the database might silently truncate if its over length.
+ raise exception.InstanceUserDataTooLarge(
+ length=l, maxsize=MAX_USERDATA_SIZE)
+
+ try:
+ base64.decodestring(user_data)
+ except base64.binascii.Error:
+ raise exception.InstanceUserDataMalformed()
+
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
@@ -484,9 +498,6 @@ class API(base.Base):
key_name)
key_data = key_pair['public_key']
- if reservation_id is None:
- reservation_id = utils.generate_uid('r')
-
root_device_name = block_device.properties_root_device_name(
image.get('properties', {}))
@@ -524,19 +535,6 @@ class API(base.Base):
'root_device_name': root_device_name,
'progress': 0}
- if user_data:
- l = len(user_data)
- if l > MAX_USERDATA_SIZE:
- # NOTE(mikal): user_data is stored in a text column, and
- # the database might silently truncate if its over length.
- raise exception.InstanceUserDataTooLarge(
- length=l, maxsize=MAX_USERDATA_SIZE)
-
- try:
- base64.decodestring(user_data)
- except base64.binascii.Error:
- raise exception.InstanceUserDataMalformed()
-
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
@@ -579,6 +577,36 @@ class API(base.Base):
'security_group': security_group,
}
+ return (instances, request_spec, filter_properties)
+
+ def _create_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id=None, scheduler_hints=None):
+ """Verify all the input parameters regardless of the provisioning
+ strategy being performed and schedule the instance(s) for
+ creation."""
+
+ if reservation_id is None:
+ reservation_id = utils.generate_uid('r')
+
+ (instances, request_spec, filter_properties) = \
+ self._validate_and_provision_instance(context, instance_type,
+ image_href, kernel_id, ramdisk_id, min_count,
+ max_count, display_name, display_description,
+ key_name, key_data, security_group, availability_zone,
+ user_data, metadata, injected_files, access_ip_v4,
+ access_ip_v6, requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id, scheduler_hints)
+
self.scheduler_rpcapi.run_instance(context,
request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
@@ -885,6 +913,12 @@ class API(base.Base):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
reservations = None
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
try:
# NOTE(maoy): no expected_task_state needs to be set
attrs = {'progress': 0}
@@ -899,6 +933,7 @@ class API(base.Base):
old['task_state'] not in (task_states.DELETING,
task_states.SOFT_DELETING)):
reservations = QUOTAS.reserve(context,
+ project_id=project_id,
instances=-1,
cores=-instance['vcpus'],
ram=-instance['memory_mb'])
@@ -910,7 +945,9 @@ class API(base.Base):
self.db.instance_destroy(context, instance['uuid'],
constraint)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
return
except exception.ConstraintNotMet:
# Refresh to get new host information
@@ -945,32 +982,35 @@ class API(base.Base):
host=src_host, cast=False,
reservations=downsize_reservations)
- # NOTE(jogo): db allows for multiple compute services per host
+ is_up = False
try:
- services = self.db.service_get_all_compute_by_host(
+ service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
- except exception.ComputeHostNotFound:
- services = []
-
- is_up = False
- for service in services:
if self.servicegroup_api.service_is_up(service):
is_up = True
cb(context, instance, bdms)
- break
+ except exception.ComputeHostNotFound:
+ pass
+
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
def _local_delete(self, context, instance, bdms):
LOG.warning(_("instance's host %s is down, deleting from "
@@ -1249,7 +1289,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
- extra_properties=None):
+ extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1259,14 +1299,26 @@ class API(base.Base):
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- recv_meta = self._create_image(context, instance, name, 'backup',
- backup_type=backup_type, rotation=rotation,
- extra_properties=extra_properties)
- return recv_meta
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_BACKUP,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'backup', backup_type=backup_type,
+ rotation=rotation, extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='backup',
+ backup_type=backup_type, rotation=rotation)
+ return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
- def snapshot(self, context, instance, name, extra_properties=None):
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1275,12 +1327,25 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
- return self._create_image(context, instance, name, 'snapshot',
- extra_properties=extra_properties)
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_SNAPSHOT,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'snapshot', extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='snapshot')
+ return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
- """Create snapshot or backup for an instance on this host.
+ """Create new image entry in the image service. This new image
+ will be reserved for the compute manager to upload a snapshot
+ or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1294,29 +1359,6 @@ class API(base.Base):
"""
instance_uuid = instance['uuid']
- if image_type == "snapshot":
- task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == "backup":
- task_state = task_states.IMAGE_BACKUP
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
-
- # change instance state and notify
- old_vm_state = instance["vm_state"]
- old_task_state = instance["task_state"]
-
- self.db.instance_test_and_set(
- context, instance_uuid, 'task_state', [None], task_state)
-
- # NOTE(sirp): `instance_test_and_set` only sets the task-state in the
- # DB, but we also need to set it on the current instance so that the
- # correct value is passed down to the compute manager.
- instance['task_state'] = task_state
-
- notifications.send_update_with_states(context, instance, old_vm_state,
- instance["vm_state"], old_task_state, instance["task_state"],
- service="api", verify_states=True)
-
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
@@ -1363,11 +1405,7 @@ class API(base.Base):
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
- recv_meta = self.image_service.create(context, sent_meta)
- self.compute_rpcapi.snapshot_instance(context, instance=instance,
- image_id=recv_meta['id'], image_type=image_type,
- backup_type=backup_type, rotation=rotation)
- return recv_meta
+ return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
@@ -1510,12 +1548,9 @@ class API(base.Base):
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
- network_info = self.network_api.get_instance_nw_info(elevated,
- instance)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
- network_info=network_info,
reboot_type=reboot_type)
def _get_image(self, context, image_href):
@@ -1628,6 +1663,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'reverting'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
@@ -1652,6 +1692,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'confirming'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
@@ -1814,6 +1859,12 @@ class API(base.Base):
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
args = {
"instance": instance,
"instance_type": new_instance_type,
@@ -2148,140 +2199,76 @@ class API(base.Base):
disk_over_commit, instance, host_name)
-def check_host(fn):
- """Decorator that makes sure that the host exists."""
- def wrapped(self, context, host_name, *args, **kwargs):
- if self.does_host_exist(context, host_name):
- return fn(self, context, host_name, *args, **kwargs)
- else:
- raise exception.HostNotFound(host=host_name)
- return wrapped
-
-
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
- def __init__(self):
- self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ def __init__(self, rpcapi=None):
+ self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
super(HostAPI, self).__init__()
- @check_host
+ def _assert_host_exists(self, context, host_name):
+ """Raise HostNotFound if compute host doesn't exist."""
+ if not self.db.service_get_by_host_and_topic(context, host_name,
+ CONF.compute_topic):
+ raise exception.HostNotFound(host=host_name)
+
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.set_host_enabled(context, enabled=enabled,
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
- @check_host
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.get_host_uptime(context, host=host_name)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.get_host_uptime(context, host=host_name)
- @check_host
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
- return self.compute_rpcapi.host_power_action(context, action=action,
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_power_action(context, action=action,
host=host_name)
- def list_hosts(self, context, zone=None, service=None):
- """Returns a summary list of enabled hosts, optionally filtering
- by zone and/or service type.
+ def set_host_maintenance(self, context, host_name, mode):
+ """Start/Stop host maintenance window. On start, it triggers
+ guest VM evacuation."""
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_maintenance_mode(context,
+ host_param=host_name, mode=mode, host=host_name)
+
+ def service_get_all(self, context, filters=None):
+ """Returns a list of services, optionally filtering the results.
+
+ If specified, 'filters' should be a dictionary containing services
+ attributes and matching values. For example, to get a list of services for
+ the 'compute' topic, use filters={'topic': 'compute'}.
"""
- LOG.debug(_("Listing hosts"))
+ if filters is None:
+ filters = {}
services = self.db.service_get_all(context, False)
- services = availability_zones.set_availability_zones(context, services)
- if zone:
- services = [s for s in services if s['availability_zone'] == zone]
- hosts = []
- for host in services:
- hosts.append({'host_name': host['host'], 'service': host['topic'],
- 'zone': host['availability_zone']})
- if service:
- hosts = [host for host in hosts
- if host["service"] == service]
- return hosts
-
- def does_host_exist(self, context, host_name):
- """
- Returns True if the host with host_name exists, False otherwise
- """
- return self.db.service_does_host_exist(context, host_name)
+ services = availability_zones.set_availability_zones(context,
+ services)
+ ret_services = []
+ for service in services:
+ for key, val in filters.iteritems():
+ if service[key] != val:
+ break
+ else:
+ # All filters matched.
+ ret_services.append(service)
+ return ret_services
- def describe_host(self, context, host_name):
- """
- Returns information about a host in this kind of format:
- :returns:
- ex.::
- {'host': 'hostname',
- 'project': 'admin',
- 'cpu': 1,
- 'memory_mb': 2048,
- 'disk_gb': 30}
- """
- # Getting compute node info and related instances info
- try:
- compute_ref = self.db.service_get_all_compute_by_host(context,
- host_name)
- compute_ref = compute_ref[0]
- except exception.ComputeHostNotFound:
- raise exception.HostNotFound(host=host_name)
- instance_refs = self.db.instance_get_all_by_host(context,
- compute_ref['host'])
-
- # Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
- resources = [{'resource': {'host': host_name, 'project': '(total)',
- 'cpu': compute_ref['vcpus'],
- 'memory_mb': compute_ref['memory_mb'],
- 'disk_gb': compute_ref['local_gb']}},
- {'resource': {'host': host_name, 'project': '(used_now)',
- 'cpu': compute_ref['vcpus_used'],
- 'memory_mb': compute_ref['memory_mb_used'],
- 'disk_gb': compute_ref['local_gb_used']}}]
-
- cpu_sum = 0
- mem_sum = 0
- hdd_sum = 0
- for i in instance_refs:
- cpu_sum += i['vcpus']
- mem_sum += i['memory_mb']
- hdd_sum += i['root_gb'] + i['ephemeral_gb']
-
- resources.append({'resource': {'host': host_name,
- 'project': '(used_max)',
- 'cpu': cpu_sum,
- 'memory_mb': mem_sum,
- 'disk_gb': hdd_sum}})
-
- # Getting usage resource per project
- project_ids = [i['project_id'] for i in instance_refs]
- project_ids = list(set(project_ids))
- for project_id in project_ids:
- vcpus = [i['vcpus'] for i in instance_refs
- if i['project_id'] == project_id]
-
- mem = [i['memory_mb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- resources.append({'resource': {'host': host_name,
- 'project': project_id,
- 'cpu': sum(vcpus),
- 'memory_mb': sum(mem),
- 'disk_gb': sum(disk)}})
- return resources
-
- @check_host
- def set_host_maintenance(self, context, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation."""
- return self.compute_rpcapi.host_maintenance_mode(context,
- host_param=host, mode=mode, host=host)
+ def service_get_by_compute_host(self, context, host_name):
+ """Get service entry for the given compute hostname."""
+ return self.db.service_get_by_compute_host(context, host_name)
+
+ def instance_get_all_by_host(self, context, host_name):
+ """Return all instances on the given host."""
+ return self.db.instance_get_all_by_host(context, host_name)
class AggregateAPI(base.Base):
@@ -2345,8 +2332,7 @@ class AggregateAPI(base.Base):
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
@@ -2357,8 +2343,7 @@ class AggregateAPI(base.Base):
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
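The quota changes in this file thread an explicit project_id through reserve/commit/rollback so that an admin deleting an instance owned by another tenant releases that tenant's usage rather than the admin's own. Below is a minimal standalone sketch of the pattern; FakeQuotas and Context are simplified stand-ins for illustration only, not Nova's real QUOTAS driver or request context.

# Illustrative only: simplified stand-ins; the real driver accepts the
# same project_id keyword on reserve/commit/rollback.
class FakeQuotas(object):
    def reserve(self, context, project_id=None, **deltas):
        print('reserve for %s: %s' % (project_id, deltas))
        return ['fake-reservation-uuid']

    def commit(self, context, reservations, project_id=None):
        print('commit %s for %s' % (reservations, project_id))

    def rollback(self, context, reservations, project_id=None):
        print('rollback %s for %s' % (reservations, project_id))


QUOTAS = FakeQuotas()


class Context(object):
    def __init__(self, project_id, is_admin=False):
        self.project_id = project_id
        self.is_admin = is_admin


def delete_quota_target(context, instance):
    # An admin may delete instances in other tenants; charge the quota
    # release to the instance's owner in that case.
    if context.is_admin and context.project_id != instance['project_id']:
        return instance['project_id']
    return context.project_id


ctxt = Context('admin-project', is_admin=True)
instance = {'project_id': 'tenant-a', 'vcpus': 2, 'memory_mb': 2048}
project_id = delete_quota_target(ctxt, instance)
reservations = QUOTAS.reserve(ctxt, project_id=project_id,
                              instances=-1, cores=-instance['vcpus'],
                              ram=-instance['memory_mb'])
QUOTAS.commit(ctxt, reservations, project_id=project_id)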
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 698c6eed0..d547c363a 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -18,7 +18,7 @@
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
-from nova.compute import task_states
+from nova.compute import instance_types
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
@@ -115,15 +115,28 @@ class ComputeCellsAPI(compute_api.API):
"""
return
- def _create_image(self, context, instance, name, image_type,
- backup_type=None, rotation=None, extra_properties=None):
- if backup_type:
- return self._call_to_cells(context, instance, 'backup',
- name, backup_type, rotation,
- extra_properties=extra_properties)
- else:
- return self._call_to_cells(context, instance, 'snapshot',
- name, extra_properties=extra_properties)
+ def backup(self, context, instance, name, backup_type, rotation,
+ extra_properties=None, image_id=None):
+ """Backup the given instance."""
+ image_meta = super(ComputeCellsAPI, self).backup(context,
+ instance, name, backup_type, rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'backup', name,
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ return image_meta
+
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
+ """Snapshot the given instance."""
+ image_meta = super(ComputeCellsAPI, self).snapshot(context,
+ instance, name, extra_properties=extra_properties,
+ image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'snapshot',
+ name, extra_properties=extra_properties, image_id=image_id)
+ return image_meta
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
@@ -241,22 +254,14 @@ class ComputeCellsAPI(compute_api.API):
@validate_cell
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
- # NOTE(markwash): regular api manipulates the migration here, but we
- # don't have access to it. So to preserve the interface just update the
- # vm and task state.
- self.update(context, instance,
- task_state=task_states.RESIZE_REVERTING)
+ super(ComputeCellsAPI, self).revert_resize(context, instance)
self._cast_to_cells(context, instance, 'revert_resize')
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
- # NOTE(markwash): regular api manipulates migration here, but we don't
- # have the migration in the api database. So to preserve the interface
- # just update the vm and task state without calling super()
- self.update(context, instance, task_state=None,
- vm_state=vm_states.ACTIVE)
+ super(ComputeCellsAPI, self).confirm_resize(context, instance)
self._cast_to_cells(context, instance, 'confirm_resize')
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
@@ -269,8 +274,36 @@ class ComputeCellsAPI(compute_api.API):
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- super(ComputeCellsAPI, self).resize(context, instance, *args,
- **kwargs)
+ super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)
+
+ # NOTE(johannes): If we get to this point, then we know the
+ # specified flavor_id is valid and exists. We'll need to load
+ # it again, but that should be safe.
+
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ flavor_id = kwargs.get('flavor_id')
+
+ if not flavor_id:
+ new_instance_type = old_instance_type
+ else:
+ new_instance_type = instance_types.get_instance_type_by_flavor_id(
+ flavor_id)
+
+ # NOTE(johannes): Later, when the resize is confirmed or reverted,
+ # the superclass implementations of those methods will need access
+ # to a local migration record for quota reasons. We don't need
+ # source and/or destination information, just the old and new
+ # instance_types. Status is set to 'finished' since nothing else
+ # will update the status along the way.
+ self.db.migration_create(context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': new_instance_type['id'],
+ 'status': 'finished'})
+
# FIXME(comstud): pass new instance_type object down to a method
# that'll unfold it
self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
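With cells enabled, resize now records a bare migration row at the API cell so the later confirm/revert paths can find the old and new instance types for quota accounting. A rough standalone sketch of that idea follows; the flavor table and DB object here are fakes for illustration, not Nova's actual instance_types or db interfaces.

# Illustrative sketch: minimal stand-ins for the flavor lookup and the
# migration_create DB call used by the cells resize path.
FLAVORS = {
    1: {'id': 1, 'flavorid': 'm1.small'},
    2: {'id': 2, 'flavorid': 'm1.large'},
}


def get_flavor_by_flavorid(flavorid):
    for flavor in FLAVORS.values():
        if flavor['flavorid'] == flavorid:
            return flavor
    raise KeyError(flavorid)


class FakeDB(object):
    def migration_create(self, values):
        print('migration_create: %s' % values)
        return values


def record_resize_migration(db, instance, flavor_id=None):
    old = FLAVORS[instance['instance_type_id']]
    # No flavor_id means a same-flavor migrate rather than a resize.
    new = old if not flavor_id else get_flavor_by_flavorid(flavor_id)
    # Status is 'finished' because nothing else in the cells path will
    # advance it before confirm/revert looks it up.
    return db.migration_create({'instance_uuid': instance['uuid'],
                                'old_instance_type_id': old['id'],
                                'new_instance_type_id': new['id'],
                                'status': 'finished'})


record_resize_migration(FakeDB(),
                        {'uuid': 'uuid-1', 'instance_type_id': 1},
                        flavor_id='m1.large')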
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index f138a3708..3bf8e61ef 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -25,10 +25,6 @@ building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
-**Related Flags**
-
-:instances_path: Where instances are kept on disk
-
"""
import contextlib
@@ -297,7 +293,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.22'
+ RPC_API_VERSION = '2.23'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -358,32 +354,6 @@ class ComputeManager(manager.SchedulerDependentManager):
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
- def _get_instances_at_startup(self, context):
- '''Get instances for this host during service init.'''
- attempt = 0
- timeout = 10
- while True:
- # NOTE(danms): Try ten times with a short timeout, and then punt
- # to the configured RPC timeout after that
- if attempt == 10:
- timeout = None
- attempt += 1
-
- # NOTE(russellb): This is running during service startup. If we
- # allow an exception to be raised, the service will shut down.
- # This may fail the first time around if nova-conductor wasn't
- # running when nova-compute started.
- try:
- self.conductor_api.ping(context, '1.21 GigaWatts',
- timeout=timeout)
- break
- except rpc_common.Timeout as e:
- LOG.exception(_('Timed out waiting for nova-conductor. '
- 'Is it running? Or did nova-compute start '
- 'before nova-conductor?'))
-
- return self.conductor_api.instance_get_all_by_host(context, self.host)
-
def _get_instances_on_driver(self, context):
"""Return a list of instance records that match the instances found
on the hypervisor.
@@ -495,7 +465,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'resume guests'), instance=instance)
elif drv_state == power_state.RUNNING:
- # VMWareAPI drivers will raise an exception
+ # VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance,
@@ -508,7 +478,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
- instances = self._get_instances_at_startup(context)
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
@@ -707,9 +678,9 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
-
+ macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
- requested_networks)
+ requested_networks, macs)
block_device_info = self._prep_block_device(context,
instance, bdms)
instance = self._spawn(context, instance, image_meta,
@@ -734,7 +705,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# Spawn success:
if (is_first_time and not instance['access_ip_v4']
and not instance['access_ip_v6']):
- self._update_access_ip(context, instance, network_info)
+ instance = self._update_access_ip(context, instance,
+ network_info)
self._notify_about_instance_usage(context, instance,
"create.end", network_info=network_info,
@@ -851,7 +823,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_name = CONF.default_access_ip_network_name
if not network_name:
- return
+ return instance
update_info = {}
for vif in nw_info:
@@ -862,7 +834,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if ip['version'] == 6:
update_info['access_ip_v6'] = ip['address']
if update_info:
- self._instance_update(context, instance['uuid'], **update_info)
+ instance = self._instance_update(context, instance['uuid'],
+ **update_info)
+ return instance
def _check_instance_not_already_created(self, context, instance):
"""Ensure an instance with the same name is not already present."""
@@ -937,18 +911,19 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=(task_states.SCHEDULING,
None))
- def _allocate_network(self, context, instance, requested_networks):
+ def _allocate_network(self, context, instance, requested_networks, macs):
"""Allocate networks for an instance and return the network info."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.NETWORKING,
- expected_task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.NETWORKING,
+ expected_task_state=None)
is_vpn = pipelib.is_vpn_image(instance['image_ref'])
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
- requested_networks=requested_networks)
+ requested_networks=requested_networks,
+ macs=macs)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -961,9 +936,9 @@ class ComputeManager(manager.SchedulerDependentManager):
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
return self._setup_block_device_mapping(context, instance, bdms)
except Exception:
@@ -974,11 +949,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
"""Spawn an instance with error logging and update its power state."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.SPAWNING,
- expected_task_state=task_states.
- BLOCK_DEVICE_MAPPING)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING,
+ expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
@@ -1205,13 +1179,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.STOPPED,
- expected_task_state=(task_states.POWERING_OFF,
- task_states.STOPPING),
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.STOPPED,
+ expected_task_state=(task_states.POWERING_OFF,
+ task_states.STOPPING),
+ task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
# NOTE(johannes): This is probably better named power_on_instance
@@ -1225,13 +1198,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=(task_states.POWERING_ON,
- task_states.STARTING))
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=(task_states.POWERING_ON,
+ task_states.STARTING))
self._notify_about_instance_usage(context, instance, "power_on.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1248,12 +1220,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the soft_delete method
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SOFT_DELETED,
- expected_task_state=task_states.SOFT_DELETING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1269,12 +1240,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the restore method
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- expected_task_state=task_states.RESTORING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ expected_task_state=task_states.RESTORING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "restore.end")
# NOTE(johannes): In the folsom release, power_off_instance was poorly
@@ -1382,11 +1352,10 @@ class ComputeManager(manager.SchedulerDependentManager):
"rebuild.start", extra_usage_info=extra_usage_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- task_state=task_states.REBUILDING,
- expected_task_state=task_states.REBUILDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ task_state=task_states.REBUILDING,
+ expected_task_state=task_states.REBUILDING)
if recreate:
# Detaching volumes.
@@ -1406,11 +1375,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.destroy(instance,
self._legacy_nw_info(network_info))
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING,
- expected_task_state=task_states.REBUILDING)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ expected_task_state=task_states.REBUILDING)
instance['injected_files'] = injected_files
network_info = self._get_instance_nw_info(context, instance)
@@ -1421,14 +1388,11 @@ class ComputeManager(manager.SchedulerDependentManager):
device_info = self._setup_block_device_mapping(context, instance,
bdms)
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_SPAWNING,
- expected_task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING)
- # pull in new password here since the original password isn't in
- # the db
+ expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_SPAWNING,
+ expected_task_state=expected_task_state)
+
admin_password = new_pass
self.driver.spawn(context, instance, image_meta,
@@ -1476,19 +1440,14 @@ class ComputeManager(manager.SchedulerDependentManager):
if block_device_info is None:
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
- # NOTE(danms): remove this when RPC API < 2.5 compatibility
- # is no longer needed
- if network_info is None:
- network_info = self._get_instance_nw_info(context, instance)
- else:
- network_info = network_model.NetworkInfo.hydrate(network_info)
+ network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
@@ -1509,10 +1468,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
@@ -1535,9 +1494,8 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
@@ -1560,14 +1518,17 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state = task_states.IMAGE_BACKUP
def update_task_state(task_state, expected_state=expected_task_state):
- self._instance_update(context, instance['uuid'],
- task_state=task_state,
- expected_task_state=expected_state)
+ return self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id, update_task_state)
+ # The instance could have been changed by the driver. Since we're
+ # doing a fresh update here, we'll pick up those changes.
- self._instance_update(context, instance['uuid'], task_state=None,
- expected_task_state=task_states.IMAGE_UPLOADING)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.IMAGE_UPLOADING)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
@@ -1925,18 +1886,15 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self._instance_update(context,
- instance['uuid'],
- launched_at=timeutils.utcnow(),
- expected_task_state=task_states.
- RESIZE_REVERTING)
+ instance = self._instance_update(context,
+ instance['uuid'], launched_at=timeutils.utcnow(),
+ expected_task_state=task_states.RESIZE_REVERTING)
self.network_api.migrate_instance_finish(context, instance,
migration)
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE, task_state=None)
rt = self._get_resource_tracker(instance.get('node'))
rt.revert_resize(context, migration)
@@ -2342,12 +2300,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SUSPENDED,
- task_state=None,
- expected_task_state=task_states.SUSPENDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None,
+ expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@@ -2367,11 +2324,9 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context,
+ instance['uuid'], power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None)
self._notify_about_instance_usage(context, instance, 'resume')
@@ -2629,10 +2584,10 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def _get_compute_info(self, context, host):
- compute_node_ref = self.conductor_api.service_get_all_compute_by_host(
+ compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
try:
- return compute_node_ref[0]['compute_node'][0]
+ return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %(host)s not found") % locals())
@@ -2884,23 +2839,20 @@ class ComputeManager(manager.SchedulerDependentManager):
block_migration)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- host=self.host,
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host, power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None,
+ expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
- def _rollback_live_migration(self, context, instance_ref,
+ def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
- :param instance_ref: nova.db.sqlalchemy.models.Instance
+ :param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
@@ -2909,23 +2861,18 @@ class ComputeManager(manager.SchedulerDependentManager):
if not none, contains implementation specific data.
"""
- host = instance_ref['host']
- self._instance_update(context,
- instance_ref['uuid'],
- host=host,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ host = instance['host']
+ instance = self._instance_update(context, instance['uuid'],
+ host=host, vm_state=vm_states.ACTIVE,
+ task_state=None, expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
- self.network_api.setup_networks_on_host(context, instance_ref,
- self.host)
+ self.network_api.setup_networks_on_host(context, instance, self.host)
- for bdm in self._get_instance_volume_bdms(context, instance_ref):
+ for bdm in self._get_instance_volume_bdms(context, instance):
volume_id = bdm['volume_id']
- volume = self.volume_api.get(context, volume_id)
- self.compute_rpcapi.remove_volume_connection(context, instance_ref,
- volume['id'], dest)
+ self.compute_rpcapi.remove_volume_connection(context, instance,
+ volume_id, dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
@@ -2940,7 +2887,7 @@ class ComputeManager(manager.SchedulerDependentManager):
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or (is_volume_backed and not is_shared_storage):
self.compute_rpcapi.rollback_live_migration_at_destination(context,
- instance_ref, dest)
+ instance, dest)
def rollback_live_migration_at_destination(self, context, instance):
"""Cleaning up image directory that is created pre_live_migration.
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 256b64979..f3c3ae7a3 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -166,11 +166,8 @@ class ResourceTracker(object):
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
- return db.migration_create(context.elevated(),
- {'instance_uuid': instance['uuid'],
- 'source_compute': instance['host'],
- 'source_node': instance['node'],
- 'dest_compute': self.host,
+ return self.conductor_api.migration_create(context, instance,
+ {'dest_compute': self.host,
'dest_node': self.nodename,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
@@ -311,8 +308,7 @@ class ResourceTracker(object):
def _get_service(self, context):
try:
- return db.service_get_all_compute_by_host(context,
- self.host)[0]
+ return db.service_get_by_compute_host(context, self.host)
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
@@ -358,8 +354,7 @@ class ResourceTracker(object):
def confirm_resize(self, context, migration, status='confirmed'):
"""Cleanup usage for a confirmed resize."""
elevated = context.elevated()
- db.migration_update(elevated, migration['id'],
- {'status': status})
+ self.conductor_api.migration_update(elevated, migration, status)
self.update_available_resource(elevated)
def revert_resize(self, context, migration, status='reverted'):
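The resource tracker now routes its migration writes through the conductor API instead of hitting the DB directly, and the conductor fills in the instance-derived fields (uuid, source compute/node). A minimal sketch of that split is shown below; both classes are stand-ins for illustration, not Nova's real conductor or DB layer.

# Illustrative stand-ins: the "conductor" owns DB access and merges the
# instance-derived keys into the values the tracker supplies.
class FakeDB(object):
    def migration_create(self, values):
        print('migration row: %s' % values)
        return values


class FakeConductorAPI(object):
    def __init__(self, db):
        self.db = db

    def migration_create(self, context, instance, values):
        values = dict(values,
                      instance_uuid=instance['uuid'],
                      source_compute=instance['host'],
                      source_node=instance['node'])
        return self.db.migration_create(values)


conductor = FakeConductorAPI(FakeDB())
instance = {'uuid': 'uuid-1', 'host': 'compute1', 'node': 'node1'}
conductor.migration_create(None, instance,
                           {'dest_compute': 'compute2',
                            'dest_node': 'node2',
                            'status': 'pre-migrating'})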
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index ae283283b..3e7ed1cfd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -157,6 +157,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.21 - Add migrate_data dict param to pre_live_migration()
2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
+ 2.23 - Remove network_info from reboot_instance
'''
#
@@ -383,16 +384,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
_compute_topic(self.topic, ctxt, host, None),
version='2.20')
- def reboot_instance(self, ctxt, instance,
- block_device_info, network_info, reboot_type):
+ def reboot_instance(self, ctxt, instance, block_device_info,
+ reboot_type):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('reboot_instance',
instance=instance_p,
block_device_info=block_device_info,
- network_info=network_info,
reboot_type=reboot_type),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.5')
+ version='2.23')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
@@ -525,7 +525,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.3')
def snapshot_instance(self, ctxt, instance, image_id, image_type,
- backup_type, rotation):
+ backup_type=None, rotation=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('snapshot_instance',
instance=instance_p, image_id=image_id,
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
index 1e31da54c..4c702d037 100644
--- a/nova/conductor/__init__.py
+++ b/nova/conductor/__init__.py
@@ -18,7 +18,8 @@ import nova.openstack.common.importutils
def API(*args, **kwargs):
- if nova.openstack.common.cfg.CONF.conductor.use_local:
+ use_local = kwargs.pop('use_local', False)
+ if nova.openstack.common.cfg.CONF.conductor.use_local or use_local:
api = conductor_api.LocalAPI
else:
api = conductor_api.API
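The conductor API factory now lets a caller force the local (in-process) implementation regardless of the use_local config option, which a service that cannot RPC to itself needs. A simplified sketch of the selection logic follows; the classes and the config flag are fakes for illustration, not the real LocalAPI/API or CONF.

# Illustrative sketch of the factory: a kwargs override wins over config.
USE_LOCAL_CONF = False          # stand-in for CONF.conductor.use_local


class LocalAPI(object):
    kind = 'local'


class RemoteAPI(object):
    kind = 'rpc'


def conductor_api_factory(*args, **kwargs):
    use_local = kwargs.pop('use_local', False)
    cls = LocalAPI if (USE_LOCAL_CONF or use_local) else RemoteAPI
    return cls(*args, **kwargs)


assert conductor_api_factory().kind == 'rpc'
assert conductor_api_factory(use_local=True).kind == 'local'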
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 04a4f3d9c..63b64f830 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -20,6 +20,7 @@ from nova.conductor import manager
from nova.conductor import rpcapi
from nova import exception as exc
from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
conductor_opts = [
@@ -39,6 +40,8 @@ CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
+LOG = logging.getLogger(__name__)
+
class ExceptionHelper(object):
"""Class to wrap another and translate the ClientExceptions raised by its
@@ -68,6 +71,10 @@ class LocalAPI(object):
# other/future users of this sort of functionality.
self._manager = ExceptionHelper(manager.ConductorManager())
+ def wait_until_ready(self, context, *args, **kwargs):
+ # nothing to wait for in the local case.
+ pass
+
def ping(self, context, arg, timeout=None):
return self._manager.ping(context, arg)
@@ -127,6 +134,9 @@ class LocalAPI(object):
return self._manager.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
+ def migration_create(self, context, instance, values):
+ return self._manager.migration_create(context, instance, values)
+
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
@@ -239,8 +249,15 @@ class LocalAPI(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self._manager.service_get_all_by(context, 'compute', host)
+ def service_get_by_compute_host(self, context, host):
+ result = self._manager.service_get_all_by(context, 'compute', host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
+
+ def service_get_by_args(self, context, host, binary):
+ return self._manager.service_get_all_by(context, host=host,
+ binary=binary)
def action_event_start(self, context, values):
return self._manager.action_event_start(context, values)
@@ -248,6 +265,12 @@ class LocalAPI(object):
def action_event_finish(self, context, values):
return self._manager.action_event_finish(context, values)
+ def service_create(self, context, values):
+ return self._manager.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self._manager.service_destroy(context, service_id)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -255,6 +278,35 @@ class API(object):
def __init__(self):
self.conductor_rpcapi = rpcapi.ConductorAPI()
+ def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
+ '''Wait until a conductor service is up and running.
+
+ This method calls the remote ping() method on the conductor topic until
+ it gets a response. It starts with a shorter timeout (early_timeout)
+ for the first early_attempts tries, then falls back to the globally
+ configured RPC timeout for each subsequent retry.
+ '''
+ attempt = 0
+ timeout = early_timeout
+ while True:
+ # NOTE(danms): Try ten times with a short timeout, and then punt
+ # to the configured RPC timeout after that
+ if attempt == early_attempts:
+ timeout = None
+ attempt += 1
+
+ # NOTE(russellb): This is running during service startup. If we
+ # allow an exception to be raised, the service will shut down.
+ # This may fail the first time around if nova-conductor wasn't
+ # running when this service started.
+ try:
+ self.ping(context, '1.21 GigaWatts', timeout=timeout)
+ break
+ except rpc_common.Timeout as e:
+ LOG.exception(_('Timed out waiting for nova-conductor. '
+ 'Is it running? Or did this service start '
+ 'before nova-conductor?'))
+
def ping(self, context, arg, timeout=None):
return self.conductor_rpcapi.ping(context, arg, timeout)
@@ -318,6 +370,10 @@ class API(object):
return crpcapi.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
+ def migration_create(self, context, instance, values):
+ return self.conductor_rpcapi.migration_create(context, instance,
+ values)
+
def migration_update(self, context, migration, status):
return self.conductor_rpcapi.migration_update(context, migration,
status)
@@ -440,12 +496,25 @@ class API(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self.conductor_rpcapi.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self.conductor_rpcapi.service_get_all_by(context, 'compute',
- host)
+ def service_get_by_compute_host(self, context, host):
+ result = self.conductor_rpcapi.service_get_all_by(context, 'compute',
+ host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
+
+ def service_get_by_args(self, context, host, binary):
+ return self.conductor_rpcapi.service_get_all_by(context, host=host,
+ binary=binary)
def action_event_start(self, context, values):
return self.conductor_rpcapi.action_event_start(context, values)
def action_event_finish(self, context, values):
return self.conductor_rpcapi.action_event_finish(context, values)
+
+ def service_create(self, context, values):
+ return self.conductor_rpcapi.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self.conductor_rpcapi.service_destroy(context, service_id)
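wait_until_ready moves the startup ping loop out of the compute manager: it retries with a short timeout a fixed number of times, then drops back to the default RPC timeout. A generic standalone sketch of that escalating-timeout retry is below; FakeTimeout and flaky ping are stand-ins for rpc_common.Timeout and the conductor ping(), used here only for illustration.

# Illustrative retry loop with an escalating timeout.
class FakeTimeout(Exception):
    pass


def make_flaky_ping(failures):
    state = {'calls': 0}

    def ping(timeout=None):
        state['calls'] += 1
        if state['calls'] <= failures:
            raise FakeTimeout()
        return 'pong (timeout=%s)' % timeout
    return ping


def wait_until_ready(ping, early_timeout=10, early_attempts=10):
    attempt = 0
    timeout = early_timeout
    while True:
        # After early_attempts short tries, fall back to the default
        # (None) timeout so a slow conductor can still be reached.
        if attempt == early_attempts:
            timeout = None
        attempt += 1
        try:
            return ping(timeout=timeout)
        except FakeTimeout:
            pass  # the real code logs and retries here


print(wait_until_ready(make_flaky_ping(failures=12)))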
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index fb583d0ce..b0d4011ad 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.26'
+ RPC_API_VERSION = '1.30'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -100,6 +100,13 @@ class ConductorManager(manager.SchedulerDependentManager):
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
+ def migration_create(self, context, instance, values):
+ values.update({'instance_uuid': instance['uuid'],
+ 'source_compute': instance['host'],
+ 'source_node': instance['node']})
+ migration_ref = self.db.migration_create(context.elevated(), values)
+ return jsonutils.to_primitive(migration_ref)
+
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
@@ -248,16 +255,20 @@ class ConductorManager(manager.SchedulerDependentManager):
wr_bytes, instance['uuid'], last_refreshed,
update_totals)
- def service_get_all_by(self, context, topic=None, host=None):
- if not any((topic, host)):
+ @rpc_common.client_exceptions(exception.HostBinaryNotFound)
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
- result = self.db.service_get_all_compute_by_host(context,
- host)
+ result = self.db.service_get_by_compute_host(context, host)
+ # FIXME(comstud) Potentially remove this on bump to v2.0
+ result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
+ elif all((host, binary)):
+ result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
@@ -266,7 +277,17 @@ class ConductorManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(result)
def action_event_start(self, context, values):
- return self.db.action_event_start(context, values)
+ evt = self.db.action_event_start(context, values)
+ return jsonutils.to_primitive(evt)
def action_event_finish(self, context, values):
- return self.db.action_event_finish(context, values)
+ evt = self.db.action_event_finish(context, values)
+ return jsonutils.to_primitive(evt)
+
+ def service_create(self, context, values):
+ svc = self.db.service_create(context, values)
+ return jsonutils.to_primitive(svc)
+
+ @rpc_common.client_exceptions(exception.ServiceNotFound)
+ def service_destroy(self, context, service_id):
+ self.db.service_destroy(context, service_id)
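service_get_all_by in the conductor manager now also dispatches on a binary argument, so a service can look up its own row (host + binary) through the conductor rather than the DB. A condensed sketch of the dispatch order follows, with an in-memory list standing in for the service table; it is illustrative only.

# Illustrative dispatch: same precedence as the manager (topic+host,
# then host+binary, then topic, then host), over an in-memory table.
SERVICES = [
    {'host': 'c1', 'topic': 'compute', 'binary': 'nova-compute'},
    {'host': 'c1', 'topic': 'console', 'binary': 'nova-console'},
    {'host': 'c2', 'topic': 'compute', 'binary': 'nova-compute'},
]


def service_get_all_by(topic=None, host=None, binary=None):
    if not any((topic, host, binary)):
        return list(SERVICES)
    if topic and host:
        return [s for s in SERVICES
                if s['topic'] == topic and s['host'] == host]
    if host and binary:
        return [s for s in SERVICES
                if s['host'] == host and s['binary'] == binary]
    if topic:
        return [s for s in SERVICES if s['topic'] == topic]
    return [s for s in SERVICES if s['host'] == host]


print(service_get_all_by(host='c1', binary='nova-compute'))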
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 8850bca01..b7f760cf5 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -59,6 +59,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
+ 1.27 - Added service_create
+ 1.28 - Added binary arg to service_get_all_by
+ 1.29 - Added service_destroy
+ 1.30 - Added migration_create
"""
BASE_RPC_API_VERSION = '1.0'
@@ -102,6 +106,12 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
dest_compute=dest_compute)
return self.call(context, msg, version='1.20')
+ def migration_create(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('migration_create', instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.30')
+
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('migration_update', migration=migration_p,
@@ -252,9 +262,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
update_totals=update_totals)
return self.call(context, msg, version='1.19')
- def service_get_all_by(self, context, topic=None, host=None):
- msg = self.make_msg('service_get_all_by', topic=topic, host=host)
- return self.call(context, msg, version='1.21')
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ msg = self.make_msg('service_get_all_by', topic=topic, host=host,
+ binary=binary)
+ return self.call(context, msg, version='1.28')
def instance_get_all(self, context):
msg = self.make_msg('instance_get_all')
@@ -278,3 +289,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p,
values=values)
return self.call(context, msg, version='1.26')
+
+ def service_create(self, context, values):
+ msg = self.make_msg('service_create', values=values)
+ return self.call(context, msg, version='1.27')
+
+ def service_destroy(self, context, service_id):
+ msg = self.make_msg('service_destroy', service_id=service_id)
+ return self.call(context, msg, version='1.29')
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 243c028d9..2045f824d 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -65,7 +65,6 @@ class ConsoleProxyManager(manager.Manager):
def init_host(self):
self.driver.init_host()
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
@@ -93,7 +92,6 @@ class ConsoleProxyManager(manager.Manager):
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index e8eab4db2..bb1818943 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -49,7 +49,7 @@ class ConsoleVMRCManager(manager.Manager):
"""Get VIM session for the pool specified."""
vim_session = None
if pool['id'] not in self.sessions.keys():
- vim_session = vmwareapi_conn.VMWareAPISession(
+ vim_session = vmwareapi_conn.VMwareAPISession(
pool['address'],
pool['username'],
pool['password'],
@@ -75,7 +75,6 @@ class ConsoleVMRCManager(manager.Manager):
self.driver.setup_console(context, console)
return console
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
"""Adds a console for the instance.
@@ -105,7 +104,6 @@ class ConsoleVMRCManager(manager.Manager):
instance)
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
"""Removes a console entry."""
try:
diff --git a/nova/db/api.py b/nova/db/api.py
index 3a57e71af..d7d9bd0d2 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -151,9 +151,12 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
-def service_get_all_compute_by_host(context, host):
- """Get all compute services for a given host."""
- return IMPL.service_get_all_compute_by_host(context, host)
+def service_get_by_compute_host(context, host):
+ """Get the service entry for a given compute host.
+
+ Returns the service entry joined with the compute_node entry.
+ """
+ return IMPL.service_get_by_compute_host(context, host)
def service_get_all_compute_sorted(context):
@@ -1016,20 +1019,22 @@ def reservation_destroy(context, uuid):
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age)
+ until_refresh, max_age, project_id=project_id)
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
- return IMPL.reservation_commit(context, reservations)
+ return IMPL.reservation_commit(context, reservations,
+ project_id=project_id)
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
- return IMPL.reservation_rollback(context, reservations)
+ return IMPL.reservation_rollback(context, reservations,
+ project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
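service_get_by_compute_host replaces the old list-returning call and hands back a single service row joined with its compute_node entry, which is how callers such as _get_compute_info now read it. A tiny usage sketch against a faked return value (the fake function and its data are assumptions for illustration):

# Illustrative: shape of the joined row the new DB call returns and how
# a caller picks the compute node record out of it.
def fake_service_get_by_compute_host(context, host):
    return {'host': host,
            'topic': 'compute',
            'compute_node': [{'vcpus': 16, 'memory_mb': 65536,
                              'local_gb': 500}]}


service = fake_service_get_by_compute_host(None, 'compute1')
compute_node = service['compute_node'][0]
print('%(host)s has %(vcpus)d vcpus' % {'host': service['host'],
                                        'vcpus': compute_node['vcpus']})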
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 698f79317..3fdfd53c8 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -337,15 +337,6 @@ def service_get_all(context, disabled=None):
@require_admin_context
-def service_does_host_exist(context, host_name, include_disabled):
- query = get_session().query(func.count(models.Service.host)).\
- filter_by(host=host_name)
- if not include_disabled:
- query = query.filter_by(disabled=False)
- return query.scalar() > 0
-
-
-@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -370,12 +361,12 @@ def service_get_all_by_host(context, host):
@require_admin_context
-def service_get_all_compute_by_host(context, host):
+def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
- all()
+ first()
if not result:
raise exception.ComputeHostNotFound(host=host)
@@ -1787,42 +1778,6 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
@require_context
-def instance_test_and_set(context, instance_uuid, attr, ok_states, new_state):
- """Atomically check if an instance is in a valid state, and if it is, set
- the instance into a new state.
- """
- if not uuidutils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- session = get_session()
- with session.begin():
- query = model_query(context, models.Instance, session=session,
- project_only=True).\
- filter_by(uuid=instance_uuid)
-
- attr_column = getattr(models.Instance, attr)
- filter_op = None
- # NOTE(boris-42): `SELECT IN` doesn't work with None values because
- # they are incomparable.
- if None in ok_states:
- filter_op = or_(attr_column == None,
- attr_column.in_(filter(lambda x: x is not None,
- ok_states)))
- else:
- filter_op = attr_column.in_(ok_states)
-
- count = query.filter(filter_op).\
- update({attr: new_state}, synchronize_session=False)
- if count == 0:
- instance_ref = query.first()
- raise exception.InstanceInvalidState(
- attr=attr,
- instance_uuid=instance_ref['uuid'],
- state=instance_ref[attr],
- method='instance_test_and_set')
-
-
-@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@@ -2642,12 +2597,12 @@ def reservation_destroy(context, uuid):
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
-def _get_quota_usages(context, session):
+def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
- filter_by(project_id=context.project_id).\
+ filter_by(project_id=project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
@@ -2655,12 +2610,16 @@ def _get_quota_usages(context, session):
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
+
+ if project_id is None:
+ project_id = context.project_id
+
# Get the current usages
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
# Handle usage refresh
work = set(deltas.keys())
@@ -2671,7 +2630,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
- context.project_id,
+ project_id,
resource,
0, 0,
until_refresh or None,
@@ -2694,12 +2653,12 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Grab the sync routine
sync = resources[resource].sync
- updates = sync(elevated, context.project_id, session)
+ updates = sync(elevated, project_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(elevated,
- context.project_id,
+ project_id,
res,
0, 0,
until_refresh or None,
@@ -2749,7 +2708,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
reservation = reservation_create(elevated,
str(uuid.uuid4()),
usages[resource],
- context.project_id,
+ project_id,
resource, delta, expire,
session=session)
reservations.append(reservation.uuid)
@@ -2797,10 +2756,10 @@ def _quota_reservations_query(session, context, reservations):
@require_context
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
@@ -2812,10 +2771,10 @@ def reservation_commit(context, reservations):
@require_context
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 95fa6313f..52985a3eb 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -48,8 +48,17 @@ class NovaBase(object):
"""Save this object."""
if not session:
session = get_session()
- session.add(self)
- session.flush()
+        # NOTE(boris-42): This part of the code should look like:
+        #                   session.add(self)
+        #                   session.flush()
+        #                 But there is a bug in SQLAlchemy and eventlet that
+        #                 raises a NoneType exception if there is no running
+        #                 transaction and rollback is called. As long as
+        #                 SQLAlchemy has this bug we have to create the
+        #                 transaction explicitly.
+ with session.begin(subtransactions=True):
+ session.add(self)
+ session.flush()
def soft_delete(self, session=None):
"""Mark this object as deleted."""
diff --git a/nova/exception.py b/nova/exception.py
index 7ec23d32d..f96b1eaf3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -82,9 +82,11 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
- def wrapped(*args, **kw):
+ def wrapped(self, context, *args, **kw):
+            # Don't store self or context in the payload; they may
+            # contain confidential information.
try:
- return f(*args, **kw)
+ return f(self, context, *args, **kw)
except Exception, e:
with excutils.save_and_reraise_exception():
if notifier:
@@ -104,10 +106,6 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# propagated.
temp_type = f.__name__
- context = get_context_from_function_and_args(f,
- args,
- kw)
-
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
@@ -1089,20 +1087,3 @@ class CryptoCAFileNotFound(FileNotFound):
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
-
-
-def get_context_from_function_and_args(function, args, kwargs):
- """Find an arg of type RequestContext and return it.
-
- This is useful in a couple of decorators where we don't
- know much about the function we're wrapping.
- """
-
- # import here to avoid circularity:
- from nova import context
-
- for arg in itertools.chain(kwargs.values(), args):
- if isinstance(arg, context.RequestContext):
- return arg
-
- return None
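
With the context-sniffing helper gone, the decorator relies on a fixed (self, context, ...) calling convention instead of searching the arguments for a RequestContext. A reduced, self-contained sketch of that contract; the notifier here is a plain callable, not Nova's real notifier API:

import functools


def fake_notify(context, publisher_id, event_type, level, payload):
    print('%s %s %s %s' % (publisher_id, event_type, level, payload))


def wrap_exception(notifier=None, publisher_id=None, level='ERROR'):
    def inner(f):
        @functools.wraps(f)
        def wrapped(self, context, *args, **kw):
            # self and context are deliberately kept out of the payload.
            try:
                return f(self, context, *args, **kw)
            except Exception:
                if notifier:
                    payload = {'args': args, 'kwargs': kw, 'exception': True}
                    notifier(context, publisher_id, f.__name__, level, payload)
                raise
        return wrapped
    return inner


class Worker(object):
    @wrap_exception(notifier=fake_notify, publisher_id='worker.host-1')
    def do_work(self, context, item):
        raise ValueError(item)


try:
    Worker().do_work('request-context', 'bad-item')
except ValueError:
    pass  # the notifier fired; the exception still propagates to the caller
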
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 3fb397298..347b98733 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -7538,7 +7538,7 @@ msgstr ""
#: nova/virt/vmwareapi/driver.py:107
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
-"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
+"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver"
msgstr ""
#: nova/virt/vmwareapi/driver.py:258
@@ -7635,7 +7635,7 @@ msgstr ""
#: nova/virt/vmwareapi/read_write_util.py:142
#, python-format
-msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s"
msgstr ""
#: nova/virt/vmwareapi/vim.py:83
diff --git a/nova/manager.py b/nova/manager.py
index cb15b776e..7df63f719 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -215,8 +215,9 @@ class Manager(base.Base):
if self._periodic_spacing[task_name] is None:
wait = 0
else:
- wait = time.time() - (self._periodic_last_run[task_name] +
- self._periodic_spacing[task_name])
+ due = (self._periodic_last_run[task_name] +
+ self._periodic_spacing[task_name])
+ wait = max(0, due - time.time())
if wait > 0.2:
if wait < idle_for:
idle_for = wait
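
The hunk above fixes the sign of the idle calculation: the old expression measured how far past due the task was, and went negative for tasks not yet due. A small stand-alone sketch of the corrected computation:

import time


def seconds_until_due(last_run, spacing, now=None):
    # "wait" is the time remaining until the task is due, clamped at zero,
    # matching the new max(0, due - time.time()) expression above.
    now = time.time() if now is None else now
    due = last_run + spacing
    return max(0, due - now)


# A task that last ran 40s ago with a 60s spacing is due in about 20s:
print(seconds_until_due(last_run=time.time() - 40, spacing=60))
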
diff --git a/nova/network/api.py b/nova/network/api.py
index 25680e656..976be93ed 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -74,7 +74,11 @@ def update_instance_cache_with_nw_info(api, context, instance,
class API(base.Base):
- """API for interacting with the network manager."""
+ """API for doing networking via the nova-network network manager.
+
+ This is a pluggable module - other implementations do networking via
+ other services (such as Quantum).
+ """
_sentinel = object()
@@ -180,9 +184,15 @@ class API(base.Base):
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
- requested_networks):
+ requested_networks, macs=None):
"""Allocates all network structures for an instance.
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: macs is ignored by nova-network.
:returns: network info as from get_instance_nw_info() below
"""
args = {}
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index ea09f69b2..e6abde609 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -647,12 +647,18 @@ def remove_floating_forward(floating_ip, fixed_ip, device):
def floating_forward_rules(floating_ip, fixed_ip, device):
+ rules = []
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
- rule += ' -o %s' % device
- return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('float-snat', rule)]
+ rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
+ rules.append(('float-snat', rule + ' -o %s' % device))
+ else:
+ rules.append(('float-snat', rule))
+ rules.append(
+ ('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ rules.append(
+ ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ return rules
def initialize_gateway_device(dev, network_ref):
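
For a concrete picture of what the reworked helper emits, here is a stand-alone copy of the new logic with example addresses (the IPs and device name are made up):

def floating_forward_rules(floating_ip, fixed_ip, device):
    # Mirrors the updated nova.network.linux_net helper above.
    rules = []
    rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
    if device:
        rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
        rules.append(('float-snat', rule + ' -o %s' % device))
    else:
        rules.append(('float-snat', rule))
    rules.append(
        ('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
    rules.append(
        ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
    return rules


for chain, rule in floating_forward_rules('203.0.113.10', '10.0.0.5', 'eth1'):
    print('%-12s %s' % (chain, rule))
# With a device, SNAT is split into a hairpin rule (-d <fixed_ip>) and an
# egress rule (-o <device>) instead of a single device-scoped rule.
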
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8d9255dac..ccdac6f60 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -1231,6 +1231,7 @@ class NetworkManager(manager.SchedulerDependentManager):
nw_info = network_model.NetworkInfo()
for vif in vifs:
vif_dict = {'id': vif['uuid'],
+ 'type': network_model.VIF_TYPE_BRIDGE,
'address': vif['address']}
# handle case where vif doesn't have a network
diff --git a/nova/network/model.py b/nova/network/model.py
index dcee68f8c..e4fe0d54c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -25,6 +25,18 @@ def ensure_string_keys(d):
# http://bugs.python.org/issue4978
return dict([(str(k), v) for k, v in d.iteritems()])
+# Constants for the 'vif_type' field in VIF class
+VIF_TYPE_OVS = 'ovs'
+VIF_TYPE_BRIDGE = 'bridge'
+VIF_TYPE_802_QBG = '802.1qbg'
+VIF_TYPE_802_QBH = '802.1qbh'
+VIF_TYPE_OTHER = 'other'
+
+# Constant for max length of network interface names
+# eg 'bridge' in the Network class or 'devname' in
+# the VIF class
+NIC_NAME_LEN = 14
+
class Model(dict):
"""Defines some necessary structures for most of the network models."""
@@ -195,13 +207,14 @@ class Network(Model):
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
- **kwargs):
+ devname=None, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self['type'] = type
+ self['devname'] = devname
self._set_meta(kwargs)
@@ -366,6 +379,7 @@ class NetworkInfo(list):
'broadcast': str(subnet_v4.as_netaddr().broadcast),
'mac': vif['address'],
'vif_type': vif['type'],
+ 'vif_devname': vif.get('devname'),
'vif_uuid': vif['id'],
'rxtx_cap': vif.get_meta('rxtx_cap', 0),
'dns': [get_ip(ip) for ip in subnet_v4['dns']],
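
A tiny dependency-free sketch of what the extended VIF carries once a devname is set; the dict below stands in for nova.network.model.VIF and the values are made up:

VIF_TYPE_BRIDGE = 'bridge'
NIC_NAME_LEN = 14  # Linux interface names must stay short

vif = {
    'id': '3b9ce13c-5b38-45e2-ba22-9e3f5a3b9b3a',
    'address': 'fa:16:3e:00:00:01',
    'type': VIF_TYPE_BRIDGE,
    # devname is pre-truncated by the caller (see the Quantum changes below)
    'devname': ('tap' + '3b9ce13c-5b38-45e2-ba22-9e3f5a3b9b3a')[:NIC_NAME_LEN],
    'network': None,
}
print(vif['devname'])  # tap3b9ce13c-5b
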
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 51386b4fd..8347ee94d 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -48,6 +48,11 @@ quantum_opts = [
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
+ # TODO(berrange) temporary hack until Quantum can pass over the
+ # name of the OVS bridge it is configured with
+ cfg.StrOpt('quantum_ovs_bridge',
+ default='br-int',
+ help='Name of Integration Bridge used by Open vSwitch'),
]
CONF = cfg.CONF
@@ -99,7 +104,15 @@ class API(base.Base):
return nets
def allocate_for_instance(self, context, instance, **kwargs):
- """Allocate all network resources for the instance."""
+ """Allocate all network resources for the instance.
+
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: QuantumV2 does not yet honour mac address limits.
+ """
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
@@ -570,9 +583,24 @@ class API(base.Base):
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
+ bridge = None
+ vif_type = port.get('binding:vif_type')
+ # TODO(berrange) Quantum should pass the bridge name
+ # in another binding metadata field
+ if vif_type == network_model.VIF_TYPE_OVS:
+ bridge = CONF.quantum_ovs_bridge
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
+ bridge = "brq" + port['network_id']
+
+ if bridge is not None:
+ bridge = bridge[:network_model.NIC_NAME_LEN]
+
+ devname = "tap" + port['id']
+ devname = devname[:network_model.NIC_NAME_LEN]
+
network = network_model.Network(
id=port['network_id'],
- bridge='', # Quantum ignores this field
+ bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
@@ -582,7 +610,8 @@ class API(base.Base):
id=port['id'],
address=port['mac_address'],
network=network,
- type=port.get('binding:vif_type')))
+ type=port.get('binding:vif_type'),
+ devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
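
The bridge/devname derivation above is simple string work and can be shown without Quantum. A sketch recreating it with a fake port dict; 'br-int' is the default for the new quantum_ovs_bridge option and the UUIDs are invented:

NIC_NAME_LEN = 14
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_BRIDGE = 'bridge'


def bridge_and_devname(port, ovs_bridge='br-int'):
    vif_type = port.get('binding:vif_type')
    bridge = None
    if vif_type == VIF_TYPE_OVS:
        bridge = ovs_bridge
    elif vif_type == VIF_TYPE_BRIDGE:
        bridge = 'brq' + port['network_id']
    if bridge is not None:
        bridge = bridge[:NIC_NAME_LEN]
    devname = ('tap' + port['id'])[:NIC_NAME_LEN]
    return bridge, devname


port = {'id': '3b9ce13c-5b38-45e2-ba22-9e3f5a3b9b3a',
        'network_id': 'a87cc70a-3e15-4acf-8205-9b711a3531b7',
        'binding:vif_type': VIF_TYPE_BRIDGE}
print(bridge_and_devname(port))  # ('brqa87cc70a-3e', 'tap3b9ce13c-5b')
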
diff --git a/nova/quota.py b/nova/quota.py
index c2e34cca5..96e612503 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -198,7 +198,7 @@ class DbQuotaDriver(object):
return quotas
- def _get_quotas(self, context, resources, keys, has_sync):
+ def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
"""
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
@@ -211,6 +211,9 @@ class DbQuotaDriver(object):
have a sync attribute; if False, indicates
that the resource must NOT have a sync
attribute.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
# Filter resources
@@ -229,12 +232,12 @@ class DbQuotaDriver(object):
# Grab and return the quotas (without usages)
quotas = self.get_project_quotas(context, sub_resources,
- context.project_id,
+ project_id,
context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
- def limit_check(self, context, resources, values):
+ def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -254,6 +257,9 @@ class DbQuotaDriver(object):
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
# Ensure no value is less than zero
@@ -261,9 +267,13 @@ class DbQuotaDriver(object):
if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys(),
- has_sync=False)
+ has_sync=False, project_id=project_id)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
@@ -273,7 +283,8 @@ class DbQuotaDriver(object):
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
- def reserve(self, context, resources, deltas, expire=None):
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -303,6 +314,9 @@ class DbQuotaDriver(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
# Set up the reservation expiration
@@ -315,12 +329,16 @@ class DbQuotaDriver(object):
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas.
# NOTE(Vek): We're not worried about races at this point.
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
- has_sync=True)
+ has_sync=True, project_id=project_id)
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
@@ -328,27 +346,40 @@ class DbQuotaDriver(object):
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
- CONF.until_refresh, CONF.max_age)
+ CONF.until_refresh, CONF.max_age,
+ project_id=project_id)
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_commit(context, reservations)
+ db.reservation_commit(context, reservations, project_id=project_id)
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_rollback(context, reservations)
+ db.reservation_rollback(context, reservations, project_id=project_id)
def usage_reset(self, context, resources):
"""
@@ -843,7 +874,7 @@ class QuotaEngine(object):
return res.count(context, *args, **kwargs)
- def limit_check(self, context, **values):
+ def limit_check(self, context, project_id=None, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -863,11 +894,15 @@ class QuotaEngine(object):
nothing.
:param context: The request context, for access checks.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
- return self._driver.limit_check(context, self._resources, values)
+ return self._driver.limit_check(context, self._resources, values,
+ project_id=project_id)
- def reserve(self, context, expire=None, **deltas):
+ def reserve(self, context, expire=None, project_id=None, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -897,25 +932,32 @@ class QuotaEngine(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
reservations = self._driver.reserve(context, self._resources, deltas,
- expire=expire)
+ expire=expire,
+ project_id=project_id)
LOG.debug(_("Created reservations %(reservations)s") % locals())
return reservations
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
try:
- self._driver.commit(context, reservations)
+ self._driver.commit(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
@@ -924,16 +966,19 @@ class QuotaEngine(object):
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another tenant's quotas.
"""
try:
- self._driver.rollback(context, reservations)
+ self._driver.rollback(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
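
Taken together with the db/api.py changes above, the effect is that every reservation entry point now accepts an optional project_id and defaults it to context.project_id. A reduced sketch of that plumbing with fake classes, not the real QuotaEngine/DbQuotaDriver:

class Context(object):
    def __init__(self, project_id, is_admin=False):
        self.project_id = project_id
        self.is_admin = is_admin


class FakeQuotaDriver(object):
    def __init__(self):
        self.reserved = []

    def reserve(self, context, deltas, project_id=None):
        # Fall back to the caller's own tenant, exactly like the new code.
        if project_id is None:
            project_id = context.project_id
        self.reserved.append((project_id, dict(deltas)))
        return ['fake-reservation-uuid']

    def commit(self, context, reservations, project_id=None):
        if project_id is None:
            project_id = context.project_id
        return (project_id, reservations)


driver = FakeQuotaDriver()
admin = Context('admin-project', is_admin=True)
# An admin adjusting quota usage on behalf of another tenant:
reservations = driver.reserve(admin, {'instances': -1}, project_id='tenant-a')
print(driver.commit(admin, reservations, project_id='tenant-a'))
# Without project_id the driver falls back to the admin's own project:
print(driver.reserve(admin, {'instances': 1}))
print(driver.reserved)
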
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index a45e21a16..d1ae1cd6e 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -51,8 +51,6 @@ scheduler_driver_opts = [
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
-CONF.import_opt('instances_path', 'nova.compute.manager')
-CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
def handle_schedule_error(context, ex, instance_uuid, request_spec):
@@ -194,12 +192,12 @@ class Scheduler(object):
# Checking src host exists and compute node
src = instance_ref['host']
try:
- services = db.service_get_all_compute_by_host(context, src)
+ service = db.service_get_by_compute_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
- if not self.servicegroup_api.service_is_up(services[0]):
+ if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
@@ -211,8 +209,7 @@ class Scheduler(object):
"""
# Checking dest exists and compute node.
- dservice_refs = db.service_get_all_compute_by_host(context, dest)
- dservice_ref = dservice_refs[0]
+ dservice_ref = db.service_get_by_compute_host(context, dest)
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
@@ -292,5 +289,5 @@ class Scheduler(object):
:return: value specified by key
"""
- compute_node_ref = db.service_get_all_compute_by_host(context, host)
- return compute_node_ref[0]['compute_node'][0]
+ service_ref = db.service_get_by_compute_host(context, host)
+ return service_ref['compute_node'][0]
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 033ee9cc8..84bdcddb5 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -220,13 +220,12 @@ class SchedulerManager(manager.Manager):
"""
# Getting compute node info and related instances info
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
+ service_ref = db.service_get_by_compute_host(context, host)
instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
+ service_ref['host'])
# Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
+ compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
diff --git a/nova/service.py b/nova/service.py
index 86f022f61..39e414eb6 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -30,6 +30,7 @@ import time
import eventlet
import greenlet
+from nova import conductor
from nova import context
from nova import db
from nova import exception
@@ -38,6 +39,7 @@ from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
from nova import servicegroup
from nova import utils
from nova import version
@@ -392,7 +394,7 @@ class Service(object):
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
- periodic_interval_max=None,
+ periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
@@ -407,6 +409,9 @@ class Service(object):
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
+ self.db_allowed = db_allowed
+ self.conductor_api = conductor.API(use_local=db_allowed)
+ self.conductor_api.wait_until_ready(context.get_admin_context())
self.servicegroup_api = servicegroup.API()
def start(self):
@@ -417,9 +422,9 @@ class Service(object):
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
- service_ref = db.service_get_by_args(ctxt,
- self.host,
- self.binary)
+ service_ref = self.conductor_api.service_get_by_args(ctxt,
+ self.host,
+ self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
@@ -467,12 +472,14 @@ class Service(object):
self.timers.append(periodic)
def _create_service_ref(self, context):
- service_ref = db.service_create(context,
- {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0})
- self.service_id = service_ref['id']
+ svc_values = {
+ 'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0
+ }
+ service = self.conductor_api.service_create(context, svc_values)
+ self.service_id = service['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
@@ -481,7 +488,8 @@ class Service(object):
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
- periodic_fuzzy_delay=None, periodic_interval_max=None):
+ periodic_fuzzy_delay=None, periodic_interval_max=None,
+ db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
@@ -514,7 +522,8 @@ class Service(object):
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
- periodic_interval_max=periodic_interval_max)
+ periodic_interval_max=periodic_interval_max,
+ db_allowed=db_allowed)
return service_obj
@@ -522,7 +531,8 @@ class Service(object):
"""Destroy the service object in the datastore."""
self.stop()
try:
- db.service_destroy(context.get_admin_context(), self.service_id)
+ self.conductor_api.service_destroy(context.get_admin_context(),
+ self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
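
The service bootstrap now goes through the conductor API rather than nova.db, so a process started with db_allowed=False never needs database access of its own. A minimal sketch of the indirection, with an in-memory stand-in for conductor.API:

class FakeConductorAPI(object):
    """In-memory stand-in for nova.conductor.API(use_local=db_allowed)."""

    def __init__(self):
        self._services = {}
        self._next_id = 1

    def service_create(self, context, values):
        record = dict(values, id=self._next_id)
        self._services[record['id']] = record
        self._next_id += 1
        return record

    def service_destroy(self, context, service_id):
        self._services.pop(service_id)


conductor_api = FakeConductorAPI()
svc_values = {'host': 'node-1', 'binary': 'nova-compute',
              'topic': 'compute', 'report_count': 0}
service = conductor_api.service_create('admin-context', svc_values)
print(service['id'])
conductor_api.service_destroy('admin-context', service['id'])
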
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index be4465cf9..e103b5b19 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -32,8 +32,10 @@ def stub_service_get_all(context, disabled=None):
return fake_hosts.SERVICES_LIST
-def stub_service_does_host_exist(context, host_name):
- return host_name in [row['host'] for row in stub_service_get_all(context)]
+def stub_service_get_by_host_and_topic(context, host_name, topic):
+ for service in stub_service_get_all(context):
+ if service['host'] == host_name and service['topic'] == topic:
+ return service
def stub_set_host_enabled(context, host_name, enabled):
@@ -130,8 +132,8 @@ class HostTestCase(test.TestCase):
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
# Only hosts in our fake DB exist
- self.stubs.Set(db, 'service_does_host_exist',
- stub_service_does_host_exist)
+ self.stubs.Set(db, 'service_get_by_host_and_topic',
+ stub_service_get_by_host_and_topic)
# 'host_c1' always succeeds, and 'host_c2'
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index ba65e8f6a..44d9e8af3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -21,8 +21,8 @@ import uuid
import webob
-from nova.api.openstack.compute.contrib import admin_networks as networks
from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
from nova.openstack.common import cfg
from nova import test
@@ -177,7 +177,7 @@ class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
- self.controller = networks.AdminNetworkController(
+ self.controller = networks.NetworkController(
self.fake_network_api)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index 24f169d98..1bd47b67a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -26,30 +26,30 @@ from nova.tests.api.openstack import fakes
fake_services_list = [{'binary': 'nova-scheduler',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 1,
'disabled': True,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-compute',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 2,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-scheduler',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 3,
'disabled': False,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
{'binary': 'nova-compute',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 4,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
]
@@ -75,7 +75,7 @@ class FakeRequestWithHostService(object):
GET = {"host": "host1", "service": "nova-compute"}
-def fake_servcie_get_all(context):
+def fake_service_get_all(context):
return fake_services_list
@@ -111,7 +111,7 @@ class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
- self.stubs.Set(db, "service_get_all", fake_servcie_get_all)
+ self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
@@ -128,7 +128,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler',
- 'host': 'host1', 'zone': 'nova',
+ 'host': 'host1', 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
@@ -136,7 +136,7 @@ class ServicesTest(test.TestCase):
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler', 'host': 'host2',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute', 'host': 'host2',
@@ -150,7 +150,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute', 'host': 'host1',
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index e3810510b..485968209 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -185,7 +185,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"Keypairs",
"Multinic",
"MultipleCreate",
- "OSNetworks",
"QuotaClasses",
"Quotas",
"Rescue",
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index d5384eff0..37ef71881 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -136,6 +136,19 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
row = db.bm_node_get(self.context, self.node['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+ def test_macs_for_instance(self):
+ self._create_node()
+ expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(self.test_instance))
+
+ def test_macs_for_instance_no_interfaces(self):
+ # Nodes cannot boot with no MACs, so we raise an error if that happens.
+ self.nic_info = []
+ self._create_node()
+ self.assertRaises(exception.NovaException,
+ self.driver.macs_for_instance, self.test_instance)
+
def test_spawn_node_in_use(self):
self._create_node()
db.bm_node_update(self.context, self.node['id'],
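
These tests exercise a new hand-off: the virt driver can report a node's fixed MAC addresses and the compute manager forwards them to the network API (see the allocate_for_instance changes earlier). A sketch of that flow with fake classes; none of these names are Nova's real implementations:

class FakeBareMetalDriver(object):
    def macs_for_instance(self, instance):
        # A node with no registered interfaces cannot boot; the real driver
        # raises a NovaException, this sketch uses RuntimeError.
        macs = instance.get('nic_macs') or set()
        if not macs:
            raise RuntimeError('node has no registered interfaces')
        return set(macs)


class FakeNetworkAPI(object):
    def allocate_for_instance(self, context, instance, vpn=False,
                              requested_networks=None, macs=None):
        # nova-network ignores macs; QuantumV2 may use them when creating
        # ports (per the docstrings added above).
        return [{'address': mac} for mac in sorted(macs or [])]


driver = FakeBareMetalDriver()
net_api = FakeNetworkAPI()
instance = {'uuid': 'fake-uuid',
            'nic_macs': {'01:23:45:67:89:01', '01:23:45:67:89:02'}}
macs = driver.macs_for_instance(instance)
print(net_api.allocate_for_instance('ctxt', instance, macs=macs))
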
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index bf619bbec..0d9f67231 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -60,7 +60,6 @@ from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
@@ -146,10 +145,11 @@ class BaseTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def tearDown(self):
+ ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
- instances = db.instance_get_all(self.context.elevated())
+ instances = db.instance_get_all(ctxt)
for instance in instances:
- db.instance_destroy(self.context.elevated(), instance['uuid'])
+ db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
@@ -996,96 +996,109 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
- def _stub_out_reboot(self, fake_net_info, fake_block_dev_info):
- def fake_reboot(driver, inst, net_info, reboot_type, block_dev_info):
- self.assertEqual(block_dev_info, fake_block_dev_info)
- self.assertEqual(net_info, fake_net_info)
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo',
- lambda x: False)
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
+ def _test_reboot(self, soft, legacy_nwinfo_driver):
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
- def test_reboot_soft(self):
- # Ensure instance can be soft rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING})
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'legacy_nwinfo')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ instance = dict(uuid='fake-instance',
+ power_state='unknown')
+ updated_instance1 = dict(uuid='updated-instance1',
+ power_state='fake')
+ updated_instance2 = dict(uuid='updated-instance2',
+ power_state='fake')
+
+ fake_nw_model = network_model.NetworkInfo()
+ self.mox.StubOutWithMock(fake_nw_model, 'legacy')
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 'fake_power_state1'
+ fake_power_state2 = 'fake_power_state2'
+ reboot_type = soft and 'SOFT' or 'HARD'
+
+ # Beginning of calls we expect.
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ self.compute._instance_update(econtext, instance['uuid'],
+ power_state=fake_power_state1,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance1)
+
+ # Reboot should check the driver to see if legacy nwinfo is
+ # needed. If it is, the model's legacy() method should be
+ # called and the result passed to driver.reboot. If the
+ # driver wants the model, we pass the model.
+ self.compute.driver.legacy_nwinfo().AndReturn(legacy_nwinfo_driver)
+ if legacy_nwinfo_driver:
+ expected_nw_info = 'legacy-nwinfo'
+ fake_nw_model.legacy().AndReturn(expected_nw_info)
+ else:
+ expected_nw_info = fake_nw_model
+
+        # Annoying. driver.reboot is wrapped in a try/except, and
+        # doesn't re-raise.  It eats exceptions generated by mox if
+        # this is called with the wrong args, so we have to hack
+        # around it.
+ reboot_call_info = {}
+ expected_call_info = {'args': (updated_instance1, expected_nw_info,
+ reboot_type, fake_block_dev_info),
+ 'kwargs': {}}
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ self.compute._get_power_state(econtext,
+ updated_instance1).AndReturn(fake_power_state2)
+ self.compute._instance_update(econtext, updated_instance1['uuid'],
+ power_state=fake_power_state2,
+ task_state=None,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance2)
+ self.compute._notify_about_instance_usage(econtext,
+ updated_instance2,
+ 'reboot.end')
- reboot_type = "SOFT"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
+ self.mox.ReplayAll()
self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
block_device_info=fake_block_dev_info,
reboot_type=reboot_type)
+ self.assertEqual(expected_call_info, reboot_call_info)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
-
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
+ def test_reboot_soft(self):
+ self._test_reboot(True, False)
def test_reboot_hard(self):
- # Ensure instance can be hard rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
+ self._test_reboot(False, False)
- reboot_type = "HARD"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
- block_device_info=fake_block_dev_info,
- reboot_type=reboot_type)
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
+ def test_reboot_soft_legacy_nwinfo_driver(self):
+ self._test_reboot(True, True)
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
-
- def test_reboot_nwinfo(self):
- # Ensure instance network info is rehydrated in reboot.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- result = {'was_instance': []}
-
- # NOTE(danms): Beware the dragons ahead:
- # Since the _legacy_nw_info() method in manager runs inside a
- # try..except block, we can't assert from here. Further, this
- # will be run more than once during the operation we're about
- # to fire off, which means we need to make sure that it doesn't
- # fail any of the times it is run. Hence the obscurity below.
- def fake_legacy_nw_info(network_info):
- result['was_instance'].append(
- isinstance(network_info, network_model.NetworkInfo))
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_legacy_nw_info)
-
- fake_net_info = network_model.NetworkInfo([
- fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- fake_net_info_p = jsonutils.to_primitive(fake_net_info)
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info_p,
- block_device_info=fake_block_dev_info,
- reboot_type="SOFT")
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
- self.assertFalse(False in result['was_instance'])
+ def test_reboot_hard_legacy_nwinfo_driver(self):
+ self._test_reboot(False, True)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
@@ -1510,6 +1523,27 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ macs = set(['01:23:45:67:89:ab'])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True))
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(instance).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance=instance)
+
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1520,7 +1554,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False).AndRaise(rpc_common.RemoteError())
+ vpn=False,
+ macs=None).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -2368,80 +2403,60 @@ class ComputeTestCase(BaseTestCase):
# cleanup
db.instance_destroy(c, instance['uuid'])
- def test_live_migration_dest_raises_exception(self):
+ def test_live_migration_exception_rolls_back(self):
# Confirm exception when pre_live_migration fails.
- # creating instance testdata
- instance_ref = self._create_fake_instance({'host': 'dummy'})
- instance = jsonutils.to_primitive(instance_ref)
- inst_uuid = instance['uuid']
- inst_id = instance['id']
-
c = context.get_admin_context()
- topic = rpc.queue_get_for(c, CONF.compute_topic, instance['host'])
-
- # creating volume testdata
- volume_id = 'fake'
- values = {'instance_uuid': inst_uuid, 'device_name': '/dev/vdc',
- 'delete_on_termination': False, 'volume_id': volume_id}
- db.block_device_mapping_create(c, values)
-
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- def fake_instance_update(context, instance_uuid, **updates):
- return db.instance_update_and_get_original(context, instance_uuid,
- updates)
- self.stubs.Set(self.compute, '_instance_update',
- fake_instance_update)
+ src_host = 'fake-src-host'
+ dest_host = 'fake-dest-host'
+ instance = dict(uuid='fake_instance', host=src_host,
+ name='fake-name')
+ updated_instance = 'fake_updated_instance'
+ fake_bdms = [dict(volume_id='vol1-id'), dict(volume_id='vol2-id')]
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
-
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
- self.compute.driver.get_instance_disk_info(instance['name'])
-
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
- self.compute.compute_rpcapi.pre_live_migration(c,
- mox.IsA(instance), True, None, instance['host'],
- None).AndRaise(rpc.common.RemoteError('', '', ''))
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdms')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'remove_volume_connection')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'rollback_live_migration_at_destination')
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.MIGRATING})
- # mocks for rollback
- rpc.call(c, 'network', {'method': 'setup_networks_on_host',
- 'args': {'instance_id': inst_id,
- 'host': self.compute.host,
- 'teardown': False},
- 'version': '1.0'}, None)
- rpcinst = jsonutils.to_primitive(
- db.instance_get_by_uuid(self.context, instance['uuid']))
- rpc.call(c, topic,
- {"method": "remove_volume_connection",
- "args": {'instance': rpcinst,
- 'volume_id': volume_id},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
- None)
- rpc.cast(c, topic,
- {"method": "rollback_live_migration_at_destination",
- "args": {'instance': rpcinst},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.compute.driver.get_instance_disk_info(
+ instance['name']).AndReturn('fake_disk')
+ self.compute.compute_rpcapi.pre_live_migration(c,
+ instance, True, 'fake_disk', dest_host,
+ None).AndRaise(test.TestingException())
+
+ self.compute._instance_update(c, instance['uuid'],
+ host=src_host, vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(c,
+ updated_instance, self.compute.host)
+ self.compute._get_instance_volume_bdms(c,
+ updated_instance).AndReturn(fake_bdms)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol1-id', dest_host)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol2-id', dest_host)
+ self.compute.compute_rpcapi.rollback_live_migration_at_destination(
+ c, updated_instance, dest_host)
# start test
self.mox.ReplayAll()
- self.assertRaises(rpc_common.RemoteError,
+ self.assertRaises(test.TestingException,
self.compute.live_migration,
- c, dest=instance['host'], block_migration=True,
- instance=rpcinst)
-
- # cleanup
- for bdms in db.block_device_mapping_get_all_by_instance(
- c, inst_uuid):
- db.block_device_mapping_destroy(c, bdms['id'])
- db.instance_destroy(c, inst_uuid)
+ c, dest=dest_host, block_migration=True,
+ instance=instance)
def test_live_migration_works_correctly(self):
# Confirm live_migration() works as expected correctly.
@@ -2559,38 +2574,50 @@ class ComputeTestCase(BaseTestCase):
self.compute._post_live_migration(c, inst_ref, dest)
def test_post_live_migration_at_destination(self):
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'post_live_migration_at_destination')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+
params = {'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED, }
instance = jsonutils.to_primitive(self._create_fake_instance(params))
admin_ctxt = context.get_admin_context()
instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
+
self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
self.compute.host)
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
migration = {'source_compute': instance['host'],
'dest_compute': self.compute.host, }
self.compute.network_api.migrate_instance_finish(admin_ctxt,
instance, migration)
- self.mox.StubOutWithMock(self.compute.driver,
- 'post_live_migration_at_destination')
fake_net_info = []
self.compute.driver.post_live_migration_at_destination(admin_ctxt,
instance,
fake_net_info,
False)
- self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
- self.compute.host)
+ self.compute._get_power_state(admin_ctxt, instance).AndReturn(
+ 'fake_power_state')
+
+ updated_instance = 'fake_updated_instance'
+ self.compute._instance_update(admin_ctxt, instance['uuid'],
+ host=self.compute.host,
+ power_state='fake_power_state',
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(admin_ctxt,
+ updated_instance, self.compute.host)
self.mox.ReplayAll()
+
self.compute.post_live_migration_at_destination(admin_ctxt, instance)
- instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
- self.assertEqual(instance['host'], self.compute.host)
- self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
- self.assertEqual(instance['task_state'], None)
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
@@ -3121,21 +3148,6 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance(params)
self.compute._instance_update(self.context, instance['uuid'])
- def test_startup_conductor_ping(self):
- timeouts = []
- calls = dict(count=0)
-
- def fake_ping(context, message, timeout):
- timeouts.append(timeout)
- calls['count'] += 1
- if calls['count'] < 15:
- raise rpc_common.Timeout("fake")
-
- self.stubs.Set(self.compute.conductor_api, 'ping', fake_ping)
- self.compute._get_instances_at_startup(self.context)
- self.assertEqual(timeouts.count(10), 10)
- self.assertTrue(None in timeouts)
-
def test_destroy_evacuated_instances(self):
fake_context = context.get_admin_context()
@@ -3182,7 +3194,6 @@ class ComputeTestCase(BaseTestCase):
self.compute._destroy_evacuated_instances(fake_context)
def test_init_host(self):
-
our_host = self.compute.host
fake_context = 'fake-context'
startup_instances = ['inst1', 'inst2', 'inst3']
@@ -3190,8 +3201,8 @@ class ComputeTestCase(BaseTestCase):
def _do_mock_calls(defer_iptables_apply):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(fake_context)
- self.compute._get_instances_at_startup(fake_context).AndReturn(
- startup_instances)
+ self.compute.conductor_api.instance_get_all_by_host(
+ fake_context, our_host).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
self.compute._destroy_evacuated_instances(fake_context)
@@ -3208,8 +3219,8 @@ class ComputeTestCase(BaseTestCase):
'filter_defer_apply_on')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_off')
- self.mox.StubOutWithMock(self.compute,
- '_get_instances_at_startup')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute,
'_destroy_evacuated_instances')
@@ -3235,7 +3246,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute.init_host()
- # VerifyCall done by tearDown
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
def test_get_instances_on_driver(self):
fake_context = context.get_admin_context()
@@ -3934,12 +3948,12 @@ class ComputeAPITestCase(BaseTestCase):
def test_repeated_delete_quota(self):
in_use = {'instances': 1}
- def fake_reserve(context, **deltas):
+ def fake_reserve(context, expire=None, project_id=None, **deltas):
return dict(deltas.iteritems())
self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
- def fake_commit(context, deltas):
+ def fake_commit(context, deltas, project_id=None):
for k, v in deltas.iteritems():
in_use[k] = in_use.get(k, 0) + v
@@ -3993,7 +4007,8 @@ class ComputeAPITestCase(BaseTestCase):
'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.soft_delete(self.context, instance)
@@ -4021,7 +4036,8 @@ class ComputeAPITestCase(BaseTestCase):
'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
- nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
def fail(*args, **kwargs):
@@ -4219,12 +4235,10 @@ class ComputeAPITestCase(BaseTestCase):
def _stub_out_reboot(self, device_name):
def fake_reboot_instance(rpcapi, context, instance,
block_device_info,
- network_info,
reboot_type):
self.assertEqual(
block_device_info['block_device_mapping'][0]['mount_device'],
device_name)
- self.assertEqual(network_info[0]['network']['bridge'], 'fake_br1')
self.stubs.Set(nova.compute.rpcapi.ComputeAPI, 'reboot_instance',
fake_reboot_instance)
@@ -4397,6 +4411,31 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_snapshot_given_image_uuid(self):
+ """Ensure a snapshot of an instance can be created when image UUID
+ is already known.
+ """
+ instance = self._create_fake_instance()
+ name = 'snap1'
+ extra_properties = {'extra_param': 'value1'}
+ recv_meta = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties)
+ image_id = recv_meta['id']
+
+ def fake_show(meh, context, id):
+ return recv_meta
+
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None})
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ image = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties,
+ image_id=image_id)
+ self.assertEqual(image, recv_meta)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_snapshot_minram_mindisk_VHD(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4404,27 +4443,25 @@ class ComputeAPITestCase(BaseTestCase):
and min_disk set to that of the original instances flavor.
"""
- self.fake_image['disk_format'] = 'vhd'
+ self.fake_image.update(disk_format='vhd',
+ min_ram=1, min_disk=1)
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- instance = self._create_fake_instance()
- inst_params = {'root_gb': 2, 'memory_mb': 256}
- instance['instance_type'].update(inst_params)
+ instance = self._create_fake_instance(type_name='m1.small')
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
- self.assertEqual(image['min_ram'], 256)
- self.assertEqual(image['min_disk'], 2)
+ instance_type = instance['instance_type']
+ self.assertEqual(image['min_ram'], instance_type['memory_mb'])
+ self.assertEqual(image['min_disk'], instance_type['root_gb'])
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
- db.instance_destroy(self.context, instance['uuid'])
-
def test_snapshot_minram_mindisk(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4490,7 +4527,10 @@ class ComputeAPITestCase(BaseTestCase):
def fake_show(*args):
raise exception.ImageNotFound(image_id="fake")
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ if not self.__class__.__name__ == "CellsComputeAPITestCase":
+ # Cells tests will call this a 2nd time in child cell with
+ # the newly created image_id, and we want that one to succeed.
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
@@ -6111,81 +6151,6 @@ class ComputePolicyTestCase(BaseTestCase):
availability_zone='1:1')
-class ComputeHostAPITestCase(BaseTestCase):
- def setUp(self):
- super(ComputeHostAPITestCase, self).setUp()
- self.host_api = compute_api.HostAPI()
-
- def _rpc_call_stub(self, call_info):
- def fake_rpc_call(context, topic, msg, timeout=None):
- call_info['context'] = context
- call_info['topic'] = topic
- call_info['msg'] = msg
- self.stubs.Set(rpc, 'call', fake_rpc_call)
-
- def _pretend_fake_host_exists(self, ctxt):
- """Sets it so that the host API always thinks that 'fake_host'
- exists"""
- self.mox.StubOutWithMock(self.host_api, 'does_host_exist')
- self.host_api.does_host_exist(ctxt, 'fake_host').AndReturn(True)
- self.mox.ReplayAll()
-
- def test_set_host_enabled(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self._pretend_fake_host_exists(ctxt)
- self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'set_host_enabled',
- 'args': {'enabled': 'fake_enabled'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_get_host_uptime(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self._pretend_fake_host_exists(ctxt)
- self.host_api.get_host_uptime(ctxt, 'fake_host')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'get_host_uptime',
- 'args': {},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_host_power_action(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
- self._pretend_fake_host_exists(ctxt)
- self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_power_action',
- 'args': {'action': 'fake_action'},
- 'version':
- compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_set_host_maintenance(self):
- ctxt = context.get_admin_context()
- call_info = {}
- self._rpc_call_stub(call_info)
- self._pretend_fake_host_exists(ctxt)
- self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_maintenance_mode',
- 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
-
class KeypairAPITestCase(BaseTestCase):
def setUp(self):
super(KeypairAPITestCase, self).setUp()
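The quota stubs earlier in this section (fake_reserve, fake_commit) gained an explicit project_id keyword. A standalone sketch, plain Python with illustrative names only, of why the old stub signatures stop working once the compute API passes project_id as a keyword:

    def fake_commit_old(context, deltas):
        # pre-change stub: no project_id parameter
        return deltas

    def fake_commit_new(context, deltas, project_id=None):
        # post-change stub: accepts the keyword the compute API now passes
        return deltas, project_id

    try:
        fake_commit_old('ctxt', {'instances': -1}, project_id='fake-project')
    except TypeError as exc:
        print('old stub rejects the new call: %s' % exc)

    print(fake_commit_new('ctxt', {'instances': -1}, project_id='fake-project'))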
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index aa4b448d4..3c25f9b43 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -16,7 +16,11 @@
"""
Tests For Compute w/ Cells
"""
+import functools
+
from nova.compute import cells_api as compute_cells_api
+from nova import db
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.compute import test_compute
@@ -28,17 +32,57 @@ ORIG_COMPUTE_API = None
def stub_call_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
return fn(context, instance, *args, **kwargs)
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
fn(context, instance, *args, **kwargs)
-def deploy_stubs(stubs, api):
- stubs.Set(api, '_call_to_cells', stub_call_to_cells)
- stubs.Set(api, '_cast_to_cells', stub_cast_to_cells)
+def deploy_stubs(stubs, api, original_instance=None):
+ call = stub_call_to_cells
+ cast = stub_cast_to_cells
+
+ if original_instance:
+ kwargs = dict(original_instance=original_instance)
+ call = functools.partial(stub_call_to_cells, **kwargs)
+ cast = functools.partial(stub_cast_to_cells, **kwargs)
+
+ stubs.Set(api, '_call_to_cells', call)
+ stubs.Set(api, '_cast_to_cells', cast)
+
+
+def wrap_create_instance(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ instance = self._create_fake_instance()
+
+ def fake(*args, **kwargs):
+ return instance
+
+ self.stubs.Set(self, '_create_fake_instance', fake)
+ original_instance = jsonutils.to_primitive(instance)
+ deploy_stubs(self.stubs, self.compute_api,
+ original_instance=original_instance)
+ return func(self, *args, **kwargs)
+
+ return wrapper
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
@@ -84,6 +128,42 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def test_get_backdoor_port(self):
self.skipTest("Test is incompatible with cells.")
+ def test_snapshot_given_image_uuid(self):
+ self.skipTest("Test doesn't apply to API cell.")
+
+ @wrap_create_instance
+ def test_snapshot(self):
+ return super(CellsComputeAPITestCase, self).test_snapshot()
+
+ @wrap_create_instance
+ def test_snapshot_image_metadata_inheritance(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_image_metadata_inheritance()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_VHD(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_VHD()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_img_missing_minram(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_img_missing_minram()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_no_image(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_no_image()
+
+ @wrap_create_instance
+ def test_backup(self):
+ return super(CellsComputeAPITestCase, self).test_backup()
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
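The deploy_stubs() change above pins an original_instance onto the cell stubs with functools.partial so the child-cell path sees the instance created before wrap_create_instance replaced _create_fake_instance. A minimal standalone sketch of that pattern (illustrative names, not nova code):

    import functools

    def stub_call_to_cells(context, instance, method, original_instance=None):
        # the real stub also restores vm_state/task_state in the child-cell DB
        return original_instance if original_instance else instance

    pinned = functools.partial(stub_call_to_cells,
                               original_instance={'uuid': 'original'})
    print(pinned('ctxt', {'uuid': 'replacement'}, 'snapshot'))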
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
index f00245d1e..95d3c4926 100644
--- a/nova/tests/compute/test_host_api.py
+++ b/nova/tests/compute/test_host_api.py
@@ -13,93 +13,114 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.compute import api
+from nova import compute
+from nova.compute import rpcapi as compute_rpcapi
from nova import context
-from nova import db
-from nova import exception
+from nova.openstack.common import rpc
from nova import test
-from nova.tests import fake_hosts
-class HostApiTestCase(test.TestCase):
- """
- Tests 'host' subset of the compute api
- """
-
+class ComputeHostAPITestCase(test.TestCase):
def setUp(self):
- super(HostApiTestCase, self).setUp()
- self.compute_rpcapi = api.compute_rpcapi
- self.api = api.HostAPI()
+ super(ComputeHostAPITestCase, self).setUp()
+ self.host_api = compute.HostAPI()
+ self.ctxt = context.get_admin_context()
- def test_bad_host_set_enabled(self):
- """
- Tests that actions on single hosts that don't exist blow up without
- having to reach the host via rpc. Should raise HostNotFound if you
- try to update a host that is not in the DB
+ def _mock_rpc_call(self, expected_message, result=None):
+ if result is None:
+ result = 'fake-result'
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(self.ctxt, 'compute.fake_host',
+ expected_message, None).AndReturn(result)
+
+ def _mock_assert_host_exists(self):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists.
"""
- self.assertRaises(exception.HostNotFound, self.api.set_host_enabled,
- context.get_admin_context(), "bogus_host_name", False)
+ self.mox.StubOutWithMock(self.host_api, '_assert_host_exists')
+ self.host_api._assert_host_exists(self.ctxt, 'fake_host')
+
+ def test_set_host_enabled(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'set_host_enabled',
+ 'args': {'enabled': 'fake_enabled'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+
+ self.mox.ReplayAll()
+ result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
+ 'fake_enabled')
+ self.assertEqual('fake-result', result)
+
+ def test_get_host_uptime(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'get_host_uptime',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
+ self.assertEqual('fake-result', result)
+
+ def test_host_power_action(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_power_action',
+ 'args': {'action': 'fake_action'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.host_power_action(self.ctxt, 'fake_host',
+ 'fake_action')
+ self.assertEqual('fake-result', result)
- def test_list_compute_hosts(self):
- ctx = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'service_get_all')
- db.service_get_all(ctx, False).AndReturn(fake_hosts.SERVICES_LIST)
+ def test_set_host_maintenance(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_maintenance_mode',
+ 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
- compute_hosts = self.api.list_hosts(ctx, service="compute")
+ result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
+ 'fake_mode')
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt)
self.mox.VerifyAll()
- expected = [host for host in fake_hosts.HOST_LIST
- if host["service"] == "compute"]
- self.assertEqual(expected, compute_hosts)
+ self.assertEqual(exp_services, result)
- def test_describe_host(self):
- """
- Makes sure that describe_host returns the correct information
- given our fake input.
- """
- ctx = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
- host_name = 'host_c1'
- db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
- [{'host': 'fake_host',
- 'compute_node': [
- {'vcpus': 4,
- 'vcpus_used': 1,
- 'memory_mb': 8192,
- 'memory_mb_used': 2048,
- 'local_gb': 1024,
- 'local_gb_used': 648}
- ]
- }])
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
- [{'project_id': 42,
- 'vcpus': 1,
- 'memory_mb': 2048,
- 'root_gb': 648,
- 'ephemeral_gb': 0,
- }])
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={})
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
self.mox.ReplayAll()
- result = self.api.describe_host(ctx, host_name)
- self.assertEqual(result,
- [{'resource': {'cpu': 4,
- 'disk_gb': 1024,
- 'host': 'host_c1',
- 'memory_mb': 8192,
- 'project': '(total)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': '(used_now)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': '(used_max)'}},
- {'resource': {'cpu': 1,
- 'disk_gb': 648,
- 'host': 'host_c1',
- 'memory_mb': 2048,
- 'project': 42}}]
- )
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(key1='val2'))
self.mox.VerifyAll()
+ self.assertEqual([exp_services[1]], result)
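The new test_service_get_all above expects filters=dict(key1='val2') to narrow the result to the matching service and an availability_zone to be folded into each record. A rough standalone sketch of that filtering contract, assuming this shape only for illustration (the real implementation lives in nova/compute/api.py):

    def service_get_all(services, filters=None):
        filters = filters or {}
        results = []
        for service in services:
            # fold in the availability zone, as the expected results do
            service = dict(service, availability_zone='nova')
            if all(service.get(k) == v for k, v in filters.items()):
                results.append(service)
        return results

    services = [dict(id=1, key1='val1', topic='compute', host='host1'),
                dict(id=2, key1='val2', topic='compute', host='host2')]
    print(service_get_all(services))                    # both, with zone added
    print(service_get_all(services, {'key1': 'val2'}))  # only service id=2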
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index f5d523ec1..53d92a13f 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -24,6 +24,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
@@ -35,6 +36,7 @@ LOG = logging.getLogger(__name__)
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_LOCAL_GB = 6
FAKE_VIRT_VCPUS = 1
+CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
@@ -97,14 +99,21 @@ class BaseTestCase(test.TestCase):
self.context = context.get_admin_context()
+ self.flags(use_local=True, group='conductor')
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+
self._instances = {}
self._instance_types = {}
- self.stubs.Set(db, 'instance_get_all_by_host_and_node',
+ self.stubs.Set(self.conductor.db,
+ 'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self.stubs.Set(self.conductor.db,
+ 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
- self.stubs.Set(db, 'instance_type_get', self._fake_instance_type_get)
+ self.stubs.Set(self.conductor.db,
+ 'instance_type_get', self._fake_instance_type_get)
self.host = 'fakehost'
@@ -288,8 +297,8 @@ class MissingComputeNodeTestCase(BaseTestCase):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
@@ -297,10 +306,10 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.created = True
return self._create_compute_node()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
- return [service]
+ return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
@@ -321,8 +330,8 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker = self._tracker()
self._migrations = {}
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'migration_update',
@@ -333,10 +342,10 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
- return [self.service]
+ return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
@@ -622,7 +631,8 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
- self.stubs.Set(db, 'migration_create', self._fake_migration_create)
+ self.stubs.Set(self.conductor.db,
+ 'migration_create', self._fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_instance_type_create()
@@ -645,7 +655,7 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
if values:
migration.update(values)
- self._migrations[instance_uuid] = migration
+ self._migrations[migration['instance_uuid']] = migration
return migration
def test_claim(self):
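The resource tracker tests above now stub instance_type_get and friends on self.conductor.db rather than on the db module, since the tracker reaches the database through the local conductor started in setUp(). A generic standalone sketch (fake classes, not nova code) of stubbing the attribute the code under test actually uses:

    class FakeDB(object):
        def instance_type_get(self, context, type_id):
            return {'id': type_id, 'memory_mb': 512}

    class FakeConductor(object):
        def __init__(self):
            self.db = FakeDB()

    conductor = FakeConductor()

    def _fake_instance_type_get(context, type_id):
        return {'id': type_id, 'memory_mb': 2048}

    # equivalent of self.stubs.Set(self.conductor.db, 'instance_type_get', ...)
    conductor.db.instance_type_get = _fake_instance_type_get
    print(conductor.db.instance_type_get('ctxt', 1))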
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index a31d9a14b..00b90ea65 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -236,9 +236,8 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance,
block_device_info={},
- network_info={},
reboot_type='type',
- version='2.5')
+ version='2.23')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index ffe09c95e..cc3dbfcc0 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -35,14 +35,21 @@ from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
+class FakeContext(context.RequestContext):
+ def elevated(self):
+ """Return a consistent elevated context so we can detect it."""
+ if not hasattr(self, '_elevated'):
+ self._elevated = super(FakeContext, self).elevated()
+ return self._elevated
+
+
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id)
+ self.context = FakeContext(self.user_id, self.project_id)
def stub_out_client_exceptions(self):
def passthru(exceptions, func, *args, **kwargs):
@@ -123,6 +130,21 @@ class _BaseTestCase(object):
'fake-window',
'fake-host')
+ def test_migration_create(self):
+ inst = {'uuid': 'fake-uuid',
+ 'host': 'fake-host',
+ 'node': 'fake-node'}
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': inst['uuid'],
+ 'source_compute': inst['host'],
+ 'source_node': inst['node'],
+ 'fake-key': 'fake-value'}).AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.migration_create(self.context, inst,
+ {'fake-key': 'fake-value'})
+ self.assertEqual(result, 'result')
+
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
@@ -429,12 +451,16 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -454,10 +480,16 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'))
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
@@ -520,12 +552,16 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -545,10 +581,11 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
@@ -628,12 +665,19 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
{'name': 'fake-inst'},
'updated_at', 'asc')
- def _test_stubbed(self, name, *args):
+ def _test_stubbed(self, name, *args, **kwargs):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *args).AndReturn('fake-result')
+ if name == 'service_destroy':
+            # TODO(russellb) This is a hack ... setUp() starts the conductor
+ # service. There is a cleanup step that runs after this test which
+ # also deletes the associated service record. This involves a call
+ # to db.service_destroy(), which we have stubbed out.
+ db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
result = getattr(self.conductor, name)(self.context, *args)
- self.assertEqual(result, 'fake-result')
+ self.assertEqual(
+ result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
@@ -647,8 +691,31 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host', 'host')
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
+
+ def test_service_create(self):
+ self._test_stubbed('service_create', {})
+
+ def test_service_destroy(self):
+ self._test_stubbed('service_destroy', '', returns=False)
+
+ def test_ping(self):
+ timeouts = []
+ calls = dict(count=0)
+
+ def fake_ping(_self, context, message, timeout):
+ timeouts.append(timeout)
+ calls['count'] += 1
+ if calls['count'] < 15:
+ raise rpc_common.Timeout("fake")
+
+ self.stubs.Set(conductor_api.API, 'ping', fake_ping)
+
+ self.conductor.wait_until_ready(self.context)
+
+ self.assertEqual(timeouts.count(10), 10)
+ self.assertTrue(None in timeouts)
class ConductorLocalAPITestCase(ConductorAPITestCase):
@@ -667,6 +734,10 @@ class ConductorLocalAPITestCase(ConductorAPITestCase):
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
+ def test_ping(self):
+ # Override test in ConductorAPITestCase
+ pass
+
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
@@ -679,6 +750,11 @@ class ConductorImportTest(test.TestCase):
self.assertTrue(isinstance(conductor.API(),
conductor_api.API))
+ def test_import_conductor_override_to_local(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(use_local=True),
+ conductor_api.LocalAPI))
+
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
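Several conductor tests above pass db_result_listified=True because service_get_by_compute_host() returns a single service dict, while service_get_all_by(topic='compute', host=...) still returns a list to preserve the old service_get_all_compute_by_host() contract. A standalone sketch of that shim, an assumption about the shape rather than the nova code itself:

    def service_get_by_compute_host(context, host):
        # new DB API: one service dict (or raises ComputeHostNotFound)
        return {'host': host, 'topic': 'compute', 'binary': 'nova-compute'}

    def service_get_all_by(context, topic=None, host=None):
        if topic == 'compute' and host is not None:
            # listify so existing callers keep seeing a list
            return [service_get_by_compute_host(context, host)]
        return []

    print(service_get_all_by('ctxt', topic='compute', host='host1'))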
diff --git a/nova/tests/fake_imagebackend.py b/nova/tests/fake_imagebackend.py
index 978c879fd..c284a5042 100644
--- a/nova/tests/fake_imagebackend.py
+++ b/nova/tests/fake_imagebackend.py
@@ -28,7 +28,7 @@ class Backend(object):
def image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
- self.path = os.path.join(instance, name)
+ self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index bb789b74a..b3d842468 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -17,6 +17,12 @@
import os
import StringIO
+from nova.openstack.common import cfg
+
+
+CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
files = {'console.log': True}
disk_sizes = {}
@@ -133,3 +139,8 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
pass
+
+
+def get_instance_path(instance):
+ # TODO(mikal): we should really just call the real one here
+ return os.path.join(CONF.instances_path, instance['name'])
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index c5d160209..51f3a3f85 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -136,10 +136,10 @@ policy_data = """
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
- "compute_extension:admin_networks": "",
- "compute_extension:admin_networks:view": "",
+ "compute_extension:networks": "",
+ "compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
- "compute_extension:os-networks": "",
+ "compute_extension:os-tenant-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
index 861c1ee8e..df40b08c0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..b51766f75
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
index acf47a4f6..092a1f933 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
index af57ccc47..77f333c00 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
index b67b1a894..8ab166a60 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
index 24fb6e539..97e96be17 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
index 0634adcba..728464ca9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
Binary files differ
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 0dd777fe2..3d69fad45 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -305,19 +305,19 @@
"updated": "%(timestamp)s"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "%(text)s",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "%(timestamp)s"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "%(text)s",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "%(timestamp)s"
},
{
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index fe34f369b..5953ba704 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -114,10 +114,10 @@
<extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>%(text)s</description>
</extension>
- <extension alias="os-admin-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>%(text)s</description>
</extension>
- <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>%(text)s</description>
</extension>
<extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
new file mode 100644
index 000000000..be9afe012
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
new file mode 100644
index 000000000..53afae086
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
new file mode 100644
index 000000000..c46a1695d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
new file mode 100644
index 000000000..ced8e1779
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
new file mode 100644
index 000000000..ca86aeb4e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "swap": 5
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
new file mode 100644
index 000000000..5f54df5cd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ swap="5" />
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
new file mode 100644
index 000000000..e61a08dc1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
new file mode 100644
index 000000000..e8c69ecee
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" swap="5">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
index 757084d2f..757084d2f 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
index fb1c2d3d0..fb1c2d3d0 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
index ff9e2273d..ff9e2273d 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 7853d1429..98ac6a230 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -28,14 +28,13 @@ from lxml import etree
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.api.openstack.compute import extensions
from nova.cloudpipe.pipelib import CloudPipe
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
-from nova.network import api
-from nova.network.manager import NetworkManager
+from nova.network import api as network_api
+from nova.network import manager as network_manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -143,7 +142,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
template = self._get_template(name)
if self.generate_samples and not os.path.exists(template):
- with open(template, 'w') as outf:
+ with open(template, 'w'):
pass
with open(template) as inf:
return inf.read().strip()
@@ -367,13 +366,12 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-flavor-access')
do_not_approve_additions.append('os-flavor-extra-specs')
- do_not_approve_additions.append('os-flavor-swap')
do_not_approve_additions.append('os-floating-ip-dns')
do_not_approve_additions.append('os-floating-ip-pools')
do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-instance_usage_audit_log')
- do_not_approve_additions.append('os-admin-networks')
+ do_not_approve_additions.append('os-networks')
do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
@@ -1030,6 +1028,55 @@ class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
ctype = 'xml'
+class FlavorSwapJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
+ 'Flavor_swap')
+
+ def _get_flags(self):
+ f = super(FlavorSwapJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorSwap extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_swap_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-get-resp', subs,
+ response)
+
+ def test_flavor_swap_list(self):
+ response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('flavor-swap-list-resp', subs,
+ response)
+
+ def test_flavor_swap_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-swap-post-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-post-resp',
+ subs, response)
+
+
+class FlavorSwapXmlTest(FlavorSwapJsonTest):
+ ctype = 'xml'
+
+
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
@@ -1454,7 +1501,8 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
'vpn_public_port': 22}
self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(NetworkManager, "get_network", network_api_get)
+ self.stubs.Set(network_manager.NetworkManager, "get_network",
+ network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
@@ -2043,8 +2091,8 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
hypervisor_type='bar',
hypervisor_version='1',
disabled=False)
- return [{'compute_node': [service]}]
- self.stubs.Set(db, "service_get_all_compute_by_host", fake_get_compute)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
@@ -2311,8 +2359,8 @@ class DiskConfigXmlTest(DiskConfigJsonTest):
class OsNetworksJsonTests(ApiSampleTestBase):
- extension_name = ("nova.api.openstack.compute.contrib.os_networks"
- ".Os_networks")
+ extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
+ ".Os_tenant_networks")
def setUp(self):
super(OsNetworksJsonTests, self).setUp()
@@ -2329,21 +2377,22 @@ class OsNetworksJsonTests(ApiSampleTestBase):
self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
def test_list_networks(self):
- response = self._do_get('os-networks')
+ response = self._do_get('os-tenant-networks')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('networks-list-res', subs, response)
def test_create_network(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('networks-post-res', subs, response)
- def test_delete_networK(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
net = json.loads(response.read())
- response = self._do_delete('os-networks/%s' % net["network"]["id"])
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
self.assertEqual(response.status, 202)
@@ -2358,7 +2407,7 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires the Networks extension to be loaded
f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.admin_networks.Admin_networks')
+ 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
return f
def setUp(self):
@@ -2369,28 +2418,28 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
project=NetworksAssociateJsonTests._sentinel):
return True
- self.stubs.Set(api.API, "associate", fake_associate)
+ self.stubs.Set(network_api.API, "associate", fake_associate)
def test_disassociate(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_project(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
def test_associate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index b6e1adc73..ca5ff8374 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -16,7 +16,6 @@
# under the License.
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.api.openstack.compute import extensions
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 94cccd9d9..959c5a472 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -17,8 +17,11 @@
"""Tests for network API."""
+import itertools
import random
+import mox
+
from nova import context
from nova import exception
from nova import network
@@ -37,6 +40,25 @@ class ApiTestCase(test.TestCase):
self.context = context.RequestContext('fake-user',
'fake-project')
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'instance_uuid',
+ 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ instance = dict(id='id', uuid='uuid', project_id='project_id',
+ host='host', instance_type={'rxtx_factor': 0})
+ self.network_api.allocate_for_instance(
+ 'context', instance, 'vpn', 'requested_networks', macs=macs)
+
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index d825a86d1..1552630fb 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -27,6 +27,7 @@ from nova import exception
from nova import ipv6
from nova.network import linux_net
from nova.network import manager as network_manager
+from nova.network import model as net_model
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -184,7 +185,8 @@ class FlatNetworkTestCase(test.TestCase):
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
- 'vif_type': None,
+ 'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 004e76071..f92dba443 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -420,6 +420,14 @@ class TestQuantumv2(test.TestCase):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted, as a set.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index ceea74e70..dd5b0ae32 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -111,13 +111,13 @@ class SchedulerManagerTestCase(test.TestCase):
def test_show_host_resources(self):
host = 'fake_host'
- computes = [{'host': host,
- 'compute_node': [{'vcpus': 4,
- 'vcpus_used': 2,
- 'memory_mb': 1024,
- 'memory_mb_used': 512,
- 'local_gb': 1024,
- 'local_gb_used': 512}]}]
+ compute_node = {'host': host,
+ 'compute_node': [{'vcpus': 4,
+ 'vcpus_used': 2,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 512,
+ 'local_gb': 1024,
+ 'local_gb_used': 512}]}
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
@@ -134,11 +134,11 @@ class SchedulerManagerTestCase(test.TestCase):
'root_gb': 256,
'ephemeral_gb': 0}]
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.service_get_all_compute_by_host(self.context, host).AndReturn(
- computes)
+ db.service_get_by_compute_host(self.context, host).AndReturn(
+ compute_node)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
@@ -338,8 +338,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -362,7 +360,7 @@ class SchedulerTestCase(test.TestCase):
# Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
@@ -373,34 +371,32 @@ class SchedulerTestCase(test.TestCase):
block_migration = True
disk_over_commit = True
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
# Source checks
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
# Destination checks (compute is up, enough memory, disk)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
# assert_compute_node_has_enough_memory()
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'memory_mb': 2048,
- 'hypervisor_version': 1}]}])
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'memory_mb': 2048,
+ 'hypervisor_version': 1}]})
db.instance_get_all_by_host(self.context, dest).AndReturn(
[dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1,
- 'cpu_info': 'fake_cpu_info'}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]})
rpc.call(self.context, "compute.fake_host2",
{"method": 'check_can_live_migrate_destination',
@@ -440,7 +436,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -448,9 +444,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context,
instance['host']).AndRaise(
- exception.NotFound())
+ exception.ComputeHostNotFound(host='fake'))
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -463,7 +459,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -471,8 +467,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
self.mox.ReplayAll()
@@ -486,7 +482,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
dest = 'fake_host2'
@@ -495,8 +491,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
# Compute is down
self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
@@ -511,17 +507,16 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
block_migration = False
- disk_over_commit = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
@@ -535,7 +530,7 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
@@ -546,8 +541,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
@@ -569,7 +564,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -579,13 +574,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'not-xen',
- 'hypervisor_version': 1}]}])
+ {'compute_node': [{'hypervisor_type': 'not-xen',
+ 'hypervisor_version': 1}]})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
@@ -601,7 +596,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -611,13 +606,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 2}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 2}]})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,
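
The hunks above track the rename of db.service_get_all_compute_by_host to db.service_get_by_compute_host: the old call returned a list of service dicts for a host, the new one returns a single service dict. A minimal sketch of the shape change, using illustrative values only:

# Illustrative values only: the return-shape change the mox expectations above encode.
old_style = [{'compute_node': [{'memory_mb': 2048, 'hypervisor_version': 1}]}]
new_style = {'compute_node': [{'memory_mb': 2048, 'hypervisor_version': 1}]}

# Callers that used to index into the list now work on the dict directly.
assert old_style[0]['compute_node'][0]['memory_mb'] == 2048
assert new_style['compute_node'][0]['memory_mb'] == 2048
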
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index 91c2a4e5e..28fa423e0 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -45,7 +45,7 @@ class ConfigDriveTestCase(test.TestCase):
self.mox.ReplayAll()
- with configdrive.config_drive_helper() as c:
+ with configdrive.ConfigDriveBuilder() as c:
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
os.close(fd)
@@ -67,17 +67,15 @@ class ConfigDriveTestCase(test.TestCase):
utils.mkfs('vfat', mox.IgnoreArg(),
label='config-2').AndReturn(None)
- utils.trycmd('mount', '-o', 'loop', mox.IgnoreArg(),
+ utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
- utils.trycmd('chown', mox.IgnoreArg(), mox.IgnoreArg(),
- run_as_root=True).AndReturn((None, None))
utils.execute('umount', mox.IgnoreArg(),
run_as_root=True).AndReturn(None)
self.mox.ReplayAll()
- with configdrive.config_drive_helper() as c:
+ with configdrive.ConfigDriveBuilder() as c:
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
os.close(fd)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 7df28bfcb..c70e96cdc 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -299,27 +299,6 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
- def test_instance_test_and_set(self):
- ctxt = context.get_admin_context()
- states = [
- (None, [None, 'some'], 'building'),
- (None, [None], 'building'),
- ('building', ['building'], 'ready'),
- ('building', [None, 'building'], 'ready')]
- for st in states:
- inst = db.instance_create(ctxt, {'vm_state': st[0]})
- uuid = inst['uuid']
- db.instance_test_and_set(ctxt, uuid, 'vm_state', st[1], st[2])
- inst = db.instance_get_by_uuid(ctxt, uuid)
- self.assertEqual(inst["vm_state"], st[2])
-
- def test_instance_test_and_set_exception(self):
- ctxt = context.get_admin_context()
- inst = db.instance_create(ctxt, {'vm_state': 'building'})
- self.assertRaises(exception.InstanceInvalidState,
- db.instance_test_and_set, ctxt,
- inst['uuid'], 'vm_state', [None, 'disable'], 'run')
-
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index 9e34f287c..ad67cff26 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -52,23 +52,23 @@ class FakeNotifier(object):
self.provided_context = context
-def good_function():
+def good_function(self, context):
return 99
-def bad_function_exception(blah="a", boo="b", context=None):
+def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None):
raise test.TestingException()
class WrapExceptionTestCase(test.TestCase):
def test_wrap_exception_good_return(self):
wrapped = exception.wrap_exception()
- self.assertEquals(99, wrapped(good_function)())
+ self.assertEquals(99, wrapped(good_function)(1, 2))
def test_wrap_exception_throws_exception(self):
wrapped = exception.wrap_exception()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
def test_wrap_exception_with_notifier(self):
notifier = FakeNotifier()
@@ -76,7 +76,7 @@ class WrapExceptionTestCase(test.TestCase):
"level")
ctxt = context.get_admin_context()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception), context=ctxt)
+ wrapped(bad_function_exception), 1, ctxt, 3, zoo=3)
self.assertEquals(notifier.provided_publisher, "publisher")
self.assertEquals(notifier.provided_event, "event")
self.assertEquals(notifier.provided_priority, "level")
@@ -88,7 +88,7 @@ class WrapExceptionTestCase(test.TestCase):
notifier = FakeNotifier()
wrapped = exception.wrap_exception(notifier)
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
self.assertEquals(notifier.provided_publisher, None)
self.assertEquals(notifier.provided_event, "bad_function_exception")
self.assertEquals(notifier.provided_priority, notifier.ERROR)
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index f5713c457..9fec9d151 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -68,7 +68,8 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
- vswitch_name='external')
+ vswitch_name='external',
+ network_api_class='nova.network.quantumv2.api.API')
self._hypervutils = hypervutils.HyperVUtils()
self._conn = driver_hyperv.HyperVDriver(None)
@@ -119,6 +120,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import snapshotops
+ from nova.virt.hyperv import vif
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
@@ -129,6 +131,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
basevolumeutils,
baseops,
hostops,
+ vif,
vmops,
vmutils,
volumeops,
@@ -240,6 +243,9 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self.assertEquals(len(dvd_paths), 0)
def test_spawn_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
# Set flag to a non existing vswitch
self.flags(vswitch_name=str(uuid.uuid4()))
self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 82b5eb475..a9865cb44 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -22,6 +22,7 @@ from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@@ -38,12 +39,12 @@ class _ImageTestCase(object):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
- self.INSTANCE = 'instance'
+ self.INSTANCE = {'name': 'instance'}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
- self.PATH = os.path.join(CONF.instances_path, self.INSTANCE,
- self.NAME)
+ self.PATH = os.path.join(
+ libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
self.TEMPLATE_DIR = os.path.join(CONF.instances_path,
'_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
@@ -215,7 +216,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
- self.LV = '%s_%s' % (self.INSTANCE, self.NAME)
+ self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
@@ -342,7 +343,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
class BackendTestCase(test.TestCase):
- INSTANCE = 'fake-instance'
+ INSTANCE = {'name': 'fake-instance'}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 53bb1b984..de0745654 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -478,7 +478,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
@@ -488,7 +489,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
@@ -512,7 +514,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
@@ -522,7 +525,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
@@ -904,6 +908,9 @@ class LibvirtConnTestCase(test.TestCase):
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
+
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
@@ -927,6 +934,9 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
+ self.assertEquals(len(conf.cpu.features), 2)
+ self.assertEquals(conf.cpu.features[0].name, "tm2")
+ self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub(self):
@@ -4447,7 +4457,7 @@ class LibvirtDriverTestCase(test.TestCase):
block_device_info=None):
pass
- def fake_create_domain(xml, inst_name=''):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
@@ -4493,7 +4503,7 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_plug_vifs(instance, network_info):
pass
- def fake_create_domain(xml, inst_name=''):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index aeebb5742..11ffa020f 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -47,7 +47,8 @@ class LibvirtVifTestCase(test.TestCase):
'gateway_v6': net['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
- 'vif_uuid': 'vif-xxx-yyy-zzz'
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz'
}
instance = {
@@ -229,7 +230,7 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, CONF.libvirt_ovs_bridge)
+ self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
vp = node.find("virtualport")
@@ -257,7 +258,7 @@ class LibvirtVifTestCase(test.TestCase):
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
br_name = node.find("source").get("bridge")
- self.assertTrue(br_name.startswith("brq"))
+ self.assertEqual(br_name, "br0")
def test_quantum_hybrid_driver(self):
d = vif.LibvirtHybridOVSBridgeDriver()
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 29e63aba7..f15d71633 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -342,7 +342,7 @@ class OpenStackMetadataTestCase(test.TestCase):
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
- self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10"))
+ self.assertFalse('user_data' in mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
@@ -362,6 +362,14 @@ class OpenStackMetadataTestCase(test.TestCase):
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertFalse("random_seed" in json.loads(mdjson))
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
index 5804ea49b..39669967f 100644
--- a/nova/tests/test_periodic_tasks.py
+++ b/nova/tests/test_periodic_tasks.py
@@ -17,6 +17,7 @@
import fixtures
+import time
from nova import manager
from nova import test
@@ -76,6 +77,19 @@ class Manager(test.TestCase):
idle = m.periodic_tasks(None)
self.assertAlmostEqual(60, idle, 1)
+ def test_periodic_tasks_idle_calculation(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=10)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ m.periodic_tasks(None)
+ time.sleep(0.1)
+ idle = m.periodic_tasks(None)
+ self.assertTrue(idle > 9.7)
+ self.assertTrue(idle < 9.9)
+
def test_periodic_tasks_disabled(self):
class Manager(manager.Manager):
@manager.periodic_task(spacing=-1)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index b6759de54..08b33e201 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -281,18 +281,21 @@ class FakeDriver(object):
project_id, quota_class, defaults, usages))
return resources
- def limit_check(self, context, resources, values):
- self.called.append(('limit_check', context, resources, values))
-
- def reserve(self, context, resources, deltas, expire=None):
- self.called.append(('reserve', context, resources, deltas, expire))
+ def limit_check(self, context, resources, values, project_id=None):
+ self.called.append(('limit_check', context, resources,
+ values, project_id))
+
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
+ self.called.append(('reserve', context, resources, deltas,
+ expire, project_id))
return self.reservations
- def commit(self, context, reservations):
- self.called.append(('commit', context, reservations))
+ def commit(self, context, reservations, project_id=None):
+ self.called.append(('commit', context, reservations, project_id))
- def rollback(self, context, reservations):
- self.called.append(('rollback', context, reservations))
+ def rollback(self, context, reservations, project_id=None):
+ self.called.append(('rollback', context, reservations, project_id))
def usage_reset(self, context, resources):
self.called.append(('usage_reset', context, resources))
@@ -600,7 +603,7 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- )),
+ ), None),
])
def test_reserve(self):
@@ -615,6 +618,9 @@ class QuotaEngineTestCase(test.TestCase):
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
+ result3 = quota_obj.reserve(context, project_id='fake_project',
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
@@ -622,13 +628,19 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- ), None),
+ ), None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), 3600, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
- ), 3600),
+ ), None, 'fake_project'),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
@@ -636,6 +648,9 @@ class QuotaEngineTestCase(test.TestCase):
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
+ self.assertEqual(result3, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
def test_commit(self):
context = FakeContext(None, None)
@@ -644,7 +659,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('commit', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_rollback(self):
@@ -654,7 +669,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_usage_reset(self):
@@ -1205,7 +1220,7 @@ class DbQuotaDriverTestCase(test.TestCase):
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
@@ -1389,7 +1404,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_get_session():
return FakeSession()
- def fake_get_quota_usages(context, session):
+ def fake_get_quota_usages(context, session, project_id):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
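
The FakeDriver and fake_quota_reserve changes above thread an optional project_id through limit_check/reserve/commit/rollback, defaulting to None so existing callers keep acting on the context's project. A standalone sketch of that calling pattern (not Nova's actual QuotaEngine):

# Standalone sketch, not Nova's QuotaEngine: an optional project_id kwarg
# lets an admin reserve and commit quota on behalf of another tenant.
class SketchQuotaDriver(object):
    def __init__(self):
        self.called = []

    def reserve(self, context, deltas, expire=None, project_id=None):
        self.called.append(('reserve', deltas, expire, project_id))
        return ['resv-01']

    def commit(self, context, reservations, project_id=None):
        self.called.append(('commit', reservations, project_id))

driver = SketchQuotaDriver()
driver.reserve('ctxt', {'instances': 1})                     # project taken from context
driver.reserve('ctxt', {'instances': 1}, project_id='fake')  # admin override
driver.commit('ctxt', ['resv-01'], project_id='fake')
print(driver.called)
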
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 0bb57d542..4873714f3 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -113,6 +113,9 @@ class ServiceTestCase(test.TestCase):
self.binary = 'nova-fake'
self.topic = 'fake'
self.mox.StubOutWithMock(service, 'db')
+ self.mox.StubOutWithMock(db, 'service_create')
+ self.mox.StubOutWithMock(db, 'service_get_by_args')
+ self.flags(use_local=True, group='conductor')
def test_create(self):
@@ -134,9 +137,9 @@ class ServiceTestCase(test.TestCase):
'report_count': 0,
'id': 1}
- service.db.service_get_by_args(mox.IgnoreArg(),
+ db.service_get_by_args(mox.IgnoreArg(),
self.host, self.binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
+ db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
return service_ref
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 9e9309dfe..199ae30b1 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -114,7 +114,7 @@ class _FakeDriverBackendTestCase(object):
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
- self.stubs.Set(nova.virt.configdrive._ConfigDriveBuilder,
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def _teardown_fakelibvirt(self):
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 86b3a5730..577d227ce 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -16,7 +16,7 @@
# under the License.
"""
-Test suite for VMWareAPI.
+Test suite for VMwareAPI.
"""
from nova.compute import power_state
@@ -33,11 +33,11 @@ from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
-class VMWareAPIVMTestCase(test.TestCase):
+class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
+ super(VMwareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
@@ -48,7 +48,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMWareESXDriver(None, False)
+ self.conn = driver.VMwareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
@@ -78,7 +78,7 @@ class VMWareAPIVMTestCase(test.TestCase):
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
+ super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 00b70ceb3..6437f9537 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -20,6 +20,7 @@ import nova.context
import nova.db
from nova.image import glance
from nova.network import minidns
+from nova.network import model as network_model
from nova.openstack.common import cfg
CONF = cfg.CONF
@@ -91,6 +92,8 @@ def get_test_network_info(count=1):
'bridge_interface': fake_bridge_interface,
'injected': False}
mapping = {'mac': fake,
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
'dhcp_server': fake,
'dns': ['fake1', 'fake2'],
'gateway': fake,
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
new file mode 100644
index 000000000..275088af0
--- /dev/null
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import fixtures
+import mox
+import uuid
+
+from nova import test
+from nova.tests.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi import vm_utils
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class GenerateConfigDriveTestCase(test.TestCase):
+ def test_no_admin_pass(self):
+        # This is here to avoid masking errors; it shouldn't be used normally
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.xenapi.vm_utils.destroy_vdi', _fake_noop))
+
+ # Mocks
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * 1024 * 1024).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(self, instance, content=None, extra_md=None):
+ pass
+
+ def metadata_for_config_drive(self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice')
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
index 5b5c38139..7cc5c70da 100644
--- a/nova/tests/virt/xenapi/test_volumeops.py
+++ b/nova/tests/virt/xenapi/test_volumeops.py
@@ -20,9 +20,47 @@ from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
+ def test_detach_volume_call(self):
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, '_is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.vm_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils._is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref')
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
- self.mox.StubOutWithMock(ops, 'connect_volume')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
@@ -32,7 +70,7 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
- ops.connect_volume(
+ ops._connect_volume(
'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=True)
self.mox.ReplayAll()
@@ -42,7 +80,7 @@ class VolumeAttachTestCase(test.TestCase):
def test_attach_volume_no_hotplug(self):
ops = volumeops.VolumeOps('session')
- self.mox.StubOutWithMock(ops, 'connect_volume')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
@@ -52,7 +90,7 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
- ops.connect_volume(
+ ops._connect_volume(
'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=False)
self.mox.ReplayAll()
@@ -85,7 +123,8 @@ class VolumeAttachTestCase(test.TestCase):
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
- self.mox.StubOutWithMock(ops, 'introduce_sr')
+ self.mox.StubOutWithMock(
+ volumeops.volume_utils, 'introduce_sr_unless_present')
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
@@ -93,7 +132,8 @@ class VolumeAttachTestCase(test.TestCase):
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
- ops.introduce_sr(sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
+ volumeops.volume_utils.introduce_sr_unless_present(
+ session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
@@ -104,7 +144,7 @@ class VolumeAttachTestCase(test.TestCase):
self.mox.ReplayAll()
- ops.connect_volume(connection_data, dev_number, instance_name,
+ ops._connect_volume(connection_data, dev_number, instance_name,
vm_ref, hotplug=False)
self.assertEquals(False, called['xenapi'])
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py
index 494b201d0..0330246e2 100644
--- a/nova/tests/vmwareapi/stubs.py
+++ b/nova/tests/vmwareapi/stubs.py
@@ -21,31 +21,31 @@ Stubouts for the test suite
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
- """Stubs out the VMWareAPISession's get_vim_object method."""
+ """Stubs out the VMwareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
- """Stubs out the VMWareAPISession's is_vim_object method."""
+ """Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def set_stubs(stubs):
"""Set the stubs."""
- stubs.Set(vmops.VMWareVMOps, 'plug_vifs', fake.fake_plug_vifs)
- stubs.Set(network_utils, 'get_network_with_the_name',
+ stubs.Set(vmops.VMwareVMOps, 'plug_vifs', fake.fake_plug_vifs)
+ stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
- stubs.Set(driver.VMWareAPISession, "_get_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_get_vim_object",
fake_get_vim_object)
- stubs.Set(driver.VMWareAPISession, "_is_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 462e0c444..9904fdcd4 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -188,13 +188,28 @@ class BareMetalDriver(driver.ComputeDriver):
l.append(inst['name'])
return l
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info=None, block_device_info=None):
+ def _require_node(self, instance):
+ """Get a node_id out of a manager instance dict.
+ The compute manager is meant to know the node id, so a missing node is
+        a significant issue - it may mean we've been passed someone else's data.
+ """
node_id = instance.get('node')
if not node_id:
raise exception.NovaException(_(
- "Baremetal node id not supplied to driver"))
+ "Baremetal node id not supplied to driver for %r")
+ % instance['uuid'])
+ return node_id
+
+ def macs_for_instance(self, instance):
+ context = nova_context.get_admin_context()
+ node_id = self._require_node(instance)
+ return set(iface['address'] for iface in
+ db.bm_interface_get_all_by_bm_node_id(context, node_id))
+
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ node_id = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
@@ -324,10 +339,9 @@ class BareMetalDriver(driver.ComputeDriver):
return self.volume_driver.attach_volume(connection_info,
instance, mountpoint)
- @exception.wrap_exception()
- def detach_volume(self, connection_info, instance, mountpoint):
+ def detach_volume(self, connection_info, instance_name, mountpoint):
return self.volume_driver.detach_volume(connection_info,
- instance, mountpoint)
+ instance_name, mountpoint)
def get_info(self, instance):
# NOTE(deva): compute/manager.py expects to get NotFound exception
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 570cea1d8..2e6f82b93 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -246,7 +246,6 @@ class LibvirtVolumeDriver(VolumeDriver):
# TODO(NTTdocomo): support CHAP
_allow_iscsi_tgtadm(tid, 'ALL')
- @exception.wrap_exception()
def detach_volume(self, connection_info, instance, mountpoint):
mount_device = mountpoint.rpartition("/")[2]
try:
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 321bf8389..886136460 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -17,7 +17,6 @@
"""Config Drive v2 helper."""
-import contextlib
import os
import shutil
import tempfile
@@ -54,18 +53,12 @@ configdrive_opts = [
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
+# Config drives are 64MB if we can't size them to the exact size of the data
+CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
-@contextlib.contextmanager
-def config_drive_helper(instance_md=None):
- cdb = _ConfigDriveBuilder(instance_md=instance_md)
- try:
- yield cdb
- finally:
- cdb.cleanup()
-
-class _ConfigDriveBuilder(object):
- """Don't use this directly, use the fancy pants contextlib helper above!"""
+class ConfigDriveBuilder(object):
+ """Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
@@ -79,6 +72,17 @@ class _ConfigDriveBuilder(object):
if instance_md is not None:
self.add_instance_metadata(instance_md)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exctype, excval, exctb):
+ if exctype is not None:
+ # NOTE(mikal): this means we're being cleaned up because an
+ # exception was thrown. All bets are off now, and we should not
+ # swallow the exception
+ return False
+ self.cleanup()
+
def _add_file(self, path, data):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
@@ -116,10 +120,9 @@ class _ConfigDriveBuilder(object):
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
- # equivalent to genisoimage for vfat filesystems. vfat images are
- # always 64mb.
+ # equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
- f.truncate(64 * 1024 * 1024)
+ f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
@@ -127,20 +130,16 @@ class _ConfigDriveBuilder(object):
try:
mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
- _out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir,
+ _out, err = utils.trycmd('mount', '-o',
+ 'loop,uid=%d,gid=%d' % (os.getuid(),
+ os.getgid()),
+ path, mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
- _out, err = utils.trycmd('chown',
- '%s.%s' % (os.getuid(), os.getgid()),
- mountdir, run_as_root=True)
- if err:
- raise exception.ConfigDriveMountFailed(operation='chown',
- error=err)
-
# NOTE(mikal): I can't just use shutils.copytree here, because the
# destination directory already exists. This is annoying.
for ent in os.listdir(self.tempdir):
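
With the contextlib helper gone, ConfigDriveBuilder is now its own context manager and only cleans up on a clean exit. A usage sketch, assuming an environment where genisoimage and the mount helpers are available; the output path is illustrative:

# Usage sketch; the output path is illustrative.
from nova.virt import configdrive

with configdrive.ConfigDriveBuilder() as cdb:
    cdb._add_file('openstack/latest/meta_data.json', '{}')
    cdb.make_drive('/tmp/config-drive.iso')
# __exit__ skips cleanup() when an exception is propagating, so a failed
# make_drive() leaves the temporary directory behind for inspection.
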
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index e396de6a0..a8f779e66 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -732,6 +732,35 @@ class ComputeDriver(object):
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
+ def macs_for_instance(self, instance):
+ """What MAC addresses must this instance have?
+
+ Some hypervisors (such as bare metal) cannot do freeform virtualisation
+ of MAC addresses. This method allows drivers to return a set of MAC
+ addresses that the instance is to have. allocate_for_instance will take
+ this into consideration when provisioning networking for the instance.
+
+ Mapping of MAC addresses to actual networks (or permitting them to be
+ freeform) is up to the network implementation layer. For instance,
+ with openflow switches, fixed MAC addresses can still be virtualised
+ onto any L2 domain, with arbitrary VLANs etc, but regular switches
+ require pre-configured MAC->network mappings that will match the
+ actual configuration.
+
+ Most hypervisors can use the default implementation which returns None.
+ Hypervisors with MAC limits should return a set of MAC addresses, which
+ will be supplied to the allocate_for_instance call by the compute
+ manager, and it is up to that call to ensure that all assigned network
+ details are compatible with the set of MAC addresses.
+
+ This is called during spawn_instance by the compute manager.
+
+ :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
+ None means 'no constraints', a set means 'these and only these
+ MAC addresses'.
+ """
+ return None
+
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
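
The default macs_for_instance() returns None, meaning no constraint; drivers with fixed NICs (such as the baremetal driver earlier in this change) return a set instead. A minimal sketch of an overriding driver; the inventory lookup here is hypothetical, not part of Nova:

# Sketch only; _macs_from_inventory() is a hypothetical helper, not Nova code.
class FixedMacDriver(object):
    def macs_for_instance(self, instance):
        macs = self._macs_from_inventory(instance.get('node'))
        # None means "no constraint"; only return a set when the node
        # actually has pre-assigned MAC addresses.
        return set(macs) if macs else None

    def _macs_from_inventory(self, node_id):
        inventory = {'node-1': ['12:34:56:78:90:ab']}
        return inventory.get(node_id, [])

driver = FixedMacDriver()
print(driver.macs_for_instance({'node': 'node-1'}))  # {'12:34:56:78:90:ab'}
print(driver.macs_for_instance({'node': 'node-2'}))  # None
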
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
new file mode 100644
index 000000000..a898d3ac2
--- /dev/null
+++ b/nova/virt/hyperv/vif.py
@@ -0,0 +1,133 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Cloudbase Solutions Srl
+# Copyright 2013 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+import uuid
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+from abc import abstractmethod
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import vmutils
+
+hyperv_opts = [
+ cfg.StrOpt('vswitch_name',
+ default=None,
+ help='External virtual switch Name, '
+ 'if not provided, the first external virtual '
+ 'switch is used'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(hyperv_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVBaseVIFDriver(object):
+ @abstractmethod
+ def plug(self, instance, vif):
+ pass
+
+ @abstractmethod
+ def unplug(self, instance, vif):
+ pass
+
+
+class HyperVQuantumVIFDriver(HyperVBaseVIFDriver):
+ """Quantum VIF driver."""
+
+ def plug(self, instance, vif):
+ # Quantum takes care of plugging the port
+ pass
+
+ def unplug(self, instance, vif):
+ # Quantum takes care of unplugging the port
+ pass
+
+
+class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
+ """Nova network VIF driver."""
+
+ def __init__(self):
+ self._vmutils = vmutils.VMUtils()
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+
+ def _find_external_network(self):
+ """Find the vswitch that is connected to the physical nic.
+ Assumes only one physical nic on the host
+ """
+ #If there are no physical nics connected to networks, return.
+ LOG.debug(_("Attempting to bind NIC to %s ")
+ % CONF.vswitch_name)
+ if CONF.vswitch_name:
+ LOG.debug(_("Attempting to bind NIC to %s ")
+ % CONF.vswitch_name)
+ bound = self._conn.Msvm_VirtualSwitch(
+ ElementName=CONF.vswitch_name)
+ else:
+ LOG.debug(_("No vSwitch specified, attaching to default"))
+            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
+ if len(bound) == 0:
+ return None
+ if CONF.vswitch_name:
+ return self._conn.Msvm_VirtualSwitch(
+ ElementName=CONF.vswitch_name)[0]\
+ .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+ .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+ else:
+ return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
+ .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+ .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+
+ def plug(self, instance, vif):
+ extswitch = self._find_external_network()
+ if extswitch is None:
+ raise vmutils.HyperVException(_('Cannot find vSwitch'))
+
+ vm_name = instance['name']
+
+ nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData(
+ ElementName=vif['id'])[0]
+
+ switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+ #Create a port on the vswitch.
+ (new_port, ret_val) = switch_svc.CreateSwitchPort(
+ Name=str(uuid.uuid4()),
+ FriendlyName=vm_name,
+ ScopeOfResidence="",
+ VirtualSwitch=extswitch.path_())
+ if ret_val != 0:
+ LOG.error(_('Failed creating a port on the external vswitch'))
+ raise vmutils.HyperVException(_('Failed creating port for %s') %
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
+
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
+ nic_data.Connection = [new_port]
+ self._vmutils.modify_virt_resource(self._conn, nic_data, vm)
+
+ def unplug(self, instance, vif):
+ #TODO(alepilotti) Not implemented
+ pass
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 83493f7ff..3d8958266 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -24,6 +24,7 @@ import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
@@ -35,10 +36,6 @@ from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
- cfg.StrOpt('vswitch_name',
- default=None,
- help='Default vSwitch Name, '
- 'if none provided first external is used'),
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
@@ -59,14 +56,32 @@ hyperv_opts = [
CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
CONF.import_opt('use_cow_images', 'nova.virt.driver')
+CONF.import_opt('network_api_class', 'nova.network')
class VMOps(baseops.BaseOps):
+ _vif_driver_class_map = {
+ 'nova.network.quantumv2.api.API':
+ 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver',
+ 'nova.network.api.API':
+ 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
+ }
+
def __init__(self, volumeops):
super(VMOps, self).__init__()
self._vmutils = vmutils.VMUtils()
self._volumeops = volumeops
+ self._load_vif_driver_class()
+
+ def _load_vif_driver_class(self):
+ try:
+ class_name = self._vif_driver_class_map[CONF.network_api_class]
+ self._vif_driver = importutils.import_object(class_name)
+ except KeyError:
+ raise TypeError(_("VIF driver not found for "
+ "network_api_class: %s") %
+ CONF.network_api_class)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
@@ -158,8 +173,8 @@ class VMOps(baseops.BaseOps):
self._create_scsi_controller(instance['name'])
for vif in network_info:
- mac_address = vif['address'].replace(':', '')
- self._create_nic(instance['name'], mac_address)
+ self._create_nic(instance['name'], vif)
+ self._vif_driver.plug(instance, vif)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
@@ -192,7 +207,7 @@ class VMOps(baseops.BaseOps):
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
- with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except exception.ProcessExecutionError, e:
@@ -367,46 +382,28 @@ class VMOps(baseops.BaseOps):
LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
locals())
- def _create_nic(self, vm_name, mac):
+ def _create_nic(self, vm_name, vif):
"""Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
- #Find the vswitch that is connected to the physical nic.
- vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
- extswitch = self._find_external_network()
- if extswitch is None:
- raise vmutils.HyperVException(_('Cannot find vSwitch'))
- vm = vms[0]
- switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
- #Find the default nic and clone it to create a new nic for the vm.
- #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
- #Linux Integration Components installed.
+ #Create a new nic
syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
default_nic_data = [n for n in syntheticnics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_SyntheticEthernetPortSettingData',
default_nic_data[0])
- #Create a port on the vswitch.
- (new_port, ret_val) = switch_svc.CreateSwitchPort(
- Name=str(uuid.uuid4()),
- FriendlyName=vm_name,
- ScopeOfResidence="",
- VirtualSwitch=extswitch.path_())
- if ret_val != 0:
- LOG.error(_('Failed creating a port on the external vswitch'))
- raise vmutils.HyperVException(_('Failed creating port for %s') %
- vm_name)
- ext_path = extswitch.path_()
- LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
- % locals())
- #Connect the new nic to the new port.
- new_nic_data.Connection = [new_port]
- new_nic_data.ElementName = vm_name + ' nic'
- new_nic_data.Address = mac
+
+ #Configure the nic
+ new_nic_data.ElementName = vif['id']
+ new_nic_data.Address = vif['address'].replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- #Add the new nic to the vm.
+
+ #Add the new nic to the vm
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
new_resources = self._vmutils.add_virt_resource(self._conn,
new_nic_data, vm)
if new_resources is None:
@@ -414,33 +411,6 @@ class VMOps(baseops.BaseOps):
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
- def _find_external_network(self):
- """Find the vswitch that is connected to the physical nic.
- Assumes only one physical nic on the host
- """
- #If there are no physical nics connected to networks, return.
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- if CONF.vswitch_name:
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- bound = self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)
- else:
- LOG.debug(_("No vSwitch specified, attaching to default"))
- self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
- if len(bound) == 0:
- return None
- if CONF.vswitch_name:
- return self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)[0]\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
- else:
- return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
-
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
vm = self._vmutils.lookup(self._conn, instance['name'])
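
VMOps now picks its VIF driver from CONF.network_api_class at __init__ time, which is why the Hyper-V test earlier in this change reinstantiates the driver after changing the flag. A standalone sketch of the same lookup pattern; the classes below are stand-ins for the real Hyper-V VIF drivers:

# Standalone sketch of the class-map lookup in VMOps._load_vif_driver_class();
# QuantumVIFDriver and NovaNetworkVIFDriver here are stand-ins.
class QuantumVIFDriver(object):
    pass

class NovaNetworkVIFDriver(object):
    pass

_VIF_DRIVER_MAP = {
    'nova.network.quantumv2.api.API': QuantumVIFDriver,
    'nova.network.api.API': NovaNetworkVIFDriver,
}

def load_vif_driver(network_api_class):
    try:
        return _VIF_DRIVER_MAP[network_api_class]()
    except KeyError:
        raise TypeError("VIF driver not found for network_api_class: %s"
                        % network_api_class)

print(type(load_vif_driver('nova.network.api.API')).__name__)
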
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index bae8a1f1a..d899f977d 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -130,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM."""
+ """Adds a new resource to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -145,8 +145,20 @@ class VMUtils(object):
else:
return None
+ def modify_virt_resource(self, conn, res_setting_data, target_vm):
+ """Updates a VM resource."""
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ ResourceSettingData=[res_setting_data.GetText_(1)],
+ ComputerSystem=target_vm.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self.check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ return success
+
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM."""
+ """Removes a VM resource."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 222e6d52d..6785c8823 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 Red Hat, Inc.
+# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -18,7 +18,11 @@
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
-and support conversion to/from XML
+and support conversion to/from XML. These classes are solely concerned
+with providing direct Object <-> XML document conversions. No policy or
+operational decisions should be made by code in these classes. Such
+policy belongs in the 'designer.py' module, which provides simplified
+helpers for populating config object instances.
"""
from nova import exception
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
new file mode 100644
index 000000000..b832db4fa
--- /dev/null
+++ b/nova/virt/libvirt/designer.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Policy based configuration of libvirt objects
+
+This module provides helper APIs for populating the config.py
+classes based on common operational needs / policies
+"""
+
+from nova.virt import netutils
+
+
+def set_vif_guest_frontend_config(conf, mac, model, driver):
+ """Populate a LibvirtConfigGuestInterface instance
+ with guest frontend details"""
+ conf.mac_addr = mac
+ if model is not None:
+ conf.model = model
+ if driver is not None:
+ conf.driver_name = driver
+
+
+def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for a software bridge"""
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ethernet_config(conf, tapname):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an externally configured
+ host device.
+
+    NB use of this configuration is discouraged by the
+    libvirt project and will mark domains as 'tainted'"""
+
+ conf.net_type = "ethernet"
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an OpenVSwitch bridge"""
+
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ conf.vporttype = "openvswitch"
+ conf.add_vport_param("interfaceid", interfaceid)
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_filter_config(conf, name,
+ primary_addr,
+ dhcp_server=None,
+ ra_server=None,
+ allow_same_net=False,
+ ipv4_cidr=None,
+ ipv6_cidr=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for traffic filtering"""
+
+ conf.filtername = name
+ conf.add_filter_param("IP", primary_addr)
+
+ if dhcp_server:
+ conf.add_filter_param("DHCPSERVER", dhcp_server)
+
+ if ra_server:
+ conf.add_filter_param("RASERVER", ra_server)
+
+ if allow_same_net:
+ if ipv4_cidr:
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ conf.add_filter_param("PROJNET", net)
+ conf.add_filter_param("PROJMASK", mask)
+
+ if ipv6_cidr:
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ conf.add_filter_param("PROJNET6", net)
+ conf.add_filter_param("PROJMASK6", prefix)
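
Taken together with config.py, the intended calling pattern is: a VIF driver builds a config object, lets the designer helpers make the policy decisions, and then serializes it. A minimal sketch using only the helpers defined above; the MAC, bridge and tap names are placeholders, and to_xml() is assumed to be provided by the config base class:

    from nova.virt.libvirt import config as vconfig
    from nova.virt.libvirt import designer

    conf = vconfig.LibvirtConfigGuestInterface()
    # Guest-visible side: MAC address plus optional model/driver overrides.
    designer.set_vif_guest_frontend_config(conf, "fa:16:3e:00:00:01", "virtio", None)
    # Host side: attach the interface to a software bridge via a tap device.
    designer.set_vif_host_backend_bridge_config(conf, "br100", "tapexample0")
    xml = conf.to_xml()  # serialization stays with the config classes
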
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 1e087e61a..4312086a8 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -586,7 +586,7 @@ class LibvirtDriver(driver.ComputeDriver):
mount_device)
if destroy_disks:
- target = os.path.join(CONF.instances_path, instance['name'])
+ target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
@@ -642,8 +642,7 @@ class LibvirtDriver(driver.ComputeDriver):
}
def _cleanup_resize(self, instance, network_info):
- target = os.path.join(CONF.instances_path,
- instance['name'] + "_resize")
+ target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
@@ -661,7 +660,6 @@ class LibvirtDriver(driver.ComputeDriver):
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
- @exception.wrap_exception()
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
@@ -716,7 +714,6 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info)
return xml
- @exception.wrap_exception()
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
mount_device = mountpoint.rpartition("/")[2]
@@ -749,7 +746,6 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- @exception.wrap_exception()
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
@@ -845,7 +841,6 @@ class LibvirtDriver(driver.ComputeDriver):
metadata,
image_file)
- @exception.wrap_exception()
def reboot(self, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
@@ -932,24 +927,20 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
- @exception.wrap_exception()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
- @exception.wrap_exception()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
- @exception.wrap_exception()
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
@@ -958,20 +949,17 @@ class LibvirtDriver(driver.ComputeDriver):
instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
- @exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- @exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
@@ -979,7 +967,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- @exception.wrap_exception()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
@@ -990,11 +977,9 @@ class LibvirtDriver(driver.ComputeDriver):
data recovery.
"""
-
+ instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_domain_xml(instance, network_info)
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
@@ -1010,24 +995,20 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
self._create_domain(xml)
- @exception.wrap_exception()
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
- rescue_files = os.path.join(CONF.instances_path, instance['name'],
- "*.rescue")
+ rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
- @exception.wrap_exception()
def poll_rebooting_instances(self, timeout, instances):
pass
@@ -1042,7 +1023,6 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
- @exception.wrap_exception()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
xml = self.to_xml(instance, network_info, image_meta,
@@ -1083,7 +1063,6 @@ class LibvirtDriver(driver.ComputeDriver):
fp.write(data)
return fpath
- @exception.wrap_exception()
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
@@ -1134,9 +1113,9 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
- console_log = self._get_console_log_path(instance['name'])
+ console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
@@ -1150,7 +1129,6 @@ class LibvirtDriver(driver.ComputeDriver):
def get_host_ip_addr():
return CONF.my_ip
- @exception.wrap_exception()
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
@@ -1227,11 +1205,12 @@ class LibvirtDriver(driver.ComputeDriver):
utils.mkfs('swap', target)
@staticmethod
- def _get_console_log_path(instance_name):
- return os.path.join(CONF.instances_path, instance_name, 'console.log')
+ def _get_console_log_path(instance):
+ return os.path.join(libvirt_utils.get_instance_path(instance),
+ 'console.log')
- def _chown_console_log_for_instance(self, instance_name):
- console_log = self._get_console_log_path(instance_name)
+ def _chown_console_log_for_instance(self, instance):
+ console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
@@ -1243,12 +1222,11 @@ class LibvirtDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(CONF.instances_path,
- instance['name'],
+ return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
- return self.image_backend.image(instance['name'],
+ return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
@@ -1261,11 +1239,11 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
# NOTE(dprince): for rescue console.log may already exist... chown it.
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
# NOTE(vish): No need add the suffix to console.log
libvirt_utils.write_to_file(
- self._get_console_log_path(instance['name']), '', 007)
+ self._get_console_log_path(instance), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
@@ -1385,7 +1363,7 @@ class LibvirtDriver(driver.ComputeDriver):
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
- with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
@@ -1472,6 +1450,7 @@ class LibvirtDriver(driver.ComputeDriver):
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
+ guestcpu.features.append(guestfeat)
return guestcpu
@@ -1537,9 +1516,8 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
- fs.source_dir = os.path.join(CONF.instances_path,
- instance['name'],
- 'rootfs')
+ fs.source_dir = os.path.join(
+ libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if image_meta and image_meta.get('disk_format') == 'iso':
@@ -1557,8 +1535,7 @@ class LibvirtDriver(driver.ComputeDriver):
def disk_info(name, disk_dev, disk_bus=default_disk_bus,
device_type="disk"):
- image = self.image_backend.image(instance['name'],
- name)
+ image = self.image_backend.image(instance, name)
return image.libvirt_info(disk_bus,
disk_dev,
device_type,
@@ -1645,9 +1622,8 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(CONF.instances_path,
- instance['name'],
- "disk.config")
+ diskconfig.source_path = os.path.join(
+            libvirt_utils.get_instance_path(instance), "disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
devices.append(diskconfig)
@@ -1675,6 +1651,7 @@ class LibvirtDriver(driver.ComputeDriver):
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = instance['instance_type']
+ inst_path = libvirt_utils.get_instance_path(instance)
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
@@ -1733,9 +1710,7 @@ class LibvirtDriver(driver.ComputeDriver):
if rescue:
if rescue.get('kernel_id'):
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel.rescue")
+ guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
@@ -1743,22 +1718,16 @@ class LibvirtDriver(driver.ComputeDriver):
(root_device_name or "/dev/vda",))
if rescue.get('ramdisk_id'):
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk.rescue")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel")
+ guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
(root_device_name or "/dev/vda",))
if instance['ramdisk_id']:
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
@@ -1806,8 +1775,7 @@ class LibvirtDriver(driver.ComputeDriver):
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = self._get_console_log_path(
- instance['name'])
+ consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
@@ -1877,18 +1845,23 @@ class LibvirtDriver(driver.ComputeDriver):
'cpu_time': cpu_time}
def _create_domain(self, xml=None, domain=None,
- inst_name='', launch_flags=0):
+ instance=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
+ inst_path = None
+ if instance:
+ inst_path = libvirt_utils.get_instance_path(instance)
+
if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- inst_name,
- 'rootfs')
+ if not inst_path:
+ inst_path = None
+
+ container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
- image = self.image_backend.image(inst_name, 'disk')
+ image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
@@ -1902,9 +1875,7 @@ class LibvirtDriver(driver.ComputeDriver):
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- inst_name,
- 'rootfs')
+ container_dir = os.path.join(inst_path, 'rootfs')
disk.teardown_container(container_dir=container_dir)
return domain
@@ -1926,7 +1897,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- domain = self._create_domain(xml, inst_name=instance['name'])
+ domain = self._create_domain(xml, instance=instance)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
@@ -2034,8 +2005,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_total = libvirt_utils.volume_group_total_space(
+ CONF.libvirt_images_volume_group)
+ return vg_total / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
@@ -2045,15 +2021,26 @@ class LibvirtDriver(driver.ComputeDriver):
"""
total = 0
- for dom_id in self.list_instance_ids():
- dom = self._conn.lookupByID(dom_id)
- vcpus = dom.vcpus()
- if vcpus is None:
- # dom.vcpus is not implemented for lxc, but returning 0 for
- # a used count is hardly useful for something measuring usage
- total += 1
- else:
- total += len(vcpus[1])
+ dom_ids = self.list_instance_ids()
+ for dom_id in dom_ids:
+ try:
+ dom = self._conn.lookupByID(dom_id)
+ vcpus = dom.vcpus()
+ if vcpus is None:
+ # dom.vcpus is not implemented for lxc, but returning 0 for
+ # a used count is hardly useful for something measuring
+ # usage
+ total += 1
+ else:
+ total += len(vcpus[1])
+ except libvirt.libvirtError as err:
+ if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ LOG.debug(_("List of domains returned by libVirt: %s")
+ % dom_ids)
+ LOG.warn(_("libVirt can't find a domain with id: %s")
+ % dom_id)
+ continue
+ raise
# NOTE(gtt116): give change to do other task.
greenthread.sleep(0)
return total
@@ -2103,8 +2090,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_used = libvirt_utils.volume_group_used_space(
+ CONF.libvirt_images_volume_group)
+ return vg_used / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
"""Get hypervisor type.
@@ -2594,7 +2586,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
- instance_dir = os.path.join(CONF.instances_path, instance['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
@@ -2624,14 +2616,13 @@ class LibvirtDriver(driver.ComputeDriver):
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Create the instance directory on destination compute node.
- instance_dir = os.path.join(CONF.instances_path,
- instance_ref['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Touch the console.log file, required by libvirt.
- console_file = self._get_console_log_path(instance_ref['name'])
+ console_file = self._get_console_log_path(instance_ref)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
@@ -2680,7 +2671,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
- instance_dir = os.path.join(CONF.instances_path, instance['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
@@ -2699,7 +2690,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
- image = self.image_backend.image(instance['name'],
+ image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
@@ -2730,8 +2721,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = os.path.join(CONF.instances_path,
- instance_ref["name"])
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
# libvirt.xml
@@ -2870,7 +2860,6 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
pass
- @exception.wrap_exception()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
@@ -2894,7 +2883,7 @@ class LibvirtDriver(driver.ComputeDriver):
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
same_host = (dest == self.get_host_ip_addr())
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
@@ -2936,7 +2925,6 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
- @exception.wrap_exception()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
@@ -2989,13 +2977,12 @@ class LibvirtDriver(driver.ComputeDriver):
instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
@@ -3101,12 +3088,10 @@ class LibvirtDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
# ensure directories exist and are writable
- instance_path = os.path.join(CONF.instances_path, instance["name"])
-
+ instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessability'
'%(instance_path)s')
% locals())
-
return os.access(instance_path, os.W_OK)
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index f4c41f539..d272e408c 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -142,8 +142,9 @@ class Raw(Image):
def __init__(self, instance=None, name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -170,8 +171,9 @@ class Qcow2(Image):
def __init__(self, instance=None, name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -208,7 +210,7 @@ class Lvm(Image):
' libvirt_images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
- self.lv = '%s_%s' % (self.escape(instance),
+ self.lv = '%s_%s' % (self.escape(instance['name']),
self.escape(name))
self.path = os.path.join('/dev', self.vg, self.lv)
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 73c3b552b..4b3517da7 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -30,6 +30,7 @@ from nova import utils
from nova.virt import images
CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
@@ -144,6 +145,36 @@ def volume_group_free_space(vg):
return int(out.strip())
+def volume_group_total_space(vg):
+ """Return total space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--units', 'b', '-o', 'vg_size', vg,
+ run_as_root=True)
+ return int(out.strip())
+
+
+def volume_group_used_space(vg):
+ """Return available space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--separator', '|',
+ '--units', 'b', '-o', 'vg_size,vg_free', vg,
+ run_as_root=True)
+
+ info = out.split('|')
+ if len(info) != 2:
+        raise RuntimeError(_("vg %s must be an LVM volume group") % vg)
+
+ return int(info[0]) - int(info[1])
+
+
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
@@ -468,3 +499,19 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
"""Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
+
+
+def get_instance_path(instance):
+ """Determine the correct path for instance storage.
+
+ This used to be calculated all over the place. This method centralizes
+ this into one location, which will make it easier to change the
+ algorithm used to name instance storage directories.
+
+ :param instance: the instance we want a path for
+
+ :returns: a path to store information about that instance
+ """
+ # TODO(mikal): we should use UUID instead of name, as name isn't
+    # necessarily unique
+ return os.path.join(CONF.instances_path, instance['name'])
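
get_instance_path is the single place the rest of the driver now goes through; a trivial usage sketch (the instance dict and the CONF.instances_path value are placeholders):

    # Assuming CONF.instances_path = '/var/lib/nova/instances' and an instance
    # dict as passed around by the driver, the console log path becomes:
    inst = {'name': 'instance-00000001'}
    console_log = os.path.join(get_instance_path(inst), 'console.log')
    # -> /var/lib/nova/instances/instance-00000001/console.log
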
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 1dc30f73e..54de9da2d 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -21,19 +21,17 @@
from nova import exception
from nova.network import linux_net
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-from nova.virt import netutils
from nova.virt.libvirt import config as vconfig
-
+from nova.virt.libvirt import designer
+from nova.virt import netutils
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
- cfg.StrOpt('libvirt_ovs_bridge',
- default='br-int',
- help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
@@ -44,21 +42,28 @@ CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
-LINUX_DEV_LEN = 14
-
class LibvirtBaseVIFDriver(object):
+ def get_vif_devname(self, mapping):
+ if 'vif_devname' in mapping:
+ return mapping['vif_devname']
+ return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
+
def get_config(self, instance, network, mapping):
conf = vconfig.LibvirtConfigGuestInterface()
- conf.mac_addr = mapping['mac']
- if CONF.libvirt_type in ('kvm', 'qemu') and \
- CONF.libvirt_use_virtio_for_bridges:
- conf.model = "virtio"
+ model = None
+ driver = None
+ if (CONF.libvirt_type in ('kvm', 'qemu') and
+ CONF.libvirt_use_virtio_for_bridges):
+ model = "virtio"
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if CONF.libvirt_type == "qemu":
- conf.driver_name = "qemu"
+ driver = "qemu"
+
+ designer.set_vif_guest_frontend_config(
+ conf, mapping['mac'], model, driver)
return conf
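
get_vif_devname above centralizes the device-naming rule: an explicit vif_devname from the network mapping wins; otherwise the name is "nic" plus the VIF UUID, truncated to the kernel interface-name limit. A standalone illustration; the length constant is an assumption mirroring network_model.NIC_NAME_LEN:

    NIC_NAME_LEN = 14  # assumed value of nova.network.model.NIC_NAME_LEN

    def vif_devname(mapping):
        if 'vif_devname' in mapping:
            return mapping['vif_devname']
        return ("nic" + mapping['vif_uuid'])[:NIC_NAME_LEN]

    assert vif_devname({'vif_devname': 'tap0'}) == 'tap0'
    assert vif_devname({'vif_uuid': 'deadbeef-dead-beef'}) == 'nicdeadbeef-de'
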
@@ -75,28 +80,26 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
self).get_config(instance,
network,
mapping)
- conf.net_type = "bridge"
- conf.source_dev = network['bridge']
- conf.script = ""
- conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
- conf.add_filter_param("IP", mapping['ips'][0]['ip'])
- if mapping['dhcp_server']:
- conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
- if CONF.use_ipv6:
- conf.add_filter_param("RASERVER",
- mapping.get('gateway_v6') + "/128")
+ name = "nova-instance-" + instance['name'] + "-" + mac_id
+ primary_addr = mapping['ips'][0]['ip']
+ dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
+ if mapping['dhcp_server']:
+ dhcp_server = mapping['dhcp_server']
+ if CONF.use_ipv6:
+ ra_server = mapping.get('gateway_v6') + "/128"
if CONF.allow_same_net_traffic:
- net, mask = netutils.get_net_and_mask(network['cidr'])
- conf.add_filter_param("PROJNET", net)
- conf.add_filter_param("PROJMASK", mask)
+ ipv4_cidr = network['cidr']
if CONF.use_ipv6:
- net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
- network['cidr_v6'])
- conf.add_filter_param("PROJNET6", net_v6)
- conf.add_filter_param("PROJMASK6", prefixlen_v6)
+ ipv6_cidr = network['cidr_v6']
+
+ designer.set_vif_host_backend_filter_config(
+ conf, name, primary_addr, dhcp_server,
+ ra_server, ipv4_cidr, ipv6_cidr)
return conf
@@ -135,42 +138,37 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (0.9.10 or earlier).
"""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- dev = self.get_dev_name(mapping['vif_uuid'])
+ dev = self.get_vif_devname(mapping)
conf = super(LibvirtOpenVswitchDriver,
self).get_config(instance,
network,
mapping)
- conf.net_type = "ethernet"
- conf.target_dev = dev
- conf.script = ""
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
- def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
+ def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
- CONF.libvirt_ovs_bridge, dev,
- '--', 'set', 'Interface', dev,
- 'external-ids:iface-id=%s' % iface_id,
- 'external-ids:iface-status=active',
- 'external-ids:attached-mac=%s' % mac,
- 'external-ids:vm-uuid=%s' % instance_id,
- run_as_root=True)
-
- def delete_ovs_vif_port(self, dev):
- utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge,
- dev, run_as_root=True)
+ bridge, dev,
+ '--', 'set', 'Interface', dev,
+ 'external-ids:iface-id=%s' % iface_id,
+ 'external-ids:iface-status=active',
+ 'external-ids:attached-mac=%s' % mac,
+ 'external-ids:vm-uuid=%s' % instance_id,
+ run_as_root=True)
+
+ def delete_ovs_vif_port(self, bridge, dev):
+ utils.execute('ovs-vsctl', 'del-port', bridge, dev,
+ run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
+ dev = self.get_vif_devname(mapping)
if not linux_net.device_exists(dev):
# Older version of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
@@ -185,14 +183,16 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
- self.create_ovs_vif_port(dev, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ dev, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
- self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid']))
+ self.delete_ovs_vif_port(network['bridge'],
+ self.get_vif_devname(mapping))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -208,11 +208,11 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
"""
def get_br_name(self, iface_id):
- return ("qbr" + iface_id)[:LINUX_DEV_LEN]
+ return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
- return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
- ("qvo%s" % iface_id)[:LINUX_DEV_LEN])
+ return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
+ ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_config(self, instance, network, mapping):
br_name = self.get_br_name(mapping['vif_uuid'])
@@ -243,7 +243,8 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
- self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ v2_name, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
@@ -263,7 +264,7 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
- self.delete_ovs_vif_port(v2_name)
+ self.delete_ovs_vif_port(network['bridge'], v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -279,10 +280,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
network,
mapping)
- conf.net_type = "bridge"
- conf.source_dev = CONF.libvirt_ovs_bridge
- conf.vporttype = "openvswitch"
- conf.add_vport_param("interfaceid", mapping['vif_uuid'])
+ designer.set_vif_host_backend_ovs_config(
+ conf, network['bridge'], mapping['vif_uuid'],
+ self.get_vif_devname(mapping))
return conf
@@ -297,18 +297,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
- def get_bridge_name(self, network_id):
- return ("brq" + network_id)[:LINUX_DEV_LEN]
-
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
-
- bridge = self.get_bridge_name(network['id'])
- linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None,
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(network['bridge'],
+ None,
filtering=False)
conf = super(QuantumLinuxBridgeVIFDriver,
@@ -316,9 +307,8 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
network,
mapping)
- conf.target_dev = dev
- conf.net_type = "bridge"
- conf.source_dev = bridge
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
return conf
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index fa6f6ceb5..66e7d9b02 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -18,4 +18,4 @@
:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
-from nova.virt.vmwareapi.driver import VMWareESXDriver
+from nova.virt.vmwareapi.driver import VMwareESXDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 8734df1f6..986c4ef28 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -52,29 +52,29 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMWare ESX host.Required if '
- 'compute_driver is vmwareapi.VMWareESXDriver.'),
+               help='URL for connection to VMware ESX host. Required if '
+ 'compute_driver is vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
- help='Username for connection to VMWare ESX host. '
+ help='Username for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
- help='Password for connection to VMWare ESX host. '
+ help='Password for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
]
CONF = cfg.CONF
@@ -93,11 +93,11 @@ class Failure(Exception):
return str(self.details)
-class VMWareESXDriver(driver.ComputeDriver):
+class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, virtapi, read_only=False, scheme="https"):
- super(VMWareESXDriver, self).__init__(virtapi)
+ super(VMwareESXDriver, self).__init__(virtapi)
host_ip = CONF.vmwareapi_host_ip
host_username = CONF.vmwareapi_host_username
@@ -107,11 +107,11 @@ class VMWareESXDriver(driver.ComputeDriver):
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
- "compute_driver=vmwareapi.VMWareESXDriver"))
+ "compute_driver=vmwareapi.VMwareESXDriver"))
- session = VMWareAPISession(host_ip, host_username, host_password,
+ session = VMwareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
- self._vmops = vmops.VMWareVMOps(session)
+ self._vmops = vmops.VMwareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
@@ -209,7 +209,7 @@ class VMWareESXDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance, network_info)
-class VMWareAPISession(object):
+class VMwareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index fdf85dc8b..3f5041c22 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -16,7 +16,7 @@
# under the License.
"""
-A fake VMWare VI API implementation.
+A fake VMware VI API implementation.
"""
import pprint
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_util.py
index a3b20137d..a3b20137d 100644
--- a/nova/virt/vmwareapi/network_utils.py
+++ b/nova/virt/vmwareapi/network_util.py
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index 52d095ad3..39ea8e2e8 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -108,8 +108,8 @@ class VMwareHTTPFile(object):
raise NotImplementedError
-class VMWareHTTPWriteFile(VMwareHTTPFile):
- """VMWare file write handler class."""
+class VMwareHTTPWriteFile(VMwareHTTPFile):
+ """VMware file write handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, file_size, scheme="https"):
@@ -140,12 +140,12 @@ class VMWareHTTPWriteFile(VMwareHTTPFile):
self.conn.getresponse()
except Exception, excep:
LOG.debug(_("Exception during HTTP connection close in "
- "VMWareHTTpWrite. Exception is %s") % excep)
- super(VMWareHTTPWriteFile, self).close()
+ "VMwareHTTpWrite. Exception is %s") % excep)
+ super(VMwareHTTPWriteFile, self).close()
-class VmWareHTTPReadFile(VMwareHTTPFile):
- """VMWare file read handler class."""
+class VMwareHTTPReadFile(VMwareHTTPFile):
+ """VMware file read handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, scheme="https"):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index 4d53e266d..c5b524186 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -15,12 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""VIF drivers for VMWare."""
+"""VIF drivers for VMware."""
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
LOG = logging.getLogger(__name__)
@@ -44,28 +44,28 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
- if not network_utils.check_if_vlan_interface_exists(session,
+ if not network_util.check_if_vlan_interface_exists(session,
vlan_interface):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
- vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
+ vswitch_associated = network_util.get_vswitch_for_vlan_interface(
session, vlan_interface)
if vswitch_associated is None:
raise exception.SwitchNotFoundForNetworkAdapter(
adapter=vlan_interface)
# Check whether bridge already exists and retrieve the the ref of the
# network whose name_label is "bridge"
- network_ref = network_utils.get_network_with_the_name(session, bridge)
+ network_ref = network_util.get_network_with_the_name(session, bridge)
if network_ref is None:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
- network_utils.create_port_group(session, bridge,
+ network_util.create_port_group(session, bridge,
vswitch_associated, vlan_num)
else:
# Get the vlan id and vswitch corresponding to the port group
- _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup
+ _get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
# Check if the vswitch associated is proper
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 194b78a1d..83d120df5 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -101,69 +102,65 @@ class Vim:
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
- try:
- return getattr(self, attr_name)
- except AttributeError:
-
- def vim_request_handler(managed_object, **kwargs):
- """
- Builds the SOAP message and parses the response for fault
- checking and other errors.
-
- managed_object : Managed Object Reference or Managed
- Object Name
- **kwargs : Keyword arguments of the call
- """
- # Dynamic handler for VI SDK Calls
- try:
- request_mo = self._request_managed_object_builder(
- managed_object)
- request = getattr(self.client.service, attr_name)
- response = request(request_mo, **kwargs)
- # To check for the faults that are part of the message body
- # and not returned as Fault object response from the ESX
- # SOAP server
- if hasattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker"):
- fault_checker = getattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker")
- fault_checker(response)
- return response
- # Catch the VimFaultException that is raised by the fault
- # check of the SOAP response
- except error_util.VimFaultException, excep:
- raise
- except suds.WebFault, excep:
- doc = excep.document
- detail = doc.childAtPath("/Envelope/Body/Fault/detail")
- fault_list = []
- for child in detail.getChildren():
- fault_list.append(child.get("type"))
- raise error_util.VimFaultException(fault_list, excep)
- except AttributeError, excep:
- raise error_util.VimAttributeError(_("No such SOAP method "
- "'%s' provided by VI SDK") % (attr_name), excep)
- except (httplib.CannotSendRequest,
- httplib.ResponseNotReady,
- httplib.CannotSendHeader), excep:
- raise error_util.SessionOverLoadException(_("httplib "
- "error in %s: ") % (attr_name), excep)
- except Exception, excep:
- # Socket errors which need special handling for they
- # might be caused by ESX API call overload
- if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
- str(excep).find(CONN_ABORT_ERROR)) != -1:
- raise error_util.SessionOverLoadException(_("Socket "
- "error in %s: ") % (attr_name), excep)
- # Type error that needs special handling for it might be
- # caused by ESX host API call overload
- elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
- raise error_util.SessionOverLoadException(_("Type "
- "error in %s: ") % (attr_name), excep)
- else:
- raise error_util.VimException(
- _("Exception in %s ") % (attr_name), excep)
- return vim_request_handler
+ def vim_request_handler(managed_object, **kwargs):
+ """
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ managed_object : Managed Object Reference or Managed
+ Object Name
+ **kwargs : Keyword arguments of the call
+ """
+ # Dynamic handler for VI SDK Calls
+ try:
+ request_mo = self._request_managed_object_builder(
+ managed_object)
+ request = getattr(self.client.service, attr_name)
+ response = request(request_mo, **kwargs)
+ # To check for the faults that are part of the message body
+ # and not returned as Fault object response from the ESX
+ # SOAP server
+ if hasattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker"):
+ fault_checker = getattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker")
+ fault_checker(response)
+ return response
+ # Catch the VimFaultException that is raised by the fault
+ # check of the SOAP response
+ except error_util.VimFaultException, excep:
+ raise
+ except suds.WebFault, excep:
+ doc = excep.document
+ detail = doc.childAtPath("/Envelope/Body/Fault/detail")
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get("type"))
+ raise error_util.VimFaultException(fault_list, excep)
+ except AttributeError, excep:
+ raise error_util.VimAttributeError(_("No such SOAP method "
+ "'%s' provided by VI SDK") % (attr_name), excep)
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader), excep:
+ raise error_util.SessionOverLoadException(_("httplib "
+ "error in %s: ") % (attr_name), excep)
+ except Exception, excep:
+ # Socket errors which need special handling for they
+ # might be caused by ESX API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR)) != -1:
+ raise error_util.SessionOverLoadException(_("Socket "
+ "error in %s: ") % (attr_name), excep)
+ # Type error that needs special handling for it might be
+ # caused by ESX host API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type "
+ "error in %s: ") % (attr_name), excep)
+ else:
+ raise error_util.VimException(
+ _("Exception in %s ") % (attr_name), excep)
+ return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 740355679..e03b88804 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -26,7 +26,7 @@ def build_datastore_path(datastore_name, path):
def split_datastore_path(datastore_path):
"""
- Split the VMWare style datastore path to get the Datastore
+ Split the VMware style datastore path to get the Datastore
name and the entity path.
"""
spl = datastore_path.split('[', 1)[1].split(']', 1)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index e591245e2..883e751a8 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -32,7 +32,7 @@ from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -49,7 +49,7 @@ VMWARE_POWER_STATES = {
'suspended': power_state.PAUSED}
-class VMWareVMOps(object):
+class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session):
@@ -157,7 +157,7 @@ class VMWareVMOps(object):
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
def _check_if_network_bridge_exists(network_name):
- network_ref = network_utils.get_network_with_the_name(
+ network_ref = network_util.get_network_with_the_name(
self._session, network_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 15237fd5b..7c4480ea0 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -50,11 +50,11 @@ def start_transfer(context, read_file_handle, data_size,
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
- # In case of Glance - VMWare transfer, we just need a handle to the
- # HTTP Connection that is to send transfer data to the VMWare datastore.
+ # In case of Glance - VMware transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
- # In case of VMWare - Glance transfer, we relinquish VMWare HTTP file read
+ # In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glnace changing to active.
# The GlanceWriteThread handles the same for us.
@@ -96,7 +96,7 @@ def fetch_image(context, image, instance, **kwargs):
f = StringIO.StringIO()
image_service.download(context, image_id, f)
read_file_handle = read_write_util.GlanceFileRead(f)
- write_file_handle = read_write_util.VMWareHTTPWriteFile(
+ write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
@@ -113,7 +113,7 @@ def upload_image(context, image, instance, **kwargs):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug(_("Uploading image %s to the Glance image server") % image,
instance=instance)
- read_file_handle = read_write_util.VmWareHTTPReadFile(
+ read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 40d43da8d..debba4f02 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -34,6 +34,7 @@ from xml.parsers import expat
from eventlet import greenthread
+from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
@@ -43,6 +44,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import driver
@@ -153,6 +155,7 @@ class ImageType(object):
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
+ | 6 - config drive
"""
KERNEL = 0
@@ -161,7 +164,9 @@ class ImageType(object):
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
- _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
+ DISK_CONFIGDRIVE = 6
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
+ DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
@@ -169,8 +174,9 @@ class ImageType(object):
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
+ DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
- DISK_ISO_STR)
+ DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
@@ -178,14 +184,15 @@ class ImageType(object):
@classmethod
def get_role(cls, image_type_id):
- " Get the role played by the image, based on its type "
+ """Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
- cls.DISK_ISO: 'iso'
+ cls.DISK_ISO: 'iso',
+ cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
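
With the new entry, a config drive image id round-trips through the same ImageType lookups as the other disk types; a quick illustration, assuming to_string() maps ids onto the parallel _strs tuple:

    # Hedged check of the new type (values taken from the class above).
    assert ImageType.DISK_CONFIGDRIVE == 6
    assert ImageType.to_string(ImageType.DISK_CONFIGDRIVE) == ImageType.DISK_CONFIGDRIVE_STR
    assert ImageType.get_role(ImageType.DISK_CONFIGDRIVE) == 'configdrive'
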
@@ -868,6 +875,38 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
CONF.default_ephemeral_format)
+def generate_configdrive(session, instance, vm_ref, userdevice,
+ admin_password=None, files=None):
+ sr_ref = safe_find_sr(session)
+ vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
+ 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
+
+ try:
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ extra_md = {}
+ if admin_password:
+ extra_md['admin_pass'] = admin_password
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=files,
+ extra_md=extra_md)
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
+ with utils.tempdir() as tmp_path:
+ tmp_file = os.path.join(tmp_path, 'configdrive')
+ cdb.make_drive(tmp_file)
+
+ dev_path = utils.make_dev_path(dev)
+ utils.execute('dd',
+ 'if=%s' % tmp_file,
+ 'of=%s' % dev_path,
+ run_as_root=True)
+
+ create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
+ read_only=True)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ destroy_vdi(session, vdi_ref)
+
+
def create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e8e0f3cb0..4a8372cda 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -40,6 +40,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
+from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
@@ -77,6 +78,7 @@ DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
+DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
@@ -344,7 +346,8 @@ class VMOps(object):
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
- disk_image_type)
+ disk_image_type, admin_password,
+ injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
@@ -437,7 +440,12 @@ class VMOps(object):
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
- self.inject_instance_metadata(instance, vm_ref)
+
+ # NOTE(mikal): file injection only happens if we are _not_ using a
+ # configdrive.
+ if not configdrive.required_by(instance):
+ self.inject_instance_metadata(instance, vm_ref)
+
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
@@ -491,7 +499,7 @@ class VMOps(object):
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
- disk_image_type):
+ disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance['instance_type']
@@ -537,6 +545,13 @@ class VMOps(object):
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
+ # Attach (optional) configdrive v2 disk
+ if configdrive.required_by(instance):
+ vm_utils.generate_configdrive(self._session, instance, vm_ref,
+ DEVICE_CONFIGDRIVE,
+ admin_password=admin_password,
+ files=files)
+
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index e584bac67..7921e3e87 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -22,6 +22,7 @@ and storage repositories
import re
import string
+from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -381,3 +382,28 @@ def _get_target_port(iscsi_string):
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
+
+
+def introduce_sr_unless_present(session, sr_uuid, label, params):
+ LOG.debug(_("Introducing SR %s") % label)
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref:
+ LOG.debug(_('SR found in xapi database. No need to introduce'))
+ return sr_ref
+ sr_ref = introduce_sr(session, sr_uuid, label, params)
+
+ if sr_ref is None:
+ raise exception.NovaException(_('Could not introduce SR'))
+ return sr_ref
+
+
+def forget_sr_if_present(session, sr_uuid):
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref is None:
+ LOG.debug(_('SR %s not found in the xapi database') % sr_uuid)
+ return
+ try:
+ forget_sr(session, sr_uuid)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise exception.NovaException(_('Could not forget SR'))
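
introduce_sr_unless_present and forget_sr_if_present replace the per-instance VolumeOps methods removed below, so volumeops.py can treat the SR lifecycle as a pair of module-level calls. A sketch of how a caller is expected to pair them, mirroring the error handling in _connect_volume below; the names are placeholders:

    def with_sr(session, sr_uuid, label, params, use_sr):
        """Introduce the SR, run use_sr(sr_ref), and forget the SR again if
        anything goes wrong while the volume is being wired up."""
        sr_ref = introduce_sr_unless_present(session, sr_uuid, label, params)
        try:
            return use_sr(sr_ref)
        except Exception:
            forget_sr_if_present(session, sr_uuid)
            raise
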
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 51c97c9de..5f79b6c3a 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -35,76 +35,6 @@ class VolumeOps(object):
def __init__(self, session):
self._session = session
- def create_volume_for_sm(self, volume, sr_uuid):
- LOG.debug("Creating volume for Storage Manager")
-
- sm_vol_rec = {}
- try:
- sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(_('Unable to get SR using uuid'))
- #Create VDI
- label = 'vol-' + volume['id']
- desc = 'xensm volume for ' + volume['id']
- # size presented to xenapi is in bytes, while euca api is in GB
- vdi_size = volume['size'] * 1024 * 1024 * 1024
- vdi_ref = vm_utils.create_vdi(self._session, sr_ref,
- None, label, desc,
- vdi_size, False)
- vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
- sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
- return sm_vol_rec
-
- def delete_volume_for_sm(self, vdi_uuid):
- vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
- if vdi_ref is None:
- raise exception.NovaException(_('Could not find VDI ref'))
-
- vm_utils.destroy_vdi(self._session, vdi_ref)
-
- def create_sr(self, label, params):
- LOG.debug(_("Creating SR %s") % label)
- sr_ref = volume_utils.create_sr(self._session, label, params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not create SR'))
- sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
- if sr_rec is None:
- raise exception.NovaException(_('Could not retrieve SR record'))
- return sr_rec['uuid']
-
- # Checks if sr has already been introduced to this host
- def introduce_sr(self, sr_uuid, label, params):
- LOG.debug(_("Introducing SR %s") % label)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- LOG.debug(_('SR found in xapi database. No need to introduce'))
- return sr_ref
- sr_ref = volume_utils.introduce_sr(self._session, sr_uuid, label,
- params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not introduce SR'))
- return sr_ref
-
- def is_sr_on_host(self, sr_uuid):
- LOG.debug(_('Checking for SR %s') % sr_uuid)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- return True
- return False
-
- # Checks if sr has been introduced
- def forget_sr(self, sr_uuid):
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref is None:
- LOG.INFO(_('SR %s not found in the xapi database') % sr_uuid)
- return
- try:
- volume_utils.forget_sr(self._session, sr_uuid)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise exception.NovaException(_('Could not forget SR'))
-
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""Attach volume storage to VM instance."""
@@ -122,13 +52,13 @@ class VolumeOps(object):
connection_data = connection_info['data']
dev_number = volume_utils.get_device_number(mountpoint)
- self.connect_volume(connection_data, dev_number, instance_name,
+ self._connect_volume(connection_data, dev_number, instance_name,
vm_ref, hotplug=hotplug)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
- def connect_volume(self, connection_data, dev_number, instance_name,
+ def _connect_volume(self, connection_data, dev_number, instance_name,
vm_ref, hotplug=True):
description = 'Disk-for:%s' % instance_name
@@ -137,7 +67,8 @@ class VolumeOps(object):
# Introduce SR
try:
- sr_ref = self.introduce_sr(uuid, label, sr_params)
+ sr_ref = volume_utils.introduce_sr_unless_present(
+ self._session, uuid, label, sr_params)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -159,7 +90,7 @@ class VolumeOps(object):
vdi_uuid, target_lun)
except volume_utils.StorageError, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
@@ -169,7 +100,7 @@ class VolumeOps(object):
osvol=True)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
@@ -178,7 +109,7 @@ class VolumeOps(object):
self._session.call_xenapi("VBD.plug", vbd_ref)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
@@ -190,7 +121,7 @@ class VolumeOps(object):
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.mountpoint_to_number(mountpoint)
+ device_number = volume_utils.get_device_number(mountpoint)
try:
vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
@@ -199,9 +130,7 @@ class VolumeOps(object):
raise Exception(_('Unable to locate volume %s') % mountpoint)
try:
- vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
- if vm_rec['power_state'] != 'Halted':
+ if not vm_utils._is_vm_shutdown(self._session, vm_ref):
vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
@@ -214,6 +143,7 @@ class VolumeOps(object):
# Forget SR only if no other volumes on this host are using it
try:
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
volume_utils.purge_sr(self._session, sr_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
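The attach path above follows a simple cleanup-on-failure shape: once the SR has been introduced, any later failure (creating the VDI, creating the VBD, plugging it) forgets the SR again before re-raising. A sketch of just that shape, with every step passed in as a hypothetical callable rather than the real volume_utils/vm_utils helpers:

# Rough sketch of the attach flow; all five callables are stand-ins.

def attach_volume(introduce_sr, forget_sr, create_vdi, create_vbd, plug_vbd):
    sr_ref = introduce_sr()
    try:
        vdi_ref = create_vdi(sr_ref)
        vbd_ref = create_vbd(vdi_ref)
        plug_vbd(vbd_ref)
    except Exception:
        # Mirrors the volume_utils.forget_sr_if_present() calls in the
        # error paths above, so a half-finished attach does not leave a
        # stale SR behind.
        forget_sr()
        raise
    return vbd_ref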
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 514295605..fccdedac8 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -42,6 +42,9 @@ cinder_opts = [
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
+ cfg.StrOpt('os_region_name',
+ default=None,
+ help='region name of this node'),
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
@@ -66,7 +69,16 @@ def cinderclient(context):
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
- url = sc.url_for(service_type=service_type,
+ # extract the region if set in configuration
+ if CONF.os_region_name:
+ attr = 'region'
+ filter_value = CONF.os_region_name
+ else:
+ attr = None
+ filter_value = None
+ url = sc.url_for(attr=attr,
+ filter_value=filter_value,
+ service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
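The new os_region_name option simply narrows the service-catalog lookup to one region when set. A hedged sketch of the same filtering over a plain, assumed catalog structure (a list of services, each with endpoints keyed by region); the real lookup goes through the keystone service catalog's url_for() as shown in the hunk above.

# Illustrative only; catalog layout is an assumption for this sketch.

def pick_volume_endpoint(catalog, region_name=None,
                         service_type='volume', endpoint_type='publicURL'):
    for service in catalog:
        if service.get('type') != service_type:
            continue
        for endpoint in service.get('endpoints', []):
            if region_name and endpoint.get('region') != region_name:
                continue
            url = endpoint.get(endpoint_type)
            if url:
                return url
    raise LookupError('no %s endpoint found for region %r'
                      % (service_type, region_name))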
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c103526da..16851dba8 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -83,13 +83,21 @@ class Server(object):
raise exception.InvalidInput(
reason='The backlog must be more than 1')
+ bind_addr = (host, port)
+ # TODO(dims): eventlet's green dns/socket module does not actually
+ # support IPv6 in getaddrinfo(). We need to get around this in the
+ # future or monitor upstream for a fix
try:
- socket.inet_pton(socket.AF_INET6, host)
- family = socket.AF_INET6
+ info = socket.getaddrinfo(bind_addr[0],
+ bind_addr[1],
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0]
+ family = info[0]
+ bind_addr = info[-1]
except Exception:
family = socket.AF_INET
- self._socket = eventlet.listen((host, port), family, backlog=backlog)
+ self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
diff --git a/tools/lintstack.sh b/tools/lintstack.sh
index 848a16fa5..42c6a60b3 100755
--- a/tools/lintstack.sh
+++ b/tools/lintstack.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
+# Copyright (c) 2012-2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,13 +15,31 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Use lintstack.py to compare pylint errors between HEAD and HEAD~1
-
+# Use lintstack.py to compare pylint errors.
+# We run pylint twice, once on HEAD, once on the code before the latest
+# commit for review.
set -e
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
GITHEAD=`git rev-parse HEAD`
cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py
-git checkout HEAD~1
+
+if git rev-parse HEAD^2 2>/dev/null; then
+ # The HEAD is a Merge commit. Here, the patch to review is
+ # HEAD^2, the master branch is at HEAD^1, and the patch was
+ # written based on HEAD^2~1.
+ PREV_COMMIT=`git rev-parse HEAD^2~1`
+ git checkout HEAD~1
+ # The git merge is necessary for reviews with a series of patches.
+ # If not, this is a no-op, so it won't hurt either way.
+ git merge $PREV_COMMIT
+else
+ # The HEAD is not a merge commit. This won't happen on gerrit.
+ # Most likely you are running against your own patch locally.
+ # We assume the patch to examine is HEAD, and we compare it against
+ # HEAD~1
+ git checkout HEAD~1
+fi
+
# First generate tools/pylint_exceptions from HEAD~1
$TOOLS_DIR/lintstack.head.py generate
# Then use that as a reference to compare against HEAD
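The shell logic above picks which commit to treat as the pylint baseline: for a gerrit merge commit the patch under review is HEAD^2 and its parent HEAD^2~1 is the baseline, otherwise HEAD~1 is. The same decision, sketched in Python with subprocess purely for readability; this is not part of the tool, and the function names are mine.

# Sketch only: re-implements the baseline-commit decision from
# tools/lintstack.sh.

import subprocess

def rev_parse(ref):
    """Return the commit id for ref, or None if it does not resolve."""
    try:
        return subprocess.check_output(
            ['git', 'rev-parse', ref],
            stderr=subprocess.DEVNULL).strip()
    except subprocess.CalledProcessError:
        return None

def baseline_commit():
    if rev_parse('HEAD^2'):
        # HEAD has a second parent, so it is a merge commit: the patch
        # under review is HEAD^2 and was written on top of HEAD^2~1.
        return rev_parse('HEAD^2~1')
    # Not a merge commit: compare HEAD against its direct parent.
    return rev_parse('HEAD~1')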
diff --git a/tools/pip-requires b/tools/pip-requires
index 1eb09ff65..1845ba7dd 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -8,7 +8,7 @@ eventlet>=0.9.17
kombu>=1.0.4
lxml>=2.3
routes==1.12.3
-WebOb>=1.0.8
+WebOb==1.2.3
greenlet>=0.3.1
PasteDeploy==1.5.0
paste