Diffstat:
-rw-r--r--  .coveragerc | 3
-rwxr-xr-x  bin/nova-baremetal-deploy-helper | 20
-rwxr-xr-x  bin/nova-compute | 3
-rwxr-xr-x  bin/nova-manage | 5
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.json | 4
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.xml | 4
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json | 20
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml | 5
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json | 94
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml | 23
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-req.json | 10
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml | 9
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json | 20
-rw-r--r--  doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml | 5
-rw-r--r--  etc/nova/nova.conf.sample | 3072
-rw-r--r--  nova/api/openstack/compute/contrib/admin_networks.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/coverage_ext.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/hosts.py | 259
-rw-r--r--  nova/api/openstack/compute/contrib/os_networks.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/server_password.py | 2
-rw-r--r--  nova/availability_zones.py | 2
-rw-r--r--  nova/cells/messaging.py | 11
-rw-r--r--  nova/compute/__init__.py | 17
-rw-r--r--  nova/compute/api.py | 190
-rw-r--r--  nova/compute/manager.py | 329
-rw-r--r--  nova/compute/resource_tracker.py | 7
-rw-r--r--  nova/compute/utils.py | 60
-rw-r--r--  nova/conductor/__init__.py | 3
-rw-r--r--  nova/conductor/api.py | 72
-rw-r--r--  nova/conductor/manager.py | 34
-rw-r--r--  nova/conductor/rpcapi.py | 33
-rw-r--r--  nova/context.py | 17
-rw-r--r--  nova/db/api.py | 35
-rw-r--r--  nova/db/sqlalchemy/api.py | 43
-rw-r--r--  nova/db/sqlalchemy/models.py | 17
-rw-r--r--  nova/network/api.py | 30
-rw-r--r--  nova/network/linux_net.py | 14
-rw-r--r--  nova/network/quantumv2/api.py | 14
-rw-r--r--  nova/quota.py | 83
-rw-r--r--  nova/scheduler/driver.py | 2
-rw-r--r--  nova/service.py | 36
-rw-r--r--  nova/servicegroup/api.py | 4
-rw-r--r--  nova/servicegroup/drivers/__init__.py | 0
-rw-r--r--  nova/servicegroup/drivers/db.py (renamed from nova/servicegroup/db_driver.py) | 0
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_hosts.py | 126
-rw-r--r--  nova/tests/cells/test_cells_messaging.py | 2
-rw-r--r--  nova/tests/compute/test_compute.py | 455
-rw-r--r--  nova/tests/compute/test_compute_utils.py | 94
-rw-r--r--  nova/tests/compute/test_host_api.py | 105
-rw-r--r--  nova/tests/compute/test_resource_tracker.py | 26
-rw-r--r--  nova/tests/conductor/test_conductor.py | 85
-rw-r--r--  nova/tests/fake_hosts.py | 32
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl | 20
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl | 94
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl | 23
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl | 10
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl | 9
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl | 20
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/test_api_samples.py | 50
-rw-r--r--  nova/tests/integrated/test_multiprocess_api.py | 54
-rw-r--r--  nova/tests/test_configdrive2.py | 4
-rw-r--r--  nova/tests/test_db_api.py | 14
-rw-r--r--  nova/tests/test_quota.py | 47
-rw-r--r--  nova/tests/test_service.py | 15
-rw-r--r--  nova/tests/test_virt_drivers.py | 2
-rw-r--r--  nova/tests/test_wsgi.py | 9
-rw-r--r--  nova/tests/test_xenapi.py | 20
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py | 89
-rw-r--r--  nova/tests/virt/xenapi/test_volumeops.py | 54
-rw-r--r--  nova/virt/baremetal/driver.py | 3
-rw-r--r--  nova/virt/baremetal/pxe.py | 16
-rw-r--r--  nova/virt/configdrive.py | 31
-rw-r--r--  nova/virt/driver.py | 9
-rw-r--r--  nova/virt/fake.py | 3
-rw-r--r--  nova/virt/hyperv/vmops.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 55
-rw-r--r--  nova/virt/libvirt/utils.py | 30
-rw-r--r--  nova/virt/vmwareapi/driver.py | 3
-rw-r--r--  nova/virt/xenapi/driver.py | 2
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 51
-rw-r--r--  nova/virt/xenapi/vmops.py | 21
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 26
-rw-r--r--  nova/virt/xenapi/volumeops.py | 90
-rw-r--r--  nova/wsgi.py | 11
-rw-r--r--  plugins/xenserver/networking/etc/xensource/scripts/novalib.py | 4
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 2
-rwxr-xr-x  run_tests.sh | 16
-rw-r--r--  smoketests/base.py | 12
-rw-r--r--  tools/conf/extract_opts.py | 146
-rwxr-xr-x  tools/lintstack.sh | 26
-rw-r--r--  tools/pip-requires | 2
-rw-r--r--  tools/test-requires | 2
-rw-r--r--  tox.ini | 19
96 files changed, 4276 insertions, 2395 deletions
diff --git a/.coveragerc b/.coveragerc
index 82fe47792..902a94349 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,7 @@
[run]
branch = True
-omit = /usr*,setup.py,*egg*,.venv/*,.tox/*,nova/tests/*
+source = nova
+omit = nova/tests/*,DynamicallyCompiledCheetahTemplate.py
[report]
ignore-errors = True
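
With "source = nova", coverage measurement is scoped to the nova package itself, which replaces the old blacklist of system and virtualenv paths. A minimal sketch of driving the same rc file through the coverage.py API (the rc file stays authoritative; the class spelling below assumes the coverage.py 3.x API):

    # Hedged sketch, assuming coverage.py 3.x (`coverage.coverage`; newer
    # releases spell the class `coverage.Coverage`).
    import coverage

    cov = coverage.coverage(config_file='.coveragerc')  # picks up source= and omit=
    cov.start()
    import nova.version  # only modules under the source=nova tree are measured
    cov.stop()
    cov.report()  # [report] options from the rc file apply here
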
diff --git a/bin/nova-baremetal-deploy-helper b/bin/nova-baremetal-deploy-helper
index fa0a30d9e..f8a487d37 100755
--- a/bin/nova-baremetal-deploy-helper
+++ b/bin/nova-baremetal-deploy-helper
@@ -56,7 +56,7 @@ QUEUE = Queue.Queue()
# They are split for stub-out.
def discovery(portal_address, portal_port):
- """Do iSCSI discovery on portal"""
+ """Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
@@ -66,7 +66,7 @@ def discovery(portal_address, portal_port):
def login_iscsi(portal_address, portal_port, target_iqn):
- """Login to an iSCSI target"""
+ """Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
@@ -79,7 +79,7 @@ def login_iscsi(portal_address, portal_port, target_iqn):
def logout_iscsi(portal_address, portal_port, target_iqn):
- """Logout from an iSCSI target"""
+ """Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
@@ -90,7 +90,7 @@ def logout_iscsi(portal_address, portal_port, target_iqn):
def make_partitions(dev, root_mb, swap_mb):
- """Create partitions for root and swap on a disk device"""
+ """Create partitions for root and swap on a disk device."""
commands = ['o,w',
'n,p,1,,+%dM,t,1,83,w' % root_mb,
'n,p,2,,+%dM,t,2,82,w' % swap_mb,
@@ -106,13 +106,13 @@ def make_partitions(dev, root_mb, swap_mb):
def is_block_device(dev):
- """Check whether a device is block or not"""
+ """Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
- """Execute dd from src to dst"""
+ """Execute dd from src to dst."""
utils.execute('dd',
'if=%s' % src,
'of=%s' % dst,
@@ -122,7 +122,7 @@ def dd(src, dst):
def mkswap(dev, label='swap1'):
- """Execute mkswap on a device"""
+ """Execute mkswap on a device."""
utils.execute('mkswap',
'-L', label,
dev,
@@ -131,7 +131,7 @@ def mkswap(dev, label='swap1'):
def block_uuid(dev):
- """Get UUID of a block device"""
+ """Get UUID of a block device."""
out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
@@ -219,7 +219,7 @@ def deploy(address, port, iqn, lun, image_path, pxe_config_path,
class Worker(threading.Thread):
- """Thread that handles requests in queue"""
+ """Thread that handles requests in queue."""
def __init__(self):
super(Worker, self).__init__()
@@ -250,7 +250,7 @@ class Worker(threading.Thread):
class BareMetalDeploy(object):
- """WSGI server for bare-metal deployment"""
+ """WSGI server for bare-metal deployment."""
def __init__(self):
self.worker = Worker()
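
The docstring-only hunks above normalize one-line docstrings to end with a period, per PEP 257 ("The docstring is a phrase ending in a period"). The convention in brief:

    # PEP 257 one-liner style, as applied throughout this file:
    def block_uuid(dev):
        """Get UUID of a block device."""  # imperative phrase, trailing period
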
diff --git a/bin/nova-compute b/bin/nova-compute
index d93ddb5bd..8826015d4 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -55,6 +55,7 @@ if __name__ == '__main__':
logging.setup('nova')
utils.monkey_patch()
server = service.Service.create(binary='nova-compute',
- topic=CONF.compute_topic)
+ topic=CONF.compute_topic,
+ db_allowed=False)
service.serve(server)
service.wait()
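
The new db_allowed=False flag marks nova-compute as a service that must not talk to the database directly; its data access is meant to be routed through the new conductor service instead (see the nova/conductor/* files in this change). A hypothetical, much-simplified sketch of the pattern, with all names below illustrative:

    # Hypothetical sketch only; the real routing lives in nova.service and
    # nova.conductor and is considerably more involved.
    class FakeConductorAPI(object):
        def instance_update(self, context, instance_uuid, updates):
            print 'conductor performs DB update for %s: %r' % (
                instance_uuid, updates)

    class ComputeLikeService(object):
        def __init__(self, db_allowed=True):
            self.db_allowed = db_allowed
            self.conductor = FakeConductorAPI()

        def instance_update(self, context, instance_uuid, updates):
            if not self.db_allowed:
                # no direct DB access; delegate over RPC to the conductor
                return self.conductor.instance_update(
                    context, instance_uuid, updates)
            print 'would call db.instance_update() directly'

    ComputeLikeService(db_allowed=False).instance_update(
        None, 'some-uuid', {'vm_state': 'active'})
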
diff --git a/bin/nova-manage b/bin/nova-manage
index 62a6cdc3a..67212a198 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -307,7 +307,6 @@ class FixedIpCommands(object):
for fixed_ip in fixed_ips:
hostname = None
host = None
- mac_address = None
network = all_networks.get(fixed_ip['network_id'])
if network:
has_ip = True
@@ -627,7 +626,7 @@ class ServiceCommands(object):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
- services = availability_zone.set_availability_zones(ctxt, services)
+ services = availability_zones.set_availability_zones(ctxt, services)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -741,7 +740,6 @@ class HostCommands(object):
print "%-25s\t%-15s" % (_('host'),
_('zone'))
ctxt = context.get_admin_context()
- now = timeutils.utcnow()
services = db.service_get_all(ctxt)
services = availability_zones.set_availability_zones(ctxt, services)
if zone:
@@ -776,7 +774,6 @@ class InstanceTypeCommands(object):
"""Class for managing instance types / flavors."""
def _print_instance_types(self, name, val):
- deleted = ('', ', inactive')[val["deleted"] == 1]
is_public = ('private', 'public')[val["is_public"] == 1]
print ("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
"FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 5f92c1366..42e86eadd 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -298,7 +298,7 @@
},
{
"alias": "os-admin-networks",
- "description": "Admin-only Network Management Extension",
+ "description": "Admin-only Network Management Extension.",
"links": [],
"name": "AdminNetworks",
"namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
@@ -306,7 +306,7 @@
},
{
"alias": "os-networks",
- "description": "Tenant-based Network Management Extension",
+ "description": "Tenant-based Network Management Extension.",
"links": [],
"name": "OSNetworks",
"namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 8f92b274b..ea0b45a12 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -126,13 +126,13 @@
<description>Allow multiple create in the Create Server v1.1 API.</description>
</extension>
<extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
- <description>Admin-only Network Management Extension</description>
+ <description>Admin-only Network Management Extension.</description>
</extension>
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
<extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
- <description>Tenant-based Network Management Extension</description>
+ <description>Tenant-based Network Management Extension.</description>
</extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
<description>Quota classes management support.</description>
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
new file mode 100644
index 000000000..15604fe2b
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
new file mode 100644
index 000000000..5357967f3
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor>
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
new file mode 100644
index 000000000..5bb94f348
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
new file mode 100644
index 000000000..55b54f700
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
new file mode 100644
index 000000000..83b94cea0
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "flavortest",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "100",
+ "swap": 5
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
new file mode 100644
index 000000000..b604f9bdf
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="flavortest"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="100"
+        swap="5" />
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
new file mode 100644
index 000000000..d8e75d381
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "100",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/100",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/100",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "flavortest",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
new file mode 100644
index 000000000..7b779cf3f
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="100" swap="5">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/100" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/100" rel="bookmark"/>
+</flavor>
\ No newline at end of file
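
The eight doc/api_samples files above document the new os-flavor-swap API extension, which exposes a flavor's swap disk size (in MB, serialized as an empty string when no swap is defined) on flavor show, list, and create. A hypothetical client-side sketch against the sample URLs (endpoint and token are placeholders from the samples, not real credentials):

    import json
    import urllib2

    # Mirrors flavor-swap-get-resp.json above; the token header is hypothetical.
    req = urllib2.Request('http://openstack.example.com/v2/openstack/flavors/1')
    req.add_header('Accept', 'application/json')
    req.add_header('X-Auth-Token', 'PLACEHOLDER')
    flavor = json.load(urllib2.urlopen(req))['flavor']
    print flavor['name'], repr(flavor['swap'])  # m1.tiny '' (no swap disk)
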
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index bee408412..77133d988 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1,1813 +1,2521 @@
-####################
-# nova.conf sample #
-####################
-
[DEFAULT]
-######## defined in nova.openstack.common.cfg:CommonConfigOpts ########
+#
+# Options defined in nova.openstack.common.cfg:CommonConfigOpts
+#
+
+# Print debugging output (boolean value)
+#debug=false
+
+# Print more verbose output (boolean value)
+#verbose=false
+
+# If this option is specified, the logging configuration file
+# specified is used and overrides any other logging options
+# specified. Please see the Python logging module
+# documentation for details on logging configuration files.
+# (string value)
+#log_config=<None>
+
+# A logging.Formatter log message format string which may use
+# any of the available logging.LogRecord attributes. Default:
+# %(default)s (string value)
+#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If not set,
+# logging will go to stdout. (string value)
+#log_file=<None>
+
+# (Optional) The directory to keep log files in (will be
+# prepended to --log-file) (string value)
+#log_dir=<None>
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+# syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# availability_zone to show internal services under (string
+# value)
+#internal_service_availability_zone=internal
+
+# default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.manager
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.netconf
+#
+
+# ip address of this host (string value)
+#my_ip=10.0.0.1
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# use ipv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are False for no
+# notifications, True for notifications on any instance
+# changes. (boolean value)
+#notify_on_any_change=false
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+#notify_on_state_change=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=$pybasedir/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in nova.policy
+#
+
+# JSON file representing policy (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in nova.quota
+#
+
+# number of instances allowed per project (integer value)
+#quota_instances=10
+
+# number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# megabytes of instance ram allowed per project (integer
+# value)
+#quota_ram=51200
+
+# number of floating ips allowed per project (integer value)
+#quota_floating_ips=10
+
+# number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# number of bytes allowed per injected file path (integer
+# value)
+#quota_injected_file_path_bytes=255
+
+# number of security groups per project (integer value)
+#quota_security_groups=10
+
+# number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# number of key pairs per user (integer value)
+#quota_key_pairs=100
+
+# number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
+
+# number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
+
+# default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+
+#
+# Options defined in nova.service
+#
+
+# seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
+
+# enable periodic tasks (boolean value)
+#periodic_enable=true
+
+# range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# a list of APIs to enable by default (list value)
+#enabled_apis=ec2,osapi_compute,metadata
+
+# IP address for EC2 API to listen (string value)
+#ec2_listen=0.0.0.0
+
+# port for ec2 api to listen (integer value)
+#ec2_listen_port=8773
+
+# Number of workers for EC2 API service (integer value)
+#ec2_workers=<None>
+
+# IP address for OpenStack API to listen (string value)
+#osapi_compute_listen=0.0.0.0
+
+# list port for osapi compute (integer value)
+#osapi_compute_listen_port=8774
+
+# Number of workers for OpenStack API service (integer value)
+#osapi_compute_workers=<None>
+
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
+
+# IP address for metadata api to listen (string value)
+#metadata_listen=0.0.0.0
+
+# port for metadata api to listen (integer value)
+#metadata_listen_port=8775
+
+# Number of workers for metadata service (integer value)
+#metadata_workers=<None>
+
+# full class name for the Manager for compute (string value)
+#compute_manager=nova.compute.manager.ComputeManager
+
+# full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
+
+# full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
+
+# full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.VlanManager
+
+# full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
+
+# maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+
+
+#
+# Options defined in nova.test
+#
+
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
+
+
+#
+# Options defined in nova.utils
+#
+
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator
+
+# Length of generated instance admin passwords (integer value)
+#password_length=12
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+#instance_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/nova/rootwrap.conf
+
+
+#
+# Options defined in nova.wsgi
+#
+
+# File name for the paste.deploy config for nova-api (string
+# value)
+#api_paste_config=api-paste.ini
+
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# whether to rate limit the api (boolean value)
+#api_rate_limit=true
+
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+#auth_strategy=noauth
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# the ip of the ec2 api server (string value)
+#ec2_host=$my_ip
+
+# the internal ip of the ec2 api server (string value)
+#ec2_dmz_host=$my_ip
+
+# the port of the ec2 api server (integer value)
+#ec2_port=8773
+
+# the protocol to use when connecting to the ec2 api server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# the path prefix used to call the ec2 api server (string
+# value)
+#ec2_path=/services/Cloud
+
+# list of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Quantum will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_quantum_metadata_proxy=false
+
+# Shared secret to validate proxies Quantum metadata requests
+# (string value)
+#quantum_metadata_proxy_shared_secret=
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# the maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
-# debug=false
-#### (BoolOpt) Print debugging output
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
-# verbose=false
-#### (BoolOpt) Print more verbose output
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
-# log_config=<None>
-#### (StrOpt) If this option is specified, the logging configuration file
-#### specified is used and overrides any other logging options
-#### specified. Please see the Python logging module
-#### documentation for details on logging configuration files.
-# log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
-#### (StrOpt) A logging.Formatter log message format string which may use
-#### any of the available logging.LogRecord attributes. Default:
-#### %default
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
-# log_date_format=%Y-%m-%d %H:%M:%S
-#### (StrOpt) Format string for %(asctime)s in log records. Default:
-#### %default
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
-# log_file=<None>
-#### (StrOpt) (Optional) Name of log file to output to. If not set,
-#### logging will go to stdout.
-# log_dir=<None>
-#### (StrOpt) (Optional) The directory to keep log files in (will be
-#### prepended to --logfile)
+#
+# Options defined in nova.api.openstack.compute.contrib.hide_server_addresses
+#
-# use_syslog=false
-#### (BoolOpt) Use syslog for logging.
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
-# syslog_log_facility=LOG_USER
-#### (StrOpt) syslog facility to receive log lines
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
-######## defined in nova.crypto ########
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
-# ca_file=cacert.pem
-#### (StrOpt) Filename of root CA
-# key_file=private/cakey.pem
-#### (StrOpt) Filename of private key
+#
+# Options defined in nova.api.openstack.compute.servers
+#
-# crl_file=crl.pem
-#### (StrOpt) Filename of root Certificate Revocation List
+# Allows use of instance password during server creation
+# (boolean value)
+#enable_instance_password=true
-# keys_path=$state_path/keys
-#### (StrOpt) Where we keep our keys
-# ca_path=$state_path/CA
-#### (StrOpt) Where we keep our root CA
+#
+# Options defined in nova.api.sizelimit
+#
-# use_project_ca=false
-#### (BoolOpt) Should we use a CA for each project?
+# the maximum body size per each osapi request(bytes) (integer
+# value)
+#osapi_max_request_body_size=114688
-# user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
-#### (StrOpt) Subject for certificate for users, %s for project, user,
-#### timestamp
-# project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
-#### (StrOpt) Subject for certificate for projects, %s for project,
-#### timestamp
+#
+# Options defined in nova.cert.rpcapi
+#
+# the topic cert nodes listen on (string value)
+#cert_topic=cert
-######## defined in nova.flags ########
-# sql_connection=sqlite:///$state_path/$sqlite_db
-#### (StrOpt) The SQLAlchemy connection string used to connect to the
-#### database
+#
+# Options defined in nova.cloudpipe.pipelib
+#
-# api_paste_config=api-paste.ini
-#### (StrOpt) File name for the paste.deploy config for nova-api
+# image id used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
-# pybasedir=/usr/lib/python/site-packages
-#### (StrOpt) Directory where the nova python module is installed
+# Instance type for vpn instances (string value)
+#vpn_instance_type=m1.tiny
-# bindir=$pybasedir/bin
-#### (StrOpt) Directory where nova binaries are installed
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
-# state_path=$pybasedir
-#### (StrOpt) Top-level directory for maintaining nova's state
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
-# fake_network=false
-#### (BoolOpt) If passed, use fake network devices and addresses
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
-# sql_connection_debug=0
-#### (IntOpt) Verbosity of SQL debugging information. 0=None,
-#### 100=Everything
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
-# sql_connection_trace=false
-#### (BoolOpt) Add python stack traces to SQL as comment strings
-# my_ip=10.0.0.1
-#### (StrOpt) ip address of this host
+#
+# Options defined in nova.common.memorycache
+#
-# region_list=
-#### (ListOpt) list of region=fqdn pairs separated by commas
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
-# aws_access_key_id=admin
-#### (StrOpt) AWS Access ID
-# aws_secret_access_key=admin
-#### (StrOpt) AWS Access Key
+#
+# Options defined in nova.compute
+#
-# glance_host=$my_ip
-#### (StrOpt) default glance hostname or ip
+# The full class name of the compute API class to use (string
+# value)
+#compute_api_class=nova.compute.api.API
-# glance_port=9292
-#### (IntOpt) default glance port
-# glance_api_servers=$glance_host:$glance_port
-#### (ListOpt) A list of the glance api servers available to nova. Prefix
-#### with https:// for ssl-based glance api servers.
-#### ([hostname|ip]:port)
+#
+# Options defined in nova.compute.api
+#
-# glance_api_insecure=false
-#### (BoolOpt) Allow to perform insecure SSL (https) requests to glance
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
-# glance_num_retries=0
-#### (IntOpt) Number retries when downloading an image from glance
+# availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
-# s3_port=3333
-#### (IntOpt) port used when accessing the s3 api
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
-# s3_host=$my_ip
-#### (StrOpt) hostname or ip for openstack to use when accessing the s3
-#### api
+# kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
-# cert_topic=cert
-#### (StrOpt) the topic cert nodes listen on
+# The full class name of the security group handler class
+# (string value)
+#security_group_handler=nova.network.sg.NullSecurityGroupHandler
-# compute_topic=compute
-#### (StrOpt) the topic compute nodes listen on
+# The full class name of the security API class (string value)
+#security_group_api=nova.compute.api.SecurityGroupAPI
-# console_topic=console
-#### (StrOpt) the topic console proxy nodes listen on
-# scheduler_topic=scheduler
-#### (StrOpt) the topic scheduler nodes listen on
+#
+# Options defined in nova.compute.instance_types
+#
-# network_topic=network
-#### (StrOpt) the topic network nodes listen on
+# default instance type to use, testing only (string value)
+#default_instance_type=m1.small
-# api_rate_limit=true
-#### (BoolOpt) whether to rate limit the api
-# enabled_apis=ec2,osapi_compute,metadata
-#### (ListOpt) a list of APIs to enable by default
+#
+# Options defined in nova.compute.manager
+#
-# ec2_host=$my_ip
-#### (StrOpt) the ip of the ec2 api server
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
-# ec2_dmz_host=$my_ip
-#### (StrOpt) the internal ip of the ec2 api server
+# Name of network to use to set access ips for instances
+# (string value)
+#default_access_ip_network_name=<None>
-# ec2_port=8773
-#### (IntOpt) the port of the ec2 api server
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
-# ec2_scheme=http
-#### (StrOpt) the protocol to use when connecting to the ec2 api server
-#### (http, https)
+# where instances are stored on disk (string value)
+#instances_path=$state_path/instances
-# ec2_path=/services/Cloud
-#### (StrOpt) the path prefix used to call the ec2 api server
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+#instance_usage_audit=false
-# osapi_compute_ext_list=
-#### (ListOpt) Specify list of extensions to load when using
-#### osapi_compute_extension option with
-#### nova.api.openstack.compute.contrib.select_extensions
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
-# osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
-#### (MultiStrOpt) osapi compute extension to load
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
-# osapi_path=/v1.1/
-#### (StrOpt) the path prefix used to call the openstack api server
+# interval to pull bandwidth usage info (integer value)
+#bandwidth_poll_interval=600
-# osapi_compute_link_prefix=<None>
-#### (StrOpt) Base URL that will be presented to users in links to the
-#### OpenStack Compute API
+# Number of seconds between instance info_cache self healing
+# updates (integer value)
+#heal_instance_info_cache_interval=60
-# osapi_glance_link_prefix=<None>
-#### (StrOpt) Base URL that will be presented to users in links to glance
-#### resources
+# Interval in seconds for querying the host status (integer
+# value)
+#host_state_interval=120
-# osapi_max_limit=1000
-#### (IntOpt) the maximum number of items returned in a single response
-#### from a collection resource
+# Number of seconds to wait between runs of the image cache
+# manager (integer value)
+#image_cache_manager_interval=2400
-# metadata_host=$my_ip
-#### (StrOpt) the ip for the metadata api server
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
-# metadata_port=8775
-#### (IntOpt) the port for the metadata api port
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
-# default_image=ami-11111
-#### (StrOpt) default image to use, testing only
+# Action to take if a running deleted instance is
+# detected.Valid options are 'noop', 'log' and 'reap'. Set to
+# 'noop' to disable. (string value)
+#running_deleted_instance_action=log
-# default_instance_type=m1.small
-#### (StrOpt) default instance type to use, testing only
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
-# null_kernel=nokernel
-#### (StrOpt) kernel image that indicates not to use a kernel, but to use
-#### a raw disk image instead
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
-# vpn_image_id=0
-#### (StrOpt) image id used when starting up a cloudpipe vpn server
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
-# vpn_key_suffix=-vpn
-#### (StrOpt) Suffix to add to project name for vpn key and secgroups
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status.Set to 0 to disable. (integer value)
+#instance_build_timeout=0
-# sqlite_db=nova.sqlite
-#### (StrOpt) the filename to use with sqlite
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
-# sqlite_synchronous=true
-#### (BoolOpt) If passed, use synchronous mode for sqlite
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
-# sql_idle_timeout=3600
-#### (IntOpt) timeout before idle sql connections are reaped
-# sql_max_retries=10
-#### (IntOpt) maximum db connection retries during startup. (setting -1
-#### implies an infinite retry count)
+#
+# Options defined in nova.compute.resource_tracker
+#
-# sql_retry_interval=10
-#### (IntOpt) interval between retries of opening a sql connection
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
-# compute_manager=nova.compute.manager.ComputeManager
-#### (StrOpt) full class name for the Manager for compute
+# Amount of memory in MB to reserve for the host (integer
+# value)
+#reserved_host_memory_mb=512
-# console_manager=nova.console.manager.ConsoleProxyManager
-#### (StrOpt) full class name for the Manager for console proxy
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
-# cert_manager=nova.cert.manager.CertManager
-#### (StrOpt) full class name for the Manager for cert
-# instance_dns_manager=nova.network.dns_driver.DNSDriver
-#### (StrOpt) full class name for the DNS Manager for instance IPs
+#
+# Options defined in nova.compute.rpcapi
+#
-# instance_dns_domain=
-#### (StrOpt) full class name for the DNS Zone for instance IPs
+# the topic compute nodes listen on (string value)
+#compute_topic=compute
-# floating_ip_dns_manager=nova.network.dns_driver.DNSDriver
-#### (StrOpt) full class name for the DNS Manager for floating IPs
-# network_manager=nova.network.manager.VlanManager
-#### (StrOpt) full class name for the Manager for network
+#
+# Options defined in nova.console.manager
+#
-# scheduler_manager=nova.scheduler.manager.SchedulerManager
-#### (StrOpt) full class name for the Manager for scheduler
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
-# host=nova
-#### (StrOpt) Name of this node. This can be an opaque identifier. It is
-#### not necessarily a hostname, FQDN, or IP address. However,
-#### the node name must be valid within an AMQP key, and if using
-#### ZeroMQ, a valid hostname, FQDN, or IP address
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
-# node_availability_zone=nova
-#### (StrOpt) availability zone of this node
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
-# memcached_servers=<None>
-#### (ListOpt) Memcached servers or None for in process cache.
-# instance_usage_audit_period=month
-#### (StrOpt) time period to generate instance usages for. Time period
-#### must be hour, day, month or year
+#
+# Options defined in nova.console.rpcapi
+#
-# bandwidth_poll_interval=600
-#### (IntOpt) interval to pull bandwidth usage info
+# the topic console proxy nodes listen on (string value)
+#console_topic=console
-# resume_guests_state_on_host_boot=false
-#### (BoolOpt) Whether to start guests that were running before the host
-#### rebooted
-# default_ephemeral_format=<None>
-#### (StrOpt) The default format an ephemeral_volume will be formatted
-#### with on creation.
+#
+# Options defined in nova.console.vmrc
+#
-# rootwrap_config=/etc/nova/rootwrap.conf
-#### (StrOpt) Path to the rootwrap configuration file to use for running
-#### commands as root
+# port for VMware VMRC connections (integer value)
+#console_vmrc_port=443
-# network_driver=nova.network.linux_net
-#### (StrOpt) Driver to use for network creation
+# number of retries for retrieving VMRC information (integer
+# value)
+#console_vmrc_error_retries=10
-# use_ipv6=false
-#### (BoolOpt) use ipv6
-# enable_instance_password=true
-#### (BoolOpt) Allows use of instance password during server creation
+#
+# Options defined in nova.console.xvp
+#
-# password_length=12
-#### (IntOpt) Length of generated instance admin passwords
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
-# monkey_patch=false
-#### (BoolOpt) Whether to log monkey patching
+# generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
-# monkey_patch_modules=nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator
-#### (ListOpt) List of modules/decorators to monkey patch
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
-# allow_resize_to_same_host=false
-#### (BoolOpt) Allow destination machine to match source for resize. Useful
-#### when testing in single-host environments.
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
-# reclaim_instance_interval=0
-#### (IntOpt) Interval in seconds for reclaiming deleted instances
+# port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
-# zombie_instance_updated_at_window=172800
-#### (IntOpt) Number of seconds zombie instances are cleaned up.
-# service_down_time=60
-#### (IntOpt) maximum time since last check-in for up service
+#
+# Options defined in nova.consoleauth
+#
-# default_schedule_zone=<None>
-#### (StrOpt) availability zone to use when user doesn't specify one
+# the topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
-# isolated_images=
-#### (ListOpt) Images to run on isolated host
-# isolated_hosts=
-#### (ListOpt) Host reserved for specific images
+#
+# Options defined in nova.consoleauth.manager
+#
-# cache_images=all
-#### (StrOpt) Cache glance images locally. `all` will cache all images,
-#### `some` will only cache images that have the image_property
-#### `cache_in_nova=True`, and `none` turns off caching entirely
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
-# use_cow_images=true
-#### (BoolOpt) Whether to use cow images
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
-# compute_api_class=nova.compute.api.API
-#### (StrOpt) The full class name of the compute API class to use
-# network_api_class=nova.network.api.API
-#### (StrOpt) The full class name of the network API class to use
+#
+# Options defined in nova.db.api
+#
-# volume_api_class=nova.volume.cinder.API
-#### (StrOpt) The full class name of the volume API class to use
+# The backend to use for db (string value)
+#db_backend=sqlalchemy
-# security_group_handler=nova.network.sg.NullSecurityGroupHandler
-#### (StrOpt) The full class name of the security group handler class
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
-# default_access_ip_network_name=<None>
-#### (StrOpt) Name of network to use to set access ips for instances
+# Template string to be used to generate instance names
+# (string value)
+#instance_name_template=instance-%08x
-# auth_strategy=noauth
-#### (StrOpt) The strategy to use for auth: noauth or keystone.
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
-# non_inheritable_image_properties=cache_in_nova,bittorrent
-#### (ListOpt) These are image properties which a snapshot should not
-#### inherit from an instance
-# defer_iptables_apply=false
-#### (BoolOpt) Whether to batch up the application of IPTables rules during
-#### a host restart and apply all at the end of the init phase
+#
+# Options defined in nova.db.base
+#
-# compute_driver=<None>
-#### (StrOpt) Driver to use for controlling virtualization. Options
-#### include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
-#### fake.FakeDriver, baremetal.BareMetalDriver,
-#### vmwareapi.VMWareESXDriver
+# driver to use for database access (string value)
+#db_driver=nova.db
-######## defined in nova.notifications ########
+#
+# Options defined in nova.db.sqlalchemy.api
+#
-# notify_on_any_change=false
-#### (BoolOpt) If set, send compute.instance.update notifications on
-#### instance state changes. Valid values are False for no
-#### notifications, True for notifications on any instance
-#### changes.
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
-# notify_api_faults=false
-#### (BoolOpt) If set, send api.fault notifications on caught exceptions in
-#### the API service.
-# notify_on_state_change=<None>
-#### (StrOpt) If set, send compute.instance.update notifications on
-#### instance state changes. Valid values are None for no
-#### notifications, "vm_state" for notifications on VM state
-#### changes, or "vm_and_task_state" for notifications on VM and
-#### task state changes.
+#
+# Options defined in nova.db.sqlalchemy.session
+#
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#sql_connection=sqlite:///$state_path/$sqlite_db
-######## defined in nova.policy ########
+# the filename to use with sqlite (string value)
+#sqlite_db=nova.sqlite
-# policy_file=policy.json
-#### (StrOpt) JSON file representing policy
+# timeout before idle sql connections are reaped (integer
+# value)
+#sql_idle_timeout=3600
-# policy_default_rule=default
-#### (StrOpt) Rule checked when requested rule is not found
+# If passed, use synchronous mode for sqlite (boolean value)
+#sqlite_synchronous=true
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_min_pool_size=1
-######## defined in nova.quota ########
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_max_pool_size=5
-# quota_instances=10
-#### (IntOpt) number of instances allowed per project
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#sql_max_retries=10
-# quota_cores=20
-#### (IntOpt) number of instance cores allowed per project
+# interval between retries of opening a sql connection
+# (integer value)
+#sql_retry_interval=10
-# quota_ram=51200
-#### (IntOpt) megabytes of instance ram allowed per project
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#sql_max_overflow=<None>
-# quota_floating_ips=10
-#### (IntOpt) number of floating ips allowed per project
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#sql_connection_debug=0
-# quota_metadata_items=128
-#### (IntOpt) number of metadata items allowed per instance
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#sql_connection_trace=false
-# quota_injected_files=5
-#### (IntOpt) number of injected files allowed
+# enable the use of eventlet's db_pool for MySQL (boolean
+# value)
+#sql_dbpool_enable=false
-# quota_injected_file_content_bytes=10240
-#### (IntOpt) number of bytes allowed per injected file
-# quota_injected_file_path_bytes=255
-#### (IntOpt) number of bytes allowed per injected file path
+#
+# Options defined in nova.image.glance
+#
-# quota_security_groups=10
-#### (IntOpt) number of security groups per project
+# default glance hostname or ip (string value)
+#glance_host=$my_ip
-# quota_security_group_rules=20
-#### (IntOpt) number of security rules per security group
+# default glance port (integer value)
+#glance_port=9292
-# quota_key_pairs=100
-#### (IntOpt) number of key pairs per user
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
-# reservation_expire=86400
-#### (IntOpt) number of seconds until a reservation expires
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
-# until_refresh=0
-#### (IntOpt) count of reservations until usage is refreshed
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
-# max_age=0
-#### (IntOpt) number of seconds between subsequent usage refreshes
+# Number retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
-# quota_driver=nova.quota.DbQuotaDriver
-#### (StrOpt) default driver to use for quota checks
+#
+# Options defined in nova.image.s3
+#
-######## defined in nova.service ########
+# parent dir for tempdir used for image decryption (string
+# value)
+#image_decryption_dir=/tmp
-# report_interval=10
-#### (IntOpt) seconds between nodes reporting state to datastore
+# hostname or ip for openstack to use when accessing the s3
+# api (string value)
+#s3_host=$my_ip
-# periodic_interval=60
-#### (IntOpt) seconds between running periodic tasks
+# port used when accessing the s3 api (integer value)
+#s3_port=3333
-# periodic_fuzzy_delay=60
-#### (IntOpt) range of seconds to randomly delay when starting the
-#### periodic task scheduler to reduce stampeding. (Disable by
-#### setting to 0)
+# access key to use for s3 server for images (string value)
+#s3_access_key=notchecked
-# ec2_listen=0.0.0.0
-#### (StrOpt) IP address for EC2 API to listen
+# secret key to use for s3 server for images (string value)
+#s3_secret_key=notchecked
-# ec2_listen_port=8773
-#### (IntOpt) port for ec2 api to listen
+# whether to use ssl when talking to s3 (boolean value)
+#s3_use_ssl=false
-# ec2_workers=<None>
-#### (IntOpt) Number of workers for EC2 API service
+# whether to affix the tenant id to the access key when
+# downloading from s3 (boolean value)
+#s3_affix_tenant=false
-# osapi_compute_listen=0.0.0.0
-#### (StrOpt) IP address for OpenStack API to listen
-# osapi_compute_listen_port=8774
-#### (IntOpt) list port for osapi compute
+#
+# Options defined in nova.ipv6.api
+#
-# osapi_compute_workers=<None>
-#### (IntOpt) Number of workers for OpenStack API service
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
-# metadata_manager=nova.api.manager.MetadataManager
-#### (StrOpt) OpenStack metadata service manager
-# metadata_listen=0.0.0.0
-#### (StrOpt) IP address for metadata api to listen
+#
+# Options defined in nova.network
+#
-# metadata_listen_port=8775
-#### (IntOpt) port for metadata api to listen
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
-# metadata_workers=<None>
-#### (IntOpt) Number of workers for metadata service
+#
+# Options defined in nova.network.driver
+#
-######## defined in nova.test ########
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
-# sqlite_clean_db=clean.sqlite
-#### (StrOpt) File name of clean sqlite db
-# fake_tests=true
-#### (BoolOpt) should we use everything for testing
+#
+# Options defined in nova.network.ldapdns
+#
+# URL for ldap server which will store dns entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
-######## defined in nova.wsgi ########
+# user for ldap DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
-# wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
-#### (StrOpt) A python format string that is used as the template to
-#### generate log lines. The following values can be formatted
-#### into it: client_ip, date_time, request_line, status_code,
-#### body_length, wall_seconds.
+# password for ldap DNS (string value)
+#ldap_dns_password=password
+# Hostmaster for ldap dns driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
-######## defined in nova.api.auth ########
+# DNS Servers for ldap dns driver (multi valued)
+#ldap_dns_servers=dns.example.org
-# use_forwarded_for=false
-#### (BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only
-#### enable this if you have a sanitizing proxy.
+# Base DN for DNS entries in ldap (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+# Refresh interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
-######## defined in nova.api.ec2 ########
+# Retry interval (in seconds) for ldap dns driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
-# lockout_attempts=5
-#### (IntOpt) Number of failed auths before lockout.
+# Expiry interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
-# lockout_minutes=15
-#### (IntOpt) Number of minutes to lockout if triggered.
+# Minimum interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
-# lockout_window=15
-#### (IntOpt) Number of minutes for lockout window.
-# keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
-#### (StrOpt) URL to get token from ec2 request.
+#
+# Options defined in nova.network.linux_net
+#
-# ec2_private_dns_show_ip=false
-#### (BoolOpt) Return the IP address as private dns hostname in describe
-#### instances
+# location of flagfile for dhcpbridge (string value)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
-# ec2_strict_validation=true
-#### (BoolOpt) Validate security group names according to EC2 specification
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
-# ec2_timestamp_expiry=300
-#### (IntOpt) Time in seconds before ec2 timestamp expires
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+# MTU setting for vlan (string value)
+#network_device_mtu=<None>
-######## defined in nova.api.metadata.base ########
+# location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
-# config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
-#### (StrOpt) List of metadata versions to skip placing into the config
-#### drive
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=120
-######## defined in nova.api.openstack.compute ########
+# if set, uses specific dns server for dnsmasq (string value)
+#dns_server=<None>
-# allow_instance_snapshots=true
-#### (BoolOpt) Permit instance snapshot operations.
+# A list of dmz range that should be accepted (list value)
+#dmz_cidr=
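+# Example (hypothetical ranges): multiple DMZ ranges are given
+# as a comma-separated list:
+# dmz_cidr=10.0.0.0/24,10.0.1.0/24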
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
-######## defined in nova.api.sizelimit ########
+# Driver used to create ethernet devices. (string value)
+#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
-# osapi_max_request_body_size=114688
-#### (IntOpt) the maximum body size per each osapi request(bytes)
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+# send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
-######## defined in nova.cloudpipe.pipelib ########
+# send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
-# vpn_instance_type=m1.tiny
-#### (StrOpt) Instance type for vpn instances
+# Use a single default gateway. Only the first NIC of the VM
+# will get the default gateway from the DHCP server (boolean
+# value)
+#use_single_default_gateway=false
-# boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
-#### (StrOpt) Template for cloudpipe instance boot script
+# the ip for the metadata api server (string value)
+#metadata_host=$my_ip
-# dmz_net=10.0.0.0
-#### (StrOpt) Network to push into openvpn config
+# the port for the metadata api port (integer value)
+#metadata_port=8775
-# dmz_mask=255.255.255.0
-#### (StrOpt) Netmask to push into openvpn config
+#
+# Options defined in nova.network.manager
+#
-######## defined in nova.common.eventlet_backdoor ########
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
-# backdoor_port=<None>
-#### (IntOpt) port for eventlet backdoor to listen
+# Dns for simple network (string value)
+#flat_network_dns=8.8.4.4
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
-######## defined in nova.compute.manager ########
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
-# instances_path=$state_path/instances
-#### (StrOpt) where instances are stored on disk
+# First VLAN for private networks (integer value)
+#vlan_start=100
-# base_dir_name=_base
-#### (StrOpt) Where cached images are stored under $instances_path.This is
-#### NOT the full path - just a folder name.For per-compute-host
-#### cached images, set to _base_$my_ip
+# vlans will bridge into this interface if set (string value)
+#vlan_interface=<None>
-# console_host=nova
-#### (StrOpt) Console proxy host to use to connect to instances on this
-#### host.
+# Number of networks to support (integer value)
+#num_networks=1
-# live_migration_retry_count=30
-#### (IntOpt) Number of 1 second retries needed in live_migration
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
-# reboot_timeout=0
-#### (IntOpt) Automatically hard reboot an instance if it has been stuck
-#### in a rebooting state longer than N seconds. Set to 0 to
-#### disable.
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
-# instance_build_timeout=0
-#### (IntOpt) Amount of time in seconds an instance can be in BUILD before
-#### going into ERROR status.Set to 0 to disable.
+# Default value for multi_host in networks (boolean value)
+#multi_host=false
-# rescue_timeout=0
-#### (IntOpt) Automatically unrescue an instance after N seconds. Set to 0
-#### to disable.
+# Number of addresses in each private subnet (integer value)
+#network_size=256
-# resize_confirm_window=0
-#### (IntOpt) Automatically confirm resizes after N seconds. Set to 0 to
-#### disable.
+# Floating IP address block (string value)
+#floating_range=4.4.4.0/24
-# host_state_interval=120
-#### (IntOpt) Interval in seconds for querying the host status
+# Default pool for floating ips (string value)
+#default_floating_pool=nova
-# running_deleted_instance_timeout=0
-#### (IntOpt) Number of seconds after being deleted when a running
-#### instance should be considered eligible for cleanup.
+# Fixed IP address block (string value)
+#fixed_range=10.0.0.0/8
-# running_deleted_instance_poll_interval=30
-#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
-#### the cleanup task.
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
-# running_deleted_instance_action=log
-#### (StrOpt) Action to take if a running deleted instance is
-#### detected.Valid options are 'noop', 'log' and 'reap'. Set to
-#### 'noop' to disable.
+# Default IPv4 gateway (string value)
+#gateway=<None>
-# image_cache_manager_interval=40
-#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
-#### the image cache manager.
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
-# heal_instance_info_cache_interval=60
-#### (IntOpt) Number of seconds between instance info_cache self healing
-#### updates
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
-# instance_usage_audit=false
-#### (BoolOpt) Generate periodic compute.instance.exists notifications
+# Seconds after which a deallocated ip is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
-######## defined in nova.compute.resource_tracker ########
+# Autoassigning floating ip to VM (boolean value)
+#auto_assign_floating_ip=false
-# reserved_host_disk_mb=0
-#### (IntOpt) Amount of disk in MB to reserve for the host
+# Network host to use for ip allocation in flat modes (string
+# value)
+#network_host=nova
-# reserved_host_memory_mb=512
-#### (IntOpt) Amount of memory in MB to reserve for the host
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
-# compute_stats_class=nova.compute.stats.Stats
-#### (StrOpt) Class that will manage stats for the local compute host
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi-hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
-######## defined in nova.console.manager ########
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=false
-# console_driver=nova.console.xvp.XVPConsoleProxy
-#### (StrOpt) Driver to use for the console proxy
+# If True in multi_host mode, all compute hosts share the same
+# dhcp address. (boolean value)
+#share_dhcp_address=false
-# stub_compute=false
-#### (BoolOpt) Stub calls to compute worker for tests
+# If True, when a DNS entry must be updated, a fanout cast is
+# sent to all network hosts to update their DNS entries in
+# multi-host mode (boolean value)
+#update_dns_entries=false
-# console_public_hostname=nova
-#### (StrOpt) Publicly visible name for this console host
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
+# domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
-######## defined in nova.console.vmrc ########
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
-# console_vmrc_port=443
-#### (IntOpt) port for VMware VMRC connections
+# full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-# console_vmrc_error_retries=10
-#### (IntOpt) number of retries for retrieving VMRC information
+# DNS Zone (domain) to use for instance IPs (string value)
+#instance_dns_domain=
+# full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-######## defined in nova.console.xvp ########
-# console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
-#### (StrOpt) XVP conf template
+#
+# Options defined in nova.network.quantumv2.api
+#
-# console_xvp_conf=/etc/xvp.conf
-#### (StrOpt) generated XVP conf file
+# URL for connecting to quantum (string value)
+#quantum_url=http://127.0.0.1:9696
-# console_xvp_pid=/var/run/xvp.pid
-#### (StrOpt) XVP master process pid file
+# timeout value for connecting to quantum in seconds (integer
+# value)
+#quantum_url_timeout=30
-# console_xvp_log=/var/log/xvp.log
-#### (StrOpt) XVP log file
+# username for connecting to quantum in admin context (string
+# value)
+#quantum_admin_username=<None>
-# console_xvp_multiplex_port=5900
-#### (IntOpt) port for XVP to multiplex VNC connections on
+# password for connecting to quantum in admin context (string
+# value)
+#quantum_admin_password=<None>
+# tenant name for connecting to quantum in admin context
+# (string value)
+#quantum_admin_tenant_name=<None>
-######## defined in nova.consoleauth ########
+# auth url for connecting to quantum in admin context (string
+# value)
+#quantum_admin_auth_url=http://localhost:5000/v2.0
-# consoleauth_topic=consoleauth
-#### (StrOpt) the topic console auth proxy nodes listen on
+# auth strategy for connecting to quantum in admin context
+# (string value)
+#quantum_auth_strategy=keystone
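+# Example (hypothetical credentials): a keystone-backed setup
+# would typically set the admin context options together:
+# quantum_admin_username=quantum
+# quantum_admin_password=secret
+# quantum_admin_tenant_name=service
+# quantum_admin_auth_url=http://keystone.example.com:5000/v2.0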
-######## defined in nova.consoleauth.manager ########
+#
+# Options defined in nova.network.rpcapi
+#
-# console_token_ttl=600
-#### (IntOpt) How many seconds before deleting tokens
+# the topic network nodes listen on (string value)
+#network_topic=network
-# consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
-#### (StrOpt) Manager for console auth
+#
+# Options defined in nova.objectstore.s3server
+#
-######## defined in nova.db.api ########
+# path to s3 buckets (string value)
+#buckets_path=$state_path/buckets
-# db_backend=sqlalchemy
-#### (StrOpt) The backend to use for db
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
-# enable_new_services=true
-#### (BoolOpt) Services to be added to the available pool on create
+# port for s3 api to listen (integer value)
+#s3_listen_port=3333
-# instance_name_template=instance-%08x
-#### (StrOpt) Template string to be used to generate instance names
-# snapshot_name_template=snapshot-%s
-#### (StrOpt) Template string to be used to generate snapshot names
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+# port for eventlet backdoor to listen (integer value)
+#backdoor_port=<None>
-######## defined in nova.db.base ########
-# db_driver=nova.db
-#### (StrOpt) driver to use for database access
+#
+# Options defined in nova.openstack.common.lockutils
+#
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
-######## defined in nova.image.s3 ########
+# Directory to use for lock files (string value)
+#lock_path=/usr/lib/python/site-packages/nova/openstack
-# image_decryption_dir=/tmp
-#### (StrOpt) parent dir for tempdir used for image decryption
-# s3_access_key=notchecked
-#### (StrOpt) access key to use for s3 server for images
+#
+# Options defined in nova.openstack.common.log
+#
-# s3_secret_key=notchecked
-#### (StrOpt) secret key to use for s3 server for images
+# Log output to standard error (boolean value)
+#use_stderr=true
-# s3_use_ssl=false
-#### (BoolOpt) whether to use ssl when talking to s3
+# Default file mode used when creating log files (string
+# value)
+#logfile_mode=0644
-# s3_affix_tenant=false
-#### (BoolOpt) whether to affix the tenant id to the access key when
-#### downloading from s3
+# format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
+# format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-######## defined in nova.ipv6.api ########
+# data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-# ipv6_backend=rfc2462
-#### (StrOpt) Backend to use for IPv6 generation
+# prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
+# list of logger=LEVEL pairs (list value)
+#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
-######## defined in nova.network.ldapdns ########
+# publish error events (boolean value)
+#publish_errors=false
-# ldap_dns_url=ldap://ldap.example.com:389
-#### (StrOpt) URL for ldap server which will store dns entries
+# make deprecations fatal (boolean value)
+#fatal_deprecations=false
-# ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
-#### (StrOpt) user for ldap DNS
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
-# ldap_dns_password=password
-#### (StrOpt) password for ldap DNS
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
-# ldap_dns_soa_hostmaster=hostmaster@example.org
-#### (StrOpt) Hostmaster for ldap dns driver Statement of Authority
-# ldap_dns_servers=dns.example.org
-#### (MultiStrOpt) DNS Servers for ldap dns driver
+#
+# Options defined in nova.openstack.common.notifier.api
+#
-# ldap_dns_base_dn=ou=hosts,dc=example,dc=org
-#### (StrOpt) Base DN for DNS entries in ldap
+# Driver or drivers to handle sending notifications (multi
+# valued)
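+# This option has no default; to emit notifications over RPC,
+# for example, the notifier shipped in this tree can be named:
+# notification_driver=nova.openstack.common.notifier.rpc_notifier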
-# ldap_dns_soa_refresh=1800
-#### (StrOpt) Refresh interval (in seconds) for ldap dns driver Statement
-#### of Authority
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
-# ldap_dns_soa_retry=3600
-#### (StrOpt) Retry interval (in seconds) for ldap dns driver Statement of
-#### Authority
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=$host
-# ldap_dns_soa_expiry=86400
-#### (StrOpt) Expiry interval (in seconds) for ldap dns driver Statement
-#### of Authority
-# ldap_dns_soa_minimum=7200
-#### (StrOpt) Minimum interval (in seconds) for ldap dns driver Statement
-#### of Authority
+#
+# Options defined in nova.openstack.common.notifier.rpc_notifier
+#
+# AMQP topic used for openstack notifications (list value)
+#notification_topics=notifications
-######## defined in nova.network.linux_net ########
-# dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
-#### (StrOpt) location of flagfile for dhcpbridge
+#
+# Options defined in nova.openstack.common.rpc
+#
-# networks_path=$state_path/networks
-#### (StrOpt) Location to keep network config files
+# The messaging module to use, defaults to kombu. (string
+# value)
+#rpc_backend=nova.openstack.common.rpc.impl_kombu
-# public_interface=eth0
-#### (StrOpt) Interface for public IP addresses
+# Size of RPC thread pool (integer value)
+#rpc_thread_pool_size=64
-# network_device_mtu=<None>
-#### (StrOpt) MTU setting for vlan
+# Size of RPC connection pool (integer value)
+#rpc_conn_pool_size=30
-# dhcpbridge=$bindir/nova-dhcpbridge
-#### (StrOpt) location of nova-dhcpbridge
+# Seconds to wait for a response from call or multicall
+# (integer value)
+#rpc_response_timeout=60
-# routing_source_ip=$my_ip
-#### (StrOpt) Public IP of network host
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
-# dhcp_lease_time=120
-#### (IntOpt) Lifetime of a DHCP lease in seconds
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception,cinder.exception,exceptions
-# dns_server=<None>
-#### (StrOpt) if set, uses specific dns server for dnsmasq
+# If passed, use a fake RabbitMQ provider (boolean value)
+#fake_rabbit=false
-# dmz_cidr=
-#### (ListOpt) A list of dmz range that should be accepted
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# (string value)
+#control_exchange=openstack
-# dnsmasq_config_file=
-#### (StrOpt) Override the default dnsmasq settings with this file
-# linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
-#### (StrOpt) Driver used to create ethernet devices.
+#
+# Options defined in nova.openstack.common.rpc.impl_kombu
+#
-# linuxnet_ovs_integration_bridge=br-int
-#### (StrOpt) Name of Open vSwitch bridge used with linuxnet
+# SSL version to use (valid only if SSL enabled) (string
+# value)
+#kombu_ssl_version=
-# send_arp_for_ha=false
-#### (BoolOpt) send gratuitous ARPs for HA setup
+# SSL key file (valid only if SSL enabled) (string value)
+#kombu_ssl_keyfile=
-# send_arp_for_ha_count=3
-#### (IntOpt) send this many gratuitous ARPs for HA setup
+# SSL cert file (valid only if SSL enabled) (string value)
+#kombu_ssl_certfile=
-# use_single_default_gateway=false
-#### (BoolOpt) Use single default gateway. Only first nic of vm will get
-#### default gateway from dhcp server
+# SSL certification authority file (valid only if SSL enabled)
+# (string value)
+#kombu_ssl_ca_certs=
+# The RabbitMQ broker address where a single node is used
+# (string value)
+#rabbit_host=localhost
-######## defined in nova.network.manager ########
+# The RabbitMQ broker port where a single node is used
+# (integer value)
+#rabbit_port=5672
-# flat_network_bridge=<None>
-#### (StrOpt) Bridge for simple network instances
+# RabbitMQ HA cluster host:port pairs (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
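+# Example (hypothetical hosts): an HA cluster is listed as
+# host:port pairs:
+# rabbit_hosts=rabbit1.example.com:5672,rabbit2.example.com:5672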
-# flat_network_dns=8.8.4.4
-#### (StrOpt) Dns for simple network
+# connect over SSL for RabbitMQ (boolean value)
+#rabbit_use_ssl=false
-# flat_injected=false
-#### (BoolOpt) Whether to attempt to inject network setup into guest
+# the RabbitMQ userid (string value)
+#rabbit_userid=guest
-# flat_interface=<None>
-#### (StrOpt) FlatDhcp will bridge into this interface if set
+# the RabbitMQ password (string value)
+#rabbit_password=guest
-# vlan_start=100
-#### (IntOpt) First VLAN for private networks
+# the RabbitMQ virtual host (string value)
+#rabbit_virtual_host=/
-# vlan_interface=<None>
-#### (StrOpt) vlans will bridge into this interface if set
+# how frequently to retry connecting with RabbitMQ (integer
+# value)
+#rabbit_retry_interval=1
-# num_networks=1
-#### (IntOpt) Number of networks to support
+# how long to backoff for between retries when connecting to
+# RabbitMQ (integer value)
+#rabbit_retry_backoff=2
-# vpn_ip=$my_ip
-#### (StrOpt) Public IP for the cloudpipe VPN servers
+# maximum retries with trying to connect to RabbitMQ (the
+# default of 0 implies an infinite retry count) (integer
+# value)
+#rabbit_max_retries=0
-# vpn_start=1000
-#### (IntOpt) First Vpn port for private networks
+# use durable queues in RabbitMQ (boolean value)
+#rabbit_durable_queues=false
-# multi_host=false
-#### (BoolOpt) Default value for multi_host in networks
+# use H/A queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe the RabbitMQ database when changing this option.
+# (boolean value)
+#rabbit_ha_queues=false
-# network_size=256
-#### (IntOpt) Number of addresses in each private subnet
-# floating_range=4.4.4.0/24
-#### (StrOpt) Floating IP address block
+#
+# Options defined in nova.openstack.common.rpc.impl_qpid
+#
-# default_floating_pool=nova
-#### (StrOpt) Default pool for floating ips
+# Qpid broker hostname (string value)
+#qpid_hostname=localhost
-# fixed_range=10.0.0.0/8
-#### (StrOpt) Fixed IP address block
+# Qpid broker port (string value)
+#qpid_port=5672
-# fixed_range_v6=fd00::/48
-#### (StrOpt) Fixed IPv6 address block
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
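+# Example (hypothetical hosts): as with rabbit_hosts, an HA
+# cluster is listed as host:port pairs:
+# qpid_hosts=qpid1.example.com:5672,qpid2.example.com:5672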
-# gateway=<None>
-#### (StrOpt) Default IPv4 gateway
+# Username for qpid connection (string value)
+#qpid_username=
-# gateway_v6=<None>
-#### (StrOpt) Default IPv6 gateway
+# Password for qpid connection (string value)
+#qpid_password=
-# cnt_vpn_clients=0
-#### (IntOpt) Number of addresses reserved for vpn clients
+# Space separated list of SASL mechanisms to use for auth
+# (string value)
+#qpid_sasl_mechanisms=
-# fixed_ip_disassociate_timeout=600
-#### (IntOpt) Seconds after which a deallocated ip is disassociated
+# Seconds between connection keepalive heartbeats (integer
+# value)
+#qpid_heartbeat=60
-# create_unique_mac_address_attempts=5
-#### (IntOpt) Number of attempts to create unique mac address
+# Transport to use, either 'tcp' or 'ssl' (string value)
+#qpid_protocol=tcp
-# auto_assign_floating_ip=false
-#### (BoolOpt) Autoassigning floating ip to VM
+# Disable Nagle algorithm (boolean value)
+#qpid_tcp_nodelay=true
-# network_host=nova
-#### (StrOpt) Network host to use for ip allocation in flat modes
-# fake_call=false
-#### (BoolOpt) If True, skip using the queue and make local calls
+#
+# Options defined in nova.openstack.common.rpc.impl_zmq
+#
-# force_dhcp_release=false
-#### (BoolOpt) If True, send a dhcp release on instance termination
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
-# dhcp_domain=novalocal
-#### (StrOpt) domain to use for building the hostnames
+# MatchMaker driver (string value)
+#rpc_zmq_matchmaker=nova.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-# l3_lib=nova.network.l3.LinuxNetL3
-#### (StrOpt) Indicates underlying L3 management library
+# ZeroMQ receiver listening port (integer value)
+#rpc_zmq_port=9501
-# update_dns_entries=false
-#### (BoolOpt) If True, when a DNS entry must be updated, it sends a fanout
-#### cast to all network hosts to update their DNS entries in multi
-#### host mode
+# Number of ZeroMQ contexts, defaults to 1 (integer value)
+#rpc_zmq_contexts=1
-# dns_update_periodic_interval=-1
-#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
-#### updates to DNS entries
+# Directory for holding IPC sockets (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=nova
-######## defined in nova.network.quantumv2.api ########
-# quantum_url=http://127.0.0.1:9696
-#### (StrOpt) URL for connecting to quantum
+#
+# Options defined in nova.openstack.common.rpc.matchmaker
+#
-# quantum_url_timeout=30
-#### (IntOpt) timeout value for connecting to quantum in seconds
+# Matchmaker ring file (JSON) (string value)
+#matchmaker_ringfile=/etc/nova/matchmaker_ring.json
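+# Example (hypothetical hosts; assumes the ring file is a JSON
+# object mapping each topic to the hosts serving it):
+# {"scheduler": ["sched1", "sched2"], "network": ["net1"]}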
-# quantum_admin_username=<None>
-#### (StrOpt) username for connecting to quantum in admin context
-# quantum_admin_password=<None>
-#### (StrOpt) password for connecting to quantum in admin context
+#
+# Options defined in nova.scheduler.driver
+#
-# quantum_admin_tenant_name=<None>
-#### (StrOpt) tenant name for connecting to quantum in admin context
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
-# quantum_admin_auth_url=http://localhost:5000/v2.0
-#### (StrOpt) auth url for connecting to quantum in admin context
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
-# quantum_auth_strategy=keystone
-#### (StrOpt) auth strategy for connecting to quantum in admin context
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
-######## defined in nova.objectstore.s3server ########
+# Virtual CPU to Physical CPU allocation ratio (floating point
+# value)
+#cpu_allocation_ratio=16.0
-# buckets_path=$state_path/buckets
-#### (StrOpt) path to s3 buckets
-# s3_listen=0.0.0.0
-#### (StrOpt) IP address for S3 API to listen
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
-# s3_listen_port=3333
-#### (IntOpt) port for s3 api to listen
+# virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
-######## defined in nova.openstack.common.lockutils ########
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
-# disable_process_locking=false
-#### (BoolOpt) Whether to disable inter-process locks
+# Ignore hosts that have too many
+# builds/resizes/snaps/migrations (integer value)
+#max_io_ops_per_host=8
-# lock_path=/usr/lib/python/site-packages/nova/openstack
-#### (StrOpt) Directory to use for lock files
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
-######## defined in nova.openstack.common.log ########
+# Images to run on isolated host (list value)
+#isolated_images=
-# logdir=<None>
-#### (StrOpt) Log output to a per-service log file in named directory
+# Host reserved for specific images (list value)
+#isolated_hosts=
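+# Example (hypothetical image UUID and hostnames): an image is
+# pinned to dedicated hosts by setting both lists:
+# isolated_images=deadbeef-dead-beef-dead-beefdeadbeef
+# isolated_hosts=compute-isolated1,compute-isolated2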
-# logfile=<None>
-#### (StrOpt) Log output to a named file
-# use_stderr=true
-#### (BoolOpt) Log output to standard error
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
-# logfile_mode=0644
-#### (StrOpt) Default file mode used when creating log files
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
-# logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
-#### (StrOpt) format string to use for log messages with context
-# logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-#### (StrOpt) format string to use for log messages without context
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
-# logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-#### (StrOpt) data to append to log format when level is DEBUG
+# virtual ram to physical ram allocation ratio (floating point
+# value)
+#ram_allocation_ratio=1.5
-# logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
-#### (StrOpt) prefix each line of exception output with this format
-# default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
-#### (ListOpt) list of logger=LEVEL pairs
+#
+# Options defined in nova.scheduler.host_manager
+#
-# publish_errors=false
-#### (BoolOpt) publish error events
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
-# fatal_deprecations=false
-#### (BoolOpt) make deprecations fatal
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
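+# Example: to also skip hosts busy with many concurrent disk
+# operations, the IoOpsFilter shipped with nova could be
+# appended to the defaults:
+# scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,IoOpsFilter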
-# instance_format="[instance: %(uuid)s] "
-#### (StrOpt) If an instance is passed with the log message, format it
-#### like this
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
-# instance_uuid_format="[instance: %(uuid)s] "
-#### (StrOpt) If an instance UUID is passed with the log message, format
-#### it like this
+#
+# Options defined in nova.scheduler.manager
+#
-######## defined in nova.openstack.common.notifier.api ########
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-#### (MultiStrOpt) Driver or drivers to handle sending notifications
-# default_notification_level=INFO
-#### (StrOpt) Default notification level for outgoing notifications
+#
+# Options defined in nova.scheduler.multi
+#
-# default_publisher_id=$host
-#### (StrOpt) Default publisher_id for outgoing notifications
+# Driver to use for scheduling compute calls (string value)
+#compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+# Default driver to use for scheduling calls (string value)
+#default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-######## defined in nova.openstack.common.notifier.rabbit_notifier ########
-# notification_topics=notifications
-#### (ListOpt) AMQP topic used for openstack notifications
+#
+# Options defined in nova.scheduler.rpcapi
+#
+# the topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
-######## defined in nova.openstack.common.rpc ########
-# rpc_backend=nova.openstack.common.rpc.impl_kombu
-#### (StrOpt) The messaging module to use, defaults to kombu.
+#
+# Options defined in nova.scheduler.scheduler_options
+#
-# rpc_thread_pool_size=64
-#### (IntOpt) Size of RPC thread pool
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
-# rpc_conn_pool_size=30
-#### (IntOpt) Size of RPC connection pool
-# rpc_response_timeout=60
-#### (IntOpt) Seconds to wait for a response from call or multicall
+#
+# Options defined in nova.scheduler.weights.least_cost
+#
-# rpc_cast_timeout=30
-#### (IntOpt) Seconds to wait before a cast expires (TTL). Only supported
-#### by impl_zmq.
+# Which cost functions the LeastCostScheduler should use (list
+# value)
+#least_cost_functions=<None>
-# allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception,cinder.exception
-#### (ListOpt) Modules of exceptions that are permitted to be recreatedupon
-#### receiving exception data from an rpc call.
+# How much weight to give the noop cost function (floating
+# point value)
+#noop_cost_fn_weight=1.0
-# fake_rabbit=false
-#### (BoolOpt) If passed, use a fake RabbitMQ provider
+# How much weight to give the fill-first cost function. A
+# negative value will reverse behavior: e.g. spread-first
+# (floating point value)
+#compute_fill_first_cost_fn_weight=<None>
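+# Example: a negative weight (illustrative value) reverses the
+# behavior to spread-first:
+# compute_fill_first_cost_fn_weight=-1.0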
-######## defined in nova.openstack.common.rpc.impl_kombu ########
+#
+# Options defined in nova.scheduler.weights.ram
+#
-# kombu_ssl_version=
-#### (StrOpt) SSL version to use (valid only if SSL enabled)
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
-# kombu_ssl_keyfile=
-#### (StrOpt) SSL key file (valid only if SSL enabled)
-# kombu_ssl_certfile=
-#### (StrOpt) SSL cert file (valid only if SSL enabled)
+#
+# Options defined in nova.servicegroup.api
+#
-# kombu_ssl_ca_certs=
-#### (StrOpt) SSL certification authority file (valid only if SSL enabled)
+# The driver for servicegroup service. (string value)
+#servicegroup_driver=db
-# rabbit_host=localhost
-#### (StrOpt) The RabbitMQ broker address where a single node is used
-# rabbit_port=5672
-#### (IntOpt) The RabbitMQ broker port where a single node is used
+#
+# Options defined in nova.virt.baremetal.db.api
+#
-# rabbit_hosts=$rabbit_host:$rabbit_port
-#### (ListOpt) RabbitMQ HA cluster host:port pairs
+# The backend to use for db (string value)
+#baremetal_db_backend=sqlalchemy
-# rabbit_use_ssl=false
-#### (BoolOpt) connect over SSL for RabbitMQ
-# rabbit_userid=guest
-#### (StrOpt) the RabbitMQ userid
+#
+# Options defined in nova.virt.configdrive
+#
-# rabbit_password=guest
-#### (StrOpt) the RabbitMQ password
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
-# rabbit_virtual_host=/
-#### (StrOpt) the RabbitMQ virtual host
+# Where to put temporary files associated with config drive
+# creation (string value)
+#config_drive_tempdir=<None>
-# rabbit_retry_interval=1
-#### (IntOpt) how frequently to retry connecting with RabbitMQ
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=<None>
-# rabbit_retry_backoff=2
-#### (IntOpt) how long to backoff for between retries when connecting to
-#### RabbitMQ
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
-# rabbit_max_retries=0
-#### (IntOpt) maximum retries with trying to connect to RabbitMQ (the
-#### default of 0 implies an infinite retry count)
-# rabbit_durable_queues=false
-#### (BoolOpt) use durable queues in RabbitMQ
+#
+# Options defined in nova.virt.disk.api
+#
-# rabbit_ha_queues=false
-#### (BoolOpt) use H/A queues in RabbitMQ (x-ha-policy: all).You need to
-#### wipe RabbitMQ database when changing this option.
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
+# mkfs commands for ephemeral device. The format is
+# <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=default=mkfs.ext3 -L %(fs_label)s -F %(target)s
+#virt_mkfs=linux=mkfs.ext3 -L %(fs_label)s -F %(target)s
+#virt_mkfs=windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
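+# Example (hypothetical os_type "myos"): additional mappings
+# are added by repeating the option:
+# virt_mkfs=myos=mkfs.ext4 -L %(fs_label)s -F %(target)s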
-######## defined in nova.openstack.common.rpc.impl_qpid ########
-# qpid_hostname=localhost
-#### (StrOpt) Qpid broker hostname
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
-# qpid_port=5672
-#### (StrOpt) Qpid broker port
+# time to wait for an NBD device to come up (integer value)
+#timeout_nbd=10
-# qpid_username=
-#### (StrOpt) Username for qpid connection
-# qpid_password=
-#### (StrOpt) Password for qpid connection
+#
+# Options defined in nova.virt.driver
+#
-# qpid_sasl_mechanisms=
-#### (StrOpt) Space separated list of SASL mechanisms to use for auth
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMWareESXDriver (string value)
+#compute_driver=<None>
-# qpid_reconnect=true
-#### (BoolOpt) Automatically reconnect
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
-# qpid_reconnect_timeout=0
-#### (IntOpt) Reconnection timeout in seconds
+# Whether to use cow images (boolean value)
+#use_cow_images=true
-# qpid_reconnect_limit=0
-#### (IntOpt) Max reconnections before giving up
-# qpid_reconnect_interval_min=0
-#### (IntOpt) Minimum seconds between reconnection attempts
+#
+# Options defined in nova.virt.firewall
+#
-# qpid_reconnect_interval_max=0
-#### (IntOpt) Maximum seconds between reconnection attempts
+# Firewall driver (defaults to hypervisor-specific iptables
+# driver) (string value)
+#firewall_driver=<None>
-# qpid_reconnect_interval=0
-#### (IntOpt) Equivalent to setting max and min to the same value
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
-# qpid_heartbeat=60
-#### (IntOpt) Seconds between connection keepalive heartbeats
-# qpid_protocol=tcp
-#### (StrOpt) Transport to use, either 'tcp' or 'ssl'
+#
+# Options defined in nova.virt.hyperv.vmops
+#
-# qpid_tcp_nodelay=true
-#### (BoolOpt) Disable Nagle algorithm
+# Default vSwitch name; if none is provided, the first
+# external one is used (string value)
+#vswitch_name=<None>
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
-######## defined in nova.openstack.common.rpc.impl_zmq ########
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
-# rpc_zmq_bind_address=*
-#### (StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet
-#### interface, or IP. The "host" option should point or resolve
-#### to this address.
+# qemu-img is used to convert between different image types
+# (string value)
+#qemu_img_cmd=qemu-img.exe
-# rpc_zmq_matchmaker=nova.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-#### (StrOpt) MatchMaker driver
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
-# rpc_zmq_port=9501
-#### (IntOpt) ZeroMQ receiver listening port
-# rpc_zmq_contexts=1
-#### (IntOpt) Number of ZeroMQ contexts, defaults to 1
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
-# rpc_zmq_ipc_dir=/var/run/openstack
-#### (StrOpt) Directory for holding IPC sockets
+# The number of times to retry attaching a volume (integer
+# value)
+#hyperv_attaching_volume_retry_count=10
-# rpc_zmq_host=nova
-#### (StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP
-#### address. Must match "host" option, if running Nova.
+# Number of seconds to wait between volume attachment
+# attempts (integer value)
+#hyperv_wait_between_attach_retry=5
+# Force volumeutils v1 (boolean value)
+#force_volumeutils_v1=false
-######## defined in nova.openstack.common.rpc.matchmaker ########
-# matchmaker_ringfile=/etc/nova/matchmaker_ring.json
-#### (StrOpt) Matchmaker ring file (JSON)
+#
+# Options defined in nova.virt.images
+#
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
-######## defined in nova.scheduler.driver ########
-# scheduler_host_manager=nova.scheduler.host_manager.HostManager
-#### (StrOpt) The scheduler host manager class to use
+#
+# Options defined in nova.virt.libvirt.driver
+#
-# scheduler_max_attempts=3
-#### (IntOpt) Maximum number of attempts to schedule an instance
+# Rescue ami image (string value)
+#rescue_image_id=<None>
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
-######## defined in nova.scheduler.filters.core_filter ########
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
-# cpu_allocation_ratio=16.0
-#### (FloatOpt) Virtual CPU to Physical CPU allocation ratio
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen) (string value)
+#libvirt_type=kvm
+# Override the default libvirt URI (which is dependent on
+# libvirt_type) (string value)
+#libvirt_uri=
-######## defined in nova.scheduler.filters.disk_filter ########
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+#libvirt_inject_password=false
-# disk_allocation_ratio=1.0
-#### (FloatOpt) virtual disk to physical disk allocation ratio
+# Inject the ssh public key at boot time (boolean value)
+#libvirt_inject_key=true
+# The partition to inject to: -1 => inspect (libguestfs
+# only), 0 => not partitioned, >0 => partition number (integer
+# value)
+#libvirt_inject_partition=1
-######## defined in nova.scheduler.filters.io_ops_filter ########
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
-# max_io_ops_per_host=8
-#### (IntOpt) Ignore hosts that have too many
-#### builds/resizes/snaps/migrations
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
-######## defined in nova.scheduler.filters.num_instances_filter ########
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC
-# max_instances_per_host=50
-#### (IntOpt) Ignore hosts that have too many instances
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
+# Snapshot image format (valid options are: raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
-######## defined in nova.scheduler.filters.ram_filter ########
+# The libvirt VIF driver to configure the VIFs. (string value)
+#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtBridgeDriver
-# ram_allocation_ratio=1.5
-#### (FloatOpt) virtual ram to physical ram allocation ratio
+# Libvirt handlers for remote volumes. (list value)
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume_nfs.NfsVolumeDriver
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on libvirt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#libvirt_disk_prefix=<None>
-######## defined in nova.scheduler.filters.trusted_filter ########
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#libvirt_wait_soft_reboot_seconds=120
-# attestation_server=<None>
-#### (StrOpt) attestation server http
+# Use a separate OS thread pool to make non-blocking libvirt
+# calls (boolean value)
+#libvirt_nonblocking=true
-# attestation_server_ca_file=<None>
-#### (StrOpt) attestation server Cert file for Identity verification
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If libvirt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#libvirt_cpu_mode=<None>
-# attestation_port=8443
-#### (StrOpt) attestation server port
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"
+# (string value)
+#libvirt_cpu_model=<None>
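+# Example: to expose one named model from the cpu_map.xml list
+# (here "Nehalem") to all guests, set both options:
+# libvirt_cpu_mode=custom
+# libvirt_cpu_model=Nehalem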
-# attestation_api_url=/OpenAttestationWebServices/V1.0
-#### (StrOpt) attestation web API URL
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#libvirt_snapshots_directory=$instances_path/snapshots
-# attestation_auth_blob=<None>
-#### (StrOpt) attestation authorization blob - must change
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
-######## defined in nova.scheduler.host_manager ########
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
-# scheduler_available_filters=nova.scheduler.filters.standard_filters
-#### (MultiStrOpt) Filter classes available to the scheduler which may be
-#### specified more than once. An entry of
-#### "nova.scheduler.filters.standard_filters" maps to all
-#### filters included with nova.
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# default. If default is specified, then use_cow_images flag
+# is used instead of this one. (string value)
+#libvirt_images_type=default
-# scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
-#### (ListOpt) Which filter class names to use for filtering hosts when not
-#### specified in the request.
+# LVM Volume Group that is used for VM images, when you
+# specify libvirt_images_type=lvm. (string value)
+#libvirt_images_volume_group=<None>
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#libvirt_sparse_logical_volumes=false
-######## defined in nova.scheduler.least_cost ########
-# least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn
-#### (ListOpt) Which cost functions the LeastCostScheduler should use
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
-# noop_cost_fn_weight=1.0
-#### (FloatOpt) How much weight to give the noop cost function
+# Where cached images are stored under $instances_path. This is
+# NOT the full path - just a folder name. For per-compute-host
+# cached images, set to _base_$my_ip (string value)
+#base_dir_name=_base
-# compute_fill_first_cost_fn_weight=-1.0
-#### (FloatOpt) How much weight to give the fill-first cost function. A
-#### negative value will reverse behavior: e.g. spread-first
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$base_dir_name/%(image)s.info
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
-######## defined in nova.scheduler.manager ########
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
-# scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-#### (StrOpt) Default driver to use for the scheduler
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
-######## defined in nova.scheduler.multi ########
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
-# compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-#### (StrOpt) Driver to use for scheduling compute calls
-# default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-#### (StrOpt) Default driver to use for scheduling calls
+#
+# Options defined in nova.virt.libvirt.vif
+#
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#libvirt_ovs_bridge=br-int
-######## defined in nova.scheduler.scheduler_options ########
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#libvirt_use_virtio_for_bridges=true
-# scheduler_json_config_location=
-#### (StrOpt) Absolute path to scheduler configuration JSON file.
+#
+# Options defined in nova.virt.libvirt.volume
+#
-######## defined in nova.virt.baremetal.driver ########
+# number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=3
-# baremetal_type=baremetal
-#### (StrOpt) baremetal domain type
+# the RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+# the libvirt uuid of the secret for rbd_user volumes (string
+# value)
+#rbd_secret_uuid=<None>
-######## defined in nova.virt.baremetal.nodes ########
-# baremetal_driver=tilera
-#### (StrOpt) Bare-metal driver runs on
+#
+# Options defined in nova.virt.libvirt.volume_nfs
+#
+# Base dir where nfs is expected to be mounted on compute
+# (string value)
+#nfs_mount_point_base=$state_path/mnt
-######## defined in nova.virt.baremetal.tilera ########
-# tile_monitor=/usr/local/TileraMDE/bin/tile-monitor
-#### (StrOpt) Tilera command line program for Bare-metal driver
+#
+# Options defined in nova.virt.powervm.driver
+#
+# PowerVM manager type (ivm, hmc) (string value)
+#powervm_mgr_type=ivm
-######## defined in nova.virt.configdrive ########
+# PowerVM manager host or ip (string value)
+#powervm_mgr=<None>
-# config_drive_format=iso9660
-#### (StrOpt) Config drive format. One of iso9660 (default) or vfat
+# PowerVM manager user name (string value)
+#powervm_mgr_user=<None>
-# config_drive_tempdir=<None>
-#### (StrOpt) Where to put temporary files associated with config drive
-#### creation
+# PowerVM manager user password (string value)
+#powervm_mgr_passwd=<None>
-# force_config_drive=<None>
-#### (StrOpt) Set to force injection to take place on a config drive (if
-#### set, valid options are: always)
+# PowerVM image remote path (string value)
+#powervm_img_remote_path=<None>
+# Local directory to download glance images to (string value)
+#powervm_img_local_path=<None>
-######## defined in nova.virt.disk.api ########
-# injected_network_template=$pybasedir/nova/virt/interfaces.template
-#### (StrOpt) Template file for injected network
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
-# img_handlers=loop,nbd,guestfs
-#### (ListOpt) Order of methods used to mount disk images
+# URL for connection to VMWare ESX host. Required if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_ip=<None>
-# virt_mkfs=default=mkfs.ext3 -L %(fs_label)s -F %(target)s
-# virt_mkfs=linux=mkfs.ext3 -L %(fs_label)s -F %(target)s
-# virt_mkfs=windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
-#### (MultiStrOpt) mkfs commands for ephemeral device. The format is
-#### <os_type>=<mkfs command>
+# Username for connection to VMWare ESX host. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_username=<None>
+# Password for connection to VMWare ESX host. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_password=<None>
-######## defined in nova.virt.disk.nbd ########
+# The interval used for polling of remote tasks. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (floating point
+# value)
+#vmwareapi_task_poll_interval=5.0
-# timeout_nbd=10
-#### (IntOpt) time to wait for a NBD device coming up
+# The number of times to retry on failures such as socket
+# errors. Used only if compute_driver is
+# vmwareapi.VMWareESXDriver. (integer value)
+#vmwareapi_api_retry_count=10
-######## defined in nova.virt.firewall ########
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
-# firewall_driver=<None>
-#### (StrOpt) Firewall driver (defaults to hypervisor specific iptables
-#### driver)
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vmwareapi_vlan_interface=vmnic0
-# allow_same_net_traffic=true
-#### (BoolOpt) Whether to allow network traffic from same network
+#
+# Options defined in nova.virt.vmwareapi.vim
+#
-######## defined in nova.virt.hyperv.vmops ########
+# VIM Service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Needed to work around a bug
+# in the default WSDL shipped with vSphere ESX 4.1; refer to
+# the readme-vmware document for setup. (string value)
+#vmwareapi_wsdl_loc=<None>
-# vswitch_name=<None>
-#### (StrOpt) Default vSwitch Name, if none provided first external is
-#### used
-# limit_cpu_features=false
-#### (BoolOpt) required for live migration among hosts with different CPU
-#### features
+#
+# Options defined in nova.virt.xenapi.agent
+#
+# number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
-######## defined in nova.virt.hyperv.volumeops ########
+# number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
-# hyperv_attaching_volume_retry_count=10
-#### (IntOpt) The number of times we retry on attaching volume
+# number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
-# hyperv_wait_between_attach_retry=5
-#### (IntOpt) The seconds to wait between an volume attachment attempt
+# Specifies the path in which the xenapi guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#xenapi_agent_path=usr/sbin/xe-update-networking
+# Disable XenAPI agent. Reduces the amount of time it takes
+# nova to detect that a VM has started, when that VM does not
+# have the agent installed (boolean value)
+#xenapi_disable_agent=false
-######## defined in nova.virt.images ########
-# force_raw_images=true
-#### (BoolOpt) Force backing images to raw format
+#
+# Options defined in nova.virt.xenapi.driver
+#
+# URL for connection to XenServer/Xen Cloud Platform. Required
+# if compute_driver=xenapi.XenAPIDriver (string value)
+#xenapi_connection_url=<None>
-######## defined in nova.virt.libvirt.driver ########
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#xenapi_connection_username=root
-# rescue_image_id=<None>
-#### (StrOpt) Rescue ami image
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#xenapi_connection_password=<None>
-# rescue_kernel_id=<None>
-#### (StrOpt) Rescue aki image
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#xenapi_connection_concurrent=5
-# rescue_ramdisk_id=<None>
-#### (StrOpt) Rescue ari image
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+#xenapi_vhd_coalesce_poll_interval=5.0
-# libvirt_type=kvm
-#### (StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
-#### xen)
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+#xenapi_check_host=true
-# libvirt_uri=
-#### (StrOpt) Override the default libvirt URI (which is dependent on
-#### libvirt_type)
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#xenapi_vhd_coalesce_max_attempts=5
-# libvirt_inject_password=false
-#### (BoolOpt) Inject the admin password at boot time, without an agent.
+# Base path to the storage repository (string value)
+#xenapi_sr_base_path=/var/run/sr-mount
-# libvirt_inject_key=true
-#### (BoolOpt) Inject the ssh public key at boot time
+# iSCSI Target Host (string value)
+#target_host=<None>
-# libvirt_inject_partition=1
-#### (IntOpt) The partition to inject to : -1 => inspect (libguestfs
-#### only), 0 => not partitioned, >0 => partition number
+# iSCSI Target Port (default: 3260) (string value)
+#target_port=3260
-# use_usb_tablet=true
-#### (BoolOpt) Sync virtual and real mouse cursors in Windows VMs
+# IQN Prefix (string value)
+#iqn_prefix=iqn.2010-10.org.openstack
-# live_migration_uri=qemu+tcp://%s/system
-#### (StrOpt) Migration target URI (any included "%s" is replaced with the
-#### migration target hostname)
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+#xenapi_remap_vbd_dev=false
-# live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
-#### (StrOpt) Migration flags to be set for live migration
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+#xenapi_remap_vbd_dev_prefix=sd
-# block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC
-#### (StrOpt) Migration flags to be set for block migration
+# Timeout in seconds for XenAPI login. (integer value)
+#xenapi_login_timeout=10
-# live_migration_bandwidth=0
-#### (IntOpt) Maximum bandwidth to be used during migration, in Mbps
-# snapshot_image_format=<None>
-#### (StrOpt) Snapshot image format (valid options are : raw, qcow2, vmdk,
-#### vdi). Defaults to same as source image
+#
+# Options defined in nova.virt.xenapi.pool
+#
-# libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtBridgeDriver
-#### (StrOpt) The libvirt VIF driver to configure the VIFs.
+# Whether to force the pool join; needed for hosts with
+# different CPUs (boolean value)
+#use_join_force=true
-# libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver
-#### (ListOpt) Libvirt handlers for remote volumes.
-# libvirt_disk_prefix=<None>
-#### (StrOpt) Override the default disk prefix for the devices attached to
-#### a server, which is dependent on libvirt_type. (valid options
-#### are: sd, xvd, uvd, vd)
+#
+# Options defined in nova.virt.xenapi.vif
+#
-# libvirt_wait_soft_reboot_seconds=120
-#### (IntOpt) Number of seconds to wait for instance to shut down after
-#### soft reboot request is made. We fall back to hard reboot if
-#### instance does not shutdown within this window.
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#xenapi_ovs_integration_bridge=xapi1
-# libvirt_nonblocking=true
-#### (BoolOpt) Use a separated OS thread pool to realize non-blocking
-#### libvirt calls
-# libvirt_cpu_mode=<None>
-#### (StrOpt) Set to "host-model" to clone the host CPU feature flags; to
-#### "host-passthrough" to use the host CPU model exactly; to
-#### "custom" to use a named CPU model; to "none" to not set any
-#### CPU model. If libvirt_type="kvm|qemu", it will default to
-#### "host-model", otherwise it will default to "none"
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
-# libvirt_cpu_model=<None>
-#### (StrOpt) Set to a named libvirt CPU model (see names listed in
-#### /usr/share/libvirt/cpu_map.xml). Only has effect if
-#### libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
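+# As an illustration, to cache only images explicitly marked
+# with the cache_in_nova=True image property:
+#   cache_images=some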
-# libvirt_snapshots_directory=$instances_path/snapshots
-#### (StrOpt) Location where libvirt driver will store snapshots before
-#### uploading them to image service
+# Default OS type (string value)
+#default_os_type=linux
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
-######## defined in nova.virt.libvirt.imagebackend ########
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
-# libvirt_images_type=default
-#### (StrOpt) VM Images format. Acceptable values are: raw, qcow2, lvm,
-#### default. If default is specified, then use_cow_images flag
-#### is used instead of this one.
+# Filter for finding the SR to be used to install guest
+# instances on. The default value is the Local Storage in
+# default XenServer/XCP installations. To select an SR with a
+# different matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=other-config:i18n-key=local-storage
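+# As an illustration (keys taken from the help text above): to
+# target an SR tagged with a custom other-config key, one might
+# set
+#   sr_matching_filter=other-config:my_favorite_sr=true
+# or, to fall back to the pool's default SR as shown in
+# XenCenter,
+#   sr_matching_filter=default-sr:true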
-# libvirt_images_volume_group=<None>
-#### (StrOpt) LVM Volume Group that is used for VM images, when you
-#### specify libvirt_images_type=lvm.
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#xenapi_sparse_copy=true
-# libvirt_sparse_logical_volumes=false
-#### (BoolOpt) Create sparse logical volumes (with virtualsize) if this
-#### flag is set to True.
+# Maximum number of retries to unplug VBD (integer value)
+#xenapi_num_vbd_unplug_retries=10
+# Whether or not to download images via BitTorrent
+# (all|some|none). (string value)
+#xenapi_torrent_images=none
-######## defined in nova.virt.libvirt.imagecache ########
+# Base URL for torrent files. (string value)
+#xenapi_torrent_base_url=<None>
-# remove_unused_base_images=true
-#### (BoolOpt) Should unused base images be removed?
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+#xenapi_torrent_seed_chance=1.0
-# remove_unused_resized_minimum_age_seconds=3600
-#### (IntOpt) Unused resized base images younger than this will not be
-#### removed
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+#xenapi_torrent_seed_duration=3600
-# remove_unused_original_minimum_age_seconds=86400
-#### (IntOpt) Unused unresized base images younger than this will not be
-#### removed
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+#xenapi_torrent_max_last_accessed=86400
-# checksum_base_images=false
-#### (BoolOpt) Write a checksum for files in _base to disk
+# Beginning of port range to listen on (integer value)
+#xenapi_torrent_listen_port_start=6881
+# End of port range to listen on (integer value)
+#xenapi_torrent_listen_port_end=6891
-######## defined in nova.virt.libvirt.utils ########
+# Number of seconds a download can remain at the same progress
+# percentage without being considered a stall (integer value)
+#xenapi_torrent_download_stall_cutoff=600
-# image_info_filename_pattern=$instances_path/$base_dir_name/%(image)s.info
-#### (StrOpt) Allows image information files to be stored in non-standard
-#### locations
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+#xenapi_torrent_max_seeder_processes_per_host=1
-######## defined in nova.virt.libvirt.vif ########
+#
+# Options defined in nova.virt.xenapi.vmops
+#
-# libvirt_ovs_bridge=br-int
-#### (StrOpt) Name of Integration Bridge used by Open vSwitch
+# number of seconds to wait for instance to go to running
+# state (integer value)
+#xenapi_running_timeout=60
-# libvirt_use_virtio_for_bridges=false
-#### (BoolOpt) Use virtio for bridge interfaces
+# The XenAPI VIF driver to use; the default uses XenServer
+# Network APIs. (string value)
+#xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
-######## defined in nova.virt.libvirt.volume ########
+#
+# Options defined in nova.vnc
+#
-# num_iscsi_scan_tries=3
-#### (IntOpt) number of times to rescan iSCSI target to find volume
+# location of vnc console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
-# rbd_user=<None>
-#### (StrOpt) the RADOS client name for accessing rbd volumes
+# location of nova xvp vnc console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
-# rbd_secret_uuid=<None>
-#### (StrOpt) the libvirt uuid of the secret for the rbd_uservolumes
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
+# the address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
-######## defined in nova.virt.libvirt.volume_nfs ########
+# enable vnc related features (boolean value)
+#vnc_enabled=true
-# nfs_mount_point_base=$state_path/mnt
-#### (StrOpt) Base dir where nfs expected to be mounted on compute
+# keymap for vnc (string value)
+#vnc_keymap=en-us
-######## defined in nova.virt.powervm.driver ########
+#
+# Options defined in nova.vnc.xvp_proxy
+#
-# powervm_mgr_type=ivm
-#### (StrOpt) PowerVM manager type (ivm, hmc)
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
-# powervm_mgr=<None>
-#### (StrOpt) PowerVM manager host or ip
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
-# powervm_mgr_user=<None>
-#### (StrOpt) PowerVM manager user name
-# powervm_mgr_passwd=<None>
-#### (StrOpt) PowerVM manager user password
+#
+# Options defined in nova.volume
+#
-# powervm_img_remote_path=<None>
-#### (StrOpt) PowerVM image remote path
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
-# powervm_img_local_path=<None>
-#### (StrOpt) Local directory to download glance images to
+#
+# Options defined in nova.volume.cinder
+#
-######## defined in nova.virt.vmwareapi.driver ########
+# Info to match when looking for cinder in the service
+# catalog. Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info=volume:cinder:publicURL
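+# As an illustration of the format (hypothetical endpoint
+# type), matching the internal endpoint instead of the public
+# one might look like:
+#   cinder_catalog_info=volume:cinder:internalURL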
-# vmwareapi_host_ip=<None>
-#### (StrOpt) URL for connection to VMWare ESX host.Required if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#cinder_endpoint_template=<None>
-# vmwareapi_host_username=<None>
-#### (StrOpt) Username for connection to VMWare ESX host. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#cinder_http_retries=3
-# vmwareapi_host_password=<None>
-#### (StrOpt) Password for connection to VMWare ESX host. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
-# vmwareapi_task_poll_interval=5.0
-#### (FloatOpt) The interval used for polling of remote tasks. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+[conductor]
-# vmwareapi_api_retry_count=10
-#### (IntOpt) The number of times we retry on failures, e.g., socket
-#### error, etc. Used only if compute_driver is
-#### vmwareapi.VMWareESXDriver.
+#
+# Options defined in nova.conductor.api
+#
-# vmwareapi_vlan_interface=vmnic0
-#### (StrOpt) Physical ethernet adapter name for vlan networking
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+# the topic conductor nodes listen on (string value)
+#topic=conductor
-######## defined in nova.virt.vmwareapi.vim ########
+# full class name of the conductor Manager (string value)
+#manager=nova.conductor.manager.ConductorManager
-# vmwareapi_wsdl_loc=<None>
-#### (StrOpt) VIM Service WSDL Location e.g
-#### http://<server>/vimService.wsdl. Due to a bug in vSphere ESX
-#### 4.1 default wsdl. Refer readme-vmware to setup
+[cells]
-######## defined in nova.virt.vmwareapi.vmops ########
+#
+# Options defined in nova.cells.manager
+#
-# vmware_vif_driver=nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver
-#### (StrOpt) The VMWare VIF driver to configure the VIFs.
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
-######## defined in nova.virt.xenapi.agent ########
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
-# agent_timeout=30
-#### (IntOpt) number of seconds to wait for agent reply
-# agent_version_timeout=300
-#### (IntOpt) number of seconds to wait for agent to be fully operational
+#
+# Options defined in nova.cells.messaging
+#
-# agent_resetnetwork_timeout=60
-#### (IntOpt) number of seconds to wait for agent reply to resetnetwork
-#### request
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
-# xenapi_agent_path=usr/sbin/xe-update-networking
-#### (StrOpt) Specifies the path in which the xenapi guest agent should be
-#### located. If the agent is present, network configuration is
-#### not injected into the image. Used if
-#### compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
-# xenapi_disable_agent=false
-#### (BoolOpt) Disable XenAPI agent. Reduces the amount of time it takes
-#### nova to detect that a VM has started, when that VM does not
-#### have the agent installed
+#
+# Options defined in nova.cells.opts
+#
-######## defined in nova.virt.xenapi.driver ########
+# Enable cell functionality (boolean value)
+#enable=false
-# xenapi_connection_url=<None>
-#### (StrOpt) URL for connection to XenServer/Xen Cloud Platform. Required
-#### if compute_driver=xenapi.XenAPIDriver
+# the topic cells nodes listen on (string value)
+#topic=cells
-# xenapi_connection_username=root
-#### (StrOpt) Username for connection to XenServer/Xen Cloud Platform.
-#### Used only if compute_driver=xenapi.XenAPIDriver
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
-# xenapi_connection_password=<None>
-#### (StrOpt) Password for connection to XenServer/Xen Cloud Platform.
-#### Used only if compute_driver=xenapi.XenAPIDriver
+# name of this cell (string value)
+#name=nova
-# xenapi_connection_concurrent=5
-#### (IntOpt) Maximum number of concurrent XenAPI connections. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
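+# As an illustration of the key=value;multi-value format, a
+# cell offering only kvm on linux hosts might advertise
+# (hypothetical values):
+#   capabilities=hypervisor=kvm,os=linux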
-# xenapi_vhd_coalesce_poll_interval=5.0
-#### (FloatOpt) The interval used for polling of coalescing vhds. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
-# xenapi_check_host=true
-#### (BoolOpt) Ensure compute service is running on host XenAPI connects
-#### to.
-# xenapi_vhd_coalesce_max_attempts=5
-#### (IntOpt) Max number of times to poll for VHD to coalesce. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+#
+# Options defined in nova.cells.rpc_driver
+#
-# xenapi_sr_base_path=/var/run/sr-mount
-#### (StrOpt) Base path to the storage repository
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
-# target_host=<None>
-#### (StrOpt) iSCSI Target Host
-# target_port=3260
-#### (StrOpt) iSCSI Target Port, 3260 Default
+#
+# Options defined in nova.cells.scheduler
+#
-# iqn_prefix=iqn.2010-10.org.openstack
-#### (StrOpt) IQN Prefix
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
-# xenapi_remap_vbd_dev=false
-#### (BoolOpt) Used to enable the remapping of VBD dev (Works around an
-#### issue in Ubuntu Maverick)
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
-# xenapi_remap_vbd_dev_prefix=sd
-#### (StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
-#### /dev/sdb)
-# xenapi_login_timeout=10
-#### (IntOpt) Timeout in seconds for XenAPI login.
+#
+# Options defined in nova.cells.state
+#
+# Seconds between getting fresh cell info from db. (integer
+# value)
+#db_check_interval=60
-######## defined in nova.virt.xenapi.pool ########
-# use_join_force=true
-#### (BoolOpt) To use for hosts with different CPUs
+[baremetal]
+#
+# Options defined in nova.virt.baremetal.db.sqlalchemy.session
+#
-######## defined in nova.virt.xenapi.vif ########
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db
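+# As an illustration (hypothetical credentials), a MySQL
+# back-end might use:
+#   sql_connection=mysql://nova:secret@localhost/nova_bm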
-# xenapi_ovs_integration_bridge=xapi1
-#### (StrOpt) Name of Integration Bridge used by Open vSwitch
+#
+# Options defined in nova.virt.baremetal.driver
+#
-######## defined in nova.virt.xenapi.vm_utils ########
+# Whether baremetal compute injects password or not (boolean
+# value)
+#inject_password=true
-# default_os_type=linux
-#### (StrOpt) Default OS type
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template
-# block_device_creation_timeout=10
-#### (IntOpt) Time to wait for a block device to be created
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
-# max_kernel_ramdisk_size=16777216
-#### (IntOpt) Maximum size in bytes of kernel or ramdisk images
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
-# sr_matching_filter=other-config:i18n-key=local-storage
-#### (StrOpt) Filter for finding the SR to be used to install guest
-#### instances on. The default value is the Local Storage in
-#### default XenServer/XCP installations. To select an SR with a
-#### different matching criteria, you could set it to other-
-#### config:my_favorite_sr=true. On the other hand, to fall back
-#### on the Default SR, as displayed by XenCenter, set this flag
-#### to: default-sr:true
+# a list of additional capabilities corresponding to
+# instance_type_extra_specs for this compute host to
+# advertise. Valid entries are name=value pairs. For example:
+# "key1:val1, key2:val2" (list value)
+#instance_type_extra_specs=
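+# As an illustration (hypothetical key and value), advertising
+# the CPU architecture of this host's bare-metal nodes might
+# look like:
+#   instance_type_extra_specs=cpu_arch:x86_64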
-# xenapi_sparse_copy=true
-#### (BoolOpt) Whether to use sparse_copy for copying data on a resize down
-#### (False will use standard dd). This speeds up resizes down
-#### considerably since large runs of zeros won't have to be
-#### rsynced
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
-# xenapi_num_vbd_unplug_retries=10
-#### (IntOpt) Maximum number of retries to unplug VBD
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
-# xenapi_torrent_images=none
-#### (StrOpt) Whether or not to download images via Bit Torrent
-#### (all|some|none).
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
-# xenapi_torrent_base_url=<None>
-#### (StrOpt) Base URL for torrent files.
-# xenapi_torrent_seed_chance=1.0
-#### (FloatOpt) Probability that peer will become a seeder. (1.0 = 100%)
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
-# xenapi_torrent_seed_duration=3600
-#### (IntOpt) Number of seconds after downloading an image via BitTorrent
-#### that it should be seeded for other peers.
+# path to baremetal terminal program (string value)
+#terminal=shellinaboxd
-# xenapi_torrent_max_last_accessed=86400
-#### (IntOpt) Cached torrent files not accessed within this number of
-#### seconds can be reaped
+# path to baremetal terminal SSL cert (PEM) (string value)
+#terminal_cert_dir=<None>
-# xenapi_torrent_listen_port_start=6881
-#### (IntOpt) Beginning of port range to listen on
+# path to directory that stores pidfiles of
+# baremetal_terminal (string value)
+#terminal_pid_dir=$state_path/baremetal/console
-# xenapi_torrent_listen_port_end=6891
-#### (IntOpt) End of port range to listen on
+# maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=5
-# xenapi_torrent_download_stall_cutoff=600
-#### (IntOpt) Number of seconds a download can remain at the same progress
-#### percentage w/o being considered a stall
-# xenapi_torrent_max_seeder_processes_per_host=1
-#### (IntOpt) Maximum number of seeder processes to run concurrently
-#### within a given dom0. (-1 = no limit)
+#
+# Options defined in nova.virt.baremetal.pxe
+#
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
-######## defined in nova.virt.xenapi.vmops ########
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
-# xenapi_running_timeout=60
-#### (IntOpt) number of seconds to wait for instance to go to running
-#### state
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
-# xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
-#### (StrOpt) The XenAPI VIF driver using XenServer Network APIs.
+# additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=<None>
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
-######## defined in nova.vnc ########
-# novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
-#### (StrOpt) location of vnc console proxy, in the form
-#### "http://127.0.0.1:6080/vnc_auto.html"
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
-# xvpvncproxy_base_url=http://127.0.0.1:6081/console
-#### (StrOpt) location of nova xvp vnc console proxy, in the form
-#### "http://127.0.0.1:6081/console"
+# Do not set this outside of dev/test environments. If a node
+# does not have a fixed PXE IP address, volumes are exported
+# with a globally open ACL (boolean value)
+#use_unsafe_iscsi=false
-# vncserver_listen=127.0.0.1
-#### (StrOpt) Ip address on which instance vncserversshould listen
+# iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
-# vncserver_proxyclient_address=127.0.0.1
-#### (StrOpt) the address to which proxy clients (like nova-xvpvncproxy)
-#### should connect
-# vnc_enabled=true
-#### (BoolOpt) enable vnc related features
+[rpc_notifier2]
-# vnc_keymap=en-us
-#### (StrOpt) keymap for vnc
+#
+# Options defined in nova.openstack.common.notifier.rpc_notifier2
+#
+# AMQP topic(s) used for openstack notifications (list value)
+#topics=notifications
-######## defined in nova.vnc.xvp_proxy ########
-# xvpvncproxy_port=6081
-#### (IntOpt) Port that the XCP VNC proxy should bind to
+[trusted_computing]
-# xvpvncproxy_host=0.0.0.0
-#### (StrOpt) Address that the XCP VNC proxy should bind to
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
+# attestation server host (string value)
+#attestation_server=<None>
-######## defined in nova.volume.cinder ########
+# attestation server cert file for identity verification
+# (string value)
+#attestation_server_ca_file=<None>
-# cinder_catalog_info=volume:cinder:publicURL
-#### (StrOpt) Info to match when looking for cinder in the service
-#### catalog. Format is : separated values of the form:
-#### <service_type>:<service_name>:<endpoint_type>
+# attestation server port (string value)
+#attestation_port=8443
-# cinder_endpoint_template=<None>
-#### (StrOpt) Override service catalog lookup with template for cinder
-#### endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
-# cinder_http_retries=3
-#### (IntOpt) Number of cinderclient retries on failed http calls
+# attestation authorization blob - must be changed (string value)
+#attestation_auth_blob=<None>
-# Total option count: 463
+# Total option count: 514
diff --git a/nova/api/openstack/compute/contrib/admin_networks.py b/nova/api/openstack/compute/contrib/admin_networks.py
index cdcee02d0..f5facd601 100644
--- a/nova/api/openstack/compute/contrib/admin_networks.py
+++ b/nova/api/openstack/compute/contrib/admin_networks.py
@@ -151,7 +151,7 @@ class AdminNetworkController(wsgi.Controller):
class Admin_networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension"""
+ """Admin-only Network Management Extension."""
name = "AdminNetworks"
alias = "os-admin-networks"
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index 875fa6051..4b7d4e57f 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -203,6 +203,9 @@ class CoverageController(object):
if xml:
self.coverInst.xml_report(outfile=path)
elif html:
+ if os.path.isdir(path):
+ msg = _("Directory conflict: %s already exists")
+ raise exc.HTTPBadRequest(explanation=msg)
self.coverInst.html_report(directory=path)
else:
output = open(path, 'w')
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 9812ceba3..52487c305 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -22,9 +22,7 @@ from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import availability_zones
-from nova.compute import api as compute_api
-from nova import db
+from nova import compute
from nova import exception
from nova.openstack.common import log as logging
@@ -94,140 +92,162 @@ class HostUpdateDeserializer(wsgi.XMLDeserializer):
return dict(body=updates)
-def _list_hosts(req):
- """Returns a summary list of hosts, optionally filtering
- by service type.
- """
- context = req.environ['nova.context']
- services = db.service_get_all(context, False)
- services = availability_zones.set_availability_zones(context, services)
- zone = ''
- if 'zone' in req.GET:
- zone = req.GET['zone']
- if zone:
- services = [s for s in services if s['availability_zone'] == zone]
- hosts = []
- for host in services:
- hosts.append({"host_name": host['host'], 'service': host['topic'],
- 'zone': host['availability_zone']})
- return hosts
-
-
-def check_host(fn):
- """Makes sure that the host exists."""
- def wrapped(self, req, id, *args, **kwargs):
- listed_hosts = _list_hosts(req)
- hosts = [h["host_name"] for h in listed_hosts]
- if id in hosts:
- return fn(self, req, id, *args, **kwargs)
- else:
- message = _("Host '%s' could not be found.") % id
- raise webob.exc.HTTPNotFound(explanation=message)
- return wrapped
-
-
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
- self.api = compute_api.HostAPI()
+ self.api = compute.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
- authorize(req.environ['nova.context'])
- return {'hosts': _list_hosts(req)}
+ """
+ :returns: A dict in the format:
+
+ {'hosts': [{'host_name': 'some.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.other.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.celly.host.name',
+ 'service': 'cells'},
+ {'host_name': 'console1.host.com',
+ 'service': 'consoleauth'},
+ {'host_name': 'network1.host.com',
+ 'service': 'network'},
+ {'host_name': 'network2.host.com',
+ 'service': 'network'},
+ {'host_name': 'sched1.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'sched2.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'vol1.host.com',
+ 'service': 'volume'}]}
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ zone = req.GET.get('zone', None)
+ data = self.api.list_hosts(context, zone)
+
+ return {'hosts': data}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostUpdateDeserializer)
- @check_host
def update(self, req, id, body):
- authorize(req.environ['nova.context'])
- update_values = {}
- for raw_key, raw_val in body.iteritems():
- key = raw_key.lower().strip()
- val = raw_val.lower().strip()
- if key == "status":
- if val in ("enable", "disable"):
- update_values['status'] = val.startswith("enable")
- else:
- explanation = _("Invalid status: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- elif key == "maintenance_mode":
- if val not in ['enable', 'disable']:
- explanation = _("Invalid mode: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- update_values['maintenance_mode'] = val == 'enable'
+ """
+ :param body: example format {'status': 'enable',
+ 'maintenance_mode': 'enable'}
+ :returns: A dict with the host name plus the updated
+ 'status' and/or 'maintenance_mode' values
+ """
+ def read_enabled(orig_val, msg):
+ """
+ :param orig_val: A string with either 'enable' or 'disable'. May
+ be surrounded by whitespace, and case doesn't
+ matter
+ :param msg: The message to be passed to HTTPBadRequest. A single
+ %s will be replaced with orig_val.
+ :returns: True for 'enable' and False for 'disable'
+ """
+ val = orig_val.strip().lower()
+ if val == "enable":
+ return True
+ elif val == "disable":
+ return False
else:
- explanation = _("Invalid update setting: '%s'") % raw_key
- raise webob.exc.HTTPBadRequest(explanation=explanation)
-
- # this is for handling multiple settings at the same time:
- # the result dictionaries are merged in the first one.
- # Note: the 'host' key will always be the same so it's
- # okay that it gets overwritten.
- update_setters = {'status': self._set_enabled_status,
- 'maintenance_mode': self._set_host_maintenance}
- result = {}
- for key, value in update_values.iteritems():
- result.update(update_setters[key](req, id, value))
+ raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
+ context = req.environ['nova.context']
+ authorize(context)
+ # See what the user wants to 'update'
+ params = dict([(k.strip().lower(), v) for k, v in body.iteritems()])
+ orig_status = status = params.pop('status', None)
+ orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
+ # Validate the request
+ if len(params) > 0:
+ # Some extra param was passed. Fail.
+ explanation = _("Invalid update setting: '%s'") % params.keys()[0]
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ if orig_status is not None:
+ status = read_enabled(orig_status, _("Invalid status: '%s'"))
+ if orig_maint_mode is not None:
+ maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
+ if status is None and maint_mode is None:
+ explanation = _("'status' or 'maintenance_mode' needed for "
+ "host update")
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ # Make the calls and merge the results
+ result = {'host': id}
+ if status is not None:
+ result['status'] = self._set_enabled_status(context, id, status)
+ if maint_mode is not None:
+ result['maintenance_mode'] = self._set_host_maintenance(context,
+ id, maint_mode)
return result
- def _set_host_maintenance(self, req, host, mode=True):
+ def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
- context = req.environ['nova.context']
- LOG.audit(_("Putting host %(host)s in maintenance "
+ LOG.audit(_("Putting host %(host_name)s in maintenance "
"mode %(mode)s.") % locals())
try:
- result = self.api.set_host_maintenance(context, host, mode)
+ result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "maintenance_mode": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ if result not in ("on_maintenance", "off_maintenance"):
+ raise webob.exc.HTTPBadRequest(explanation=result)
+ return result
- def _set_enabled_status(self, req, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- context = req.environ['nova.context']
- state = "enabled" if enabled else "disabled"
- LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
+ def _set_enabled_status(self, context, host_name, enabled):
+ """Sets the specified host's ability to accept new instances.
+ :param enabled: a boolean - if False no new VMs will be able to start
+ on the host"""
+ if enabled:
+ LOG.audit(_("Enabling host %s.") % host_name)
+ else:
+ LOG.audit(_("Disabling host %s.") % host_name)
try:
- result = self.api.set_host_enabled(context, host=host,
- enabled=enabled)
+ result = self.api.set_host_enabled(context, host_name=host_name,
+ enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "status": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ if result not in ("enabled", "disabled"):
+ raise webob.exc.HTTPBadRequest(explanation=result)
+ return result
- def _host_power_action(self, req, host, action):
+ def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
- result = self.api.host_power_action(context, host=host,
+ result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "power_action": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ return {"host": host_name, "power_action": result}
@wsgi.serializers(xml=HostActionTemplate)
def startup(self, req, id):
- return self._host_power_action(req, host=id, action="startup")
+ return self._host_power_action(req, host_name=id, action="startup")
@wsgi.serializers(xml=HostActionTemplate)
def shutdown(self, req, id):
- return self._host_power_action(req, host=id, action="shutdown")
+ return self._host_power_action(req, host_name=id, action="shutdown")
@wsgi.serializers(xml=HostActionTemplate)
def reboot(self, req, id):
- return self._host_power_action(req, host=id, action="reboot")
+ return self._host_power_action(req, host_name=id, action="reboot")
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
- :param context: security context
- :param host: hostname
+ :param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
@@ -235,66 +255,15 @@ class HostController(object):
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
- host = id
context = req.environ['nova.context']
- if not context.is_admin:
+ try:
+ data = self.api.describe_host(context, id)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ except exception.AdminRequired:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
-
- # Getting compute node info and related instances info
- try:
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
- except exception.ComputeHostNotFound:
- raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
- instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
-
- # Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
- resources = [{'resource': {'host': host, 'project': '(total)',
- 'cpu': compute_ref['vcpus'],
- 'memory_mb': compute_ref['memory_mb'],
- 'disk_gb': compute_ref['local_gb']}},
- {'resource': {'host': host, 'project': '(used_now)',
- 'cpu': compute_ref['vcpus_used'],
- 'memory_mb': compute_ref['memory_mb_used'],
- 'disk_gb': compute_ref['local_gb_used']}}]
-
- cpu_sum = 0
- mem_sum = 0
- hdd_sum = 0
- for i in instance_refs:
- cpu_sum += i['vcpus']
- mem_sum += i['memory_mb']
- hdd_sum += i['root_gb'] + i['ephemeral_gb']
-
- resources.append({'resource': {'host': host,
- 'project': '(used_max)',
- 'cpu': cpu_sum,
- 'memory_mb': mem_sum,
- 'disk_gb': hdd_sum}})
-
- # Getting usage resource per project
- project_ids = [i['project_id'] for i in instance_refs]
- project_ids = list(set(project_ids))
- for project_id in project_ids:
- vcpus = [i['vcpus'] for i in instance_refs
- if i['project_id'] == project_id]
-
- mem = [i['memory_mb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- resources.append({'resource': {'host': host,
- 'project': project_id,
- 'cpu': reduce(lambda x, y: x + y, vcpus),
- 'memory_mb': reduce(lambda x, y: x + y, mem),
- 'disk_gb': reduce(lambda x, y: x + y, disk)}})
-
- return {'host': resources}
+ return {'host': data}
class Hosts(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index 61a9d3af6..4be0bd100 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -187,7 +187,7 @@ class NetworkController(object):
class Os_networks(extensions.ExtensionDescriptor):
- """Tenant-based Network Management Extension"""
+ """Tenant-based Network Management Extension."""
name = "OSNetworks"
alias = "os-networks"
diff --git a/nova/api/openstack/compute/contrib/server_password.py b/nova/api/openstack/compute/contrib/server_password.py
index b4b2e04a5..0fd620fb8 100644
--- a/nova/api/openstack/compute/contrib/server_password.py
+++ b/nova/api/openstack/compute/contrib/server_password.py
@@ -66,7 +66,7 @@ class ServerPasswordController(object):
class Server_password(extensions.ExtensionDescriptor):
- """Server password support"""
+ """Server password support."""
name = "ServerPassword"
alias = "os-server-password"
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index c08e029cf..cb5cce591 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" utilities for multiple APIs"""
+"""utilities for multiple APIs."""
from nova import db
from nova.openstack.common import cfg
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index e5617e742..56d521892 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -690,17 +690,16 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
if not self._at_the_top():
return
instance_uuid = instance['uuid']
- routing_path = message.routing_path
- instance['cell_name'] = _reverse_path(routing_path)
+
# Remove things that we can't update in the top level cells.
- # 'cell_name' is included in this list.. because we'll set it
- # ourselves based on the reverse of the routing path. metadata
- # is only updated in the API cell, so we don't listen to what
- # the child cell tells us.
+ # 'metadata' is only updated in the API cell, so don't overwrite
+ # it based on what child cells say. Make sure to update
+ # 'cell_name' based on the routing path.
items_to_remove = ['id', 'security_groups', 'instance_type',
'volumes', 'cell_name', 'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
+ instance['cell_name'] = _reverse_path(message.routing_path)
# Fixup info_cache. We'll have to update this separately if
# it exists.
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index bf9e75af4..d1f8cc16c 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -33,6 +33,17 @@ nova.openstack.common.cfg.CONF.register_opts(_compute_opts)
def API(*args, **kwargs):
importutils = nova.openstack.common.importutils
- compute_api_class = nova.openstack.common.cfg.CONF.compute_api_class
- cls = importutils.import_class(compute_api_class)
- return cls(*args, **kwargs)
+ class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ return importutils.import_object(class_name, *args, **kwargs)
+
+
+def HostAPI(*args, **kwargs):
+ """
+ Returns an instance of the 'HostAPI' class from the same module as
+ the configured compute API.
+ """
+ importutils = nova.openstack.common.importutils
+ compute_api_class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ compute_api_class = importutils.import_class(compute_api_class_name)
+ class_name = compute_api_class.__module__ + ".HostAPI"
+ return importutils.import_object(class_name, *args, **kwargs)
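A minimal usage sketch (not part of this patch), assuming the default
compute_api_class of nova.compute.api.API so that the factory resolves
to nova.compute.api.HostAPI:

    from nova import compute
    from nova import context

    ctxt = context.get_admin_context()
    host_api = compute.HostAPI()  # class resolved via compute_api_class
    for host in host_api.list_hosts(ctxt):
        print host['host_name'], host['service']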
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 5e191556d..d0a039644 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -29,6 +29,7 @@ import time
import urllib
import uuid
+from nova import availability_zones
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
@@ -150,7 +151,7 @@ def policy_decorator(scope):
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
- scope='compute:security_groups')
+ scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
@@ -844,10 +845,10 @@ class API(base.Base):
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
- hosts = [x['host'] for (x, idx)
- in self.db.service_get_all_compute_sorted(context)]
- for host in hosts:
- self.compute_rpcapi.refresh_provider_fw_rules(context, host)
+ host_names = [x['host'] for (x, idx)
+ in self.db.service_get_all_compute_sorted(context)]
+ for host_name in host_names:
+ self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
@@ -884,6 +885,12 @@ class API(base.Base):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
reservations = None
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
try:
# NOTE(maoy): no expected_task_state needs to be set
attrs = {'progress': 0}
@@ -898,6 +905,7 @@ class API(base.Base):
old['task_state'] not in (task_states.DELETING,
task_states.SOFT_DELETING)):
reservations = QUOTAS.reserve(context,
+ project_id=project_id,
instances=-1,
cores=-instance['vcpus'],
ram=-instance['memory_mb'])
@@ -909,7 +917,9 @@ class API(base.Base):
self.db.instance_destroy(context, instance['uuid'],
constraint)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
return
except exception.ConstraintNotMet:
# Refresh to get new host information
@@ -944,13 +954,14 @@ class API(base.Base):
host=src_host, cast=False,
reservations=downsize_reservations)
- is_up = False
# NOTE(jogo): db allows for multiple compute services per host
try:
services = self.db.service_get_all_compute_by_host(
context.elevated(), instance['host'])
except exception.ComputeHostNotFound:
services = []
+
+ is_up = False
for service in services:
if self.servicegroup_api.service_is_up(service):
is_up = True
@@ -960,19 +971,25 @@ class API(base.Base):
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
def _local_delete(self, context, instance, bdms):
- LOG.warning(_('host for instance is down, deleting from '
- 'database'), instance=instance)
+ LOG.warning(_("instance's host %s is down, deleting from "
+ "database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
compute_utils.notify_about_instance_usage(
@@ -1865,9 +1882,9 @@ class API(base.Base):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
- def get_backdoor_port(self, context, host):
+ def get_backdoor_port(self, context, host_name):
"""Retrieve backdoor port."""
- return self.compute_rpcapi.get_backdoor_port(context, host)
+ return self.compute_rpcapi.get_backdoor_port(context, host_name)
@wrap_check_policy
@check_instance_lock
@@ -2133,45 +2150,148 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
- disk_over_commit, host):
+ disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
- host, instance=instance)
+ host_name, instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
expected_task_state=None)
self.scheduler_rpcapi.live_migration(context, block_migration,
- disk_over_commit, instance, host)
+ disk_over_commit, instance, host_name)
+
+
+def check_host(fn):
+ """Decorator that makes sure that the host exists."""
+ def wrapped(self, context, host_name, *args, **kwargs):
+ if self.does_host_exist(context, host_name):
+ return fn(self, context, host_name, *args, **kwargs)
+ else:
+ raise exception.HostNotFound(host=host_name)
+ return wrapped
class HostAPI(base.Base):
+ """Sub-set of the Compute Manager API for managing host operations."""
+
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(HostAPI, self).__init__()
- """Sub-set of the Compute Manager API for managing host operations."""
- def set_host_enabled(self, context, host, enabled):
+ @check_host
+ def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
return self.compute_rpcapi.set_host_enabled(context, enabled=enabled,
- host=host)
+ host=host_name)
- def get_host_uptime(self, context, host):
+ @check_host
+ def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.get_host_uptime(context, host=host)
+ return self.compute_rpcapi.get_host_uptime(context, host=host_name)
- def host_power_action(self, context, host, action):
+ @check_host
+ def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
- # NOTE(comstud): No instance_uuid argument to this compute manager
- # call
return self.compute_rpcapi.host_power_action(context, action=action,
- host=host)
+ host=host_name)
+ def list_hosts(self, context, zone=None, service=None):
+ """Returns a summary list of enabled hosts, optionally filtering
+ by zone and/or service type.
+ """
+ LOG.debug(_("Listing hosts"))
+ services = self.db.service_get_all(context, False)
+ services = availability_zones.set_availability_zones(context, services)
+ if zone:
+ services = [s for s in services if s['availability_zone'] == zone]
+ hosts = []
+ for host in services:
+ hosts.append({'host_name': host['host'], 'service': host['topic'],
+ 'zone': host['availability_zone']})
+ if service:
+ hosts = [host for host in hosts
+ if host["service"] == service]
+ return hosts
+
+ def does_host_exist(self, context, host_name):
+ """
+ Returns True if the host with host_name exists, False otherwise
+ """
+ return self.db.service_does_host_exist(context, host_name)
+
+ def describe_host(self, context, host_name):
+ """
+ Returns information about a host in the following format:
+ :returns:
+ ex.::
+ {'host': 'hostname',
+ 'project': 'admin',
+ 'cpu': 1,
+ 'memory_mb': 2048,
+ 'disk_gb': 30}
+ """
+ # Getting compute node info and related instances info
+ try:
+ compute_ref = self.db.service_get_all_compute_by_host(context,
+ host_name)
+ compute_ref = compute_ref[0]
+ except exception.ComputeHostNotFound:
+ raise exception.HostNotFound(host=host_name)
+ instance_refs = self.db.instance_get_all_by_host(context,
+ compute_ref['host'])
+
+ # Getting total available/used resource
+ compute_ref = compute_ref['compute_node'][0]
+ resources = [{'resource': {'host': host_name, 'project': '(total)',
+ 'cpu': compute_ref['vcpus'],
+ 'memory_mb': compute_ref['memory_mb'],
+ 'disk_gb': compute_ref['local_gb']}},
+ {'resource': {'host': host_name, 'project': '(used_now)',
+ 'cpu': compute_ref['vcpus_used'],
+ 'memory_mb': compute_ref['memory_mb_used'],
+ 'disk_gb': compute_ref['local_gb_used']}}]
+
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
+ for i in instance_refs:
+ cpu_sum += i['vcpus']
+ mem_sum += i['memory_mb']
+ hdd_sum += i['root_gb'] + i['ephemeral_gb']
+
+ resources.append({'resource': {'host': host_name,
+ 'project': '(used_max)',
+ 'cpu': cpu_sum,
+ 'memory_mb': mem_sum,
+ 'disk_gb': hdd_sum}})
+
+ # Getting usage resource per project
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ for project_id in project_ids:
+ vcpus = [i['vcpus'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ mem = [i['memory_mb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ resources.append({'resource': {'host': host_name,
+ 'project': project_id,
+ 'cpu': sum(vcpus),
+ 'memory_mb': sum(mem),
+ 'disk_gb': sum(disk)}})
+ return resources
+
+ @check_host
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
@@ -2237,25 +2357,27 @@ class AggregateAPI(base.Base):
reason='not empty')
self.db.aggregate_delete(context, aggregate_id)
- def add_host_to_aggregate(self, context, aggregate_id, host):
+ def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ service = self.db.service_get_all_compute_by_host(
+ context, host_name)[0]
aggregate = self.db.aggregate_get(context, aggregate_id)
- self.db.aggregate_host_add(context, aggregate_id, host)
+ self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
- def remove_host_from_aggregate(self, context, aggregate_id, host):
+ def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ service = self.db.service_get_all_compute_by_host(
+ context, host_name)[0]
aggregate = self.db.aggregate_get(context, aggregate_id)
- self.db.aggregate_host_delete(context, aggregate_id, host)
+ self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c9438b156..85942541f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -358,97 +358,72 @@ class ComputeManager(manager.SchedulerDependentManager):
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
- def _get_instances_at_startup(self, context):
- '''Get instances for this host during service init.'''
- attempt = 0
- timeout = 10
- while True:
- # NOTE(danms): Try ten times with a short timeout, and then punt
- # to the configured RPC timeout after that
- if attempt == 10:
- timeout = None
- attempt += 1
-
- # NOTE(russellb): This is running during service startup. If we
- # allow an exception to be raised, the service will shut down.
- # This may fail the first time around if nova-conductor wasn't
- # running when nova-compute started.
- try:
- self.conductor_api.ping(context, '1.21 GigaWatts',
- timeout=timeout)
- break
- except rpc_common.Timeout as e:
- LOG.exception(_('Timed out waiting for nova-conductor. '
- 'Is it running? Or did nova-compute start '
- 'before nova-conductor?'))
-
- return self.conductor_api.instance_get_all_by_host(context, self.host)
-
- def _destroy_evacuated_instances(self, context):
- """Destroys evacuated instances.
-
- While the compute was down the instances running on it could be
- evacuated to another host. Checking that instance host identical to
- current host. Otherwise destroying it
+ def _get_instances_on_driver(self, context):
+ """Return a list of instance records that match the instances found
+ on the hypervisor.
"""
-
- # getting all vms on this host
local_instances = []
try:
- # try to find all local instances by uuid
+ # Try to find all local instances by uuid.
+ # FIXME(comstud): Would be nice to consolidate this into
+ # a single query to nova-conductor.
for uuid in self.driver.list_instance_uuids():
try:
- local_instances.append(self.conductor_api.
- instance_get_by_uuid(context, uuid))
+ instance = self.conductor_api.instance_get_by_uuid(
+ context, uuid)
+ local_instances.append(instance)
except exception.InstanceNotFound as e:
LOG.error(_('Instance %(uuid)s found in the '
'hypervisor, but not in the database'),
locals())
continue
+ return local_instances
except NotImplementedError:
- # the driver doesn't support uuids listing, will do it in ugly way
- for instance_name in self.driver.list_instances():
- try:
- # couldn't find better way to find instance in db by it's
- # name if i will run on the list of this host instances it
- # will be hard to ignore instances that were created
- # outside openstack. returns -1 if instance name doesn't
- # match template
- instance_id = compute_utils.parse_decimal_id(CONF
- .instance_name_template, instance_name)
-
- if instance_id == -1:
- continue
-
- local_instances.append(self.conductor_api.
- instance_get(context, instance_id))
- except exception.InstanceNotFound as e:
- LOG.error(_('Instance %(instance_name)s found in the '
- 'hypervisor, but not in the database'),
- locals())
- continue
+ pass
+ # The driver doesn't support uuids listing, so we'll have
+ # to brute force.
+ driver_instances = self.driver.list_instances()
+ all_instances = self.conductor_api.instance_get_all(context)
+ name_map = dict([(instance['name'], instance)
+ for instance in all_instances])
+ local_instances = []
+ for driver_instance in driver_instances:
+ instance = name_map.get(driver_instance)
+ if not instance:
+ LOG.error(_('Instance %(driver_instance)s found in the '
+ 'hypervisor, but not in the database'),
+ locals())
+ continue
+ local_instances.append(instance)
+ return local_instances
+
+ def _destroy_evacuated_instances(self, context):
+ """Destroys evacuated instances.
+
+ While nova-compute was down, the instances running on it could be
+ evacuated to another host. Check that the instances reported
+ by the driver are still associated with this host. If they are
+ not, destroy them.
+ """
+ our_host = self.host
+ local_instances = self._get_instances_on_driver(context)
for instance in local_instances:
instance_host = instance['host']
- host = self.host
instance_name = instance['name']
- if instance['host'] != host:
- LOG.info(_('instance host %(instance_host)s is not equal to '
- 'current host %(host)s. '
- 'Deleting zombie instance %(instance_name)s'),
- locals())
-
+ if instance['host'] != our_host:
+ LOG.info(_('Deleting instance as its host ('
+ '%(instance_host)s) is not equal to our '
+ 'host (%(our_host)s).'),
+ locals(), instance=instance)
network_info = self._get_instance_nw_info(context, instance)
bdi = self._get_instance_volume_block_device_info(context,
- instance['uuid'])
-
+ instance)
self.driver.destroy(instance,
self._legacy_nw_info(network_info),
bdi,
False)
- LOG.info(_('zombie vm destroyed'))
-
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
db_state = instance['power_state']
@@ -507,7 +482,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
- instances = self._get_instances_at_startup(context)
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
@@ -592,9 +568,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
- # get the network info from network
+ # Get the network info from network API, but don't let it
+ # update the cache, as that will hit the DB. We'll update
+ # the cache ourselves via the conductor.
network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ instance, update_cache=False)
+ cache = {'network_info': network_info.json()}
+ self.conductor_api.instance_info_cache_update(context,
+ instance,
+ cache)
return network_info
def _legacy_nw_info(self, network_info):
@@ -727,7 +709,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# Spawn success:
if (is_first_time and not instance['access_ip_v4']
and not instance['access_ip_v6']):
- self._update_access_ip(context, instance, network_info)
+ instance = self._update_access_ip(context, instance,
+ network_info)
self._notify_about_instance_usage(context, instance,
"create.end", network_info=network_info,
@@ -844,7 +827,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_name = CONF.default_access_ip_network_name
if not network_name:
- return
+ return instance
update_info = {}
for vif in nw_info:
@@ -855,7 +838,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if ip['version'] == 6:
update_info['access_ip_v6'] = ip['address']
if update_info:
- self._instance_update(context, instance['uuid'], **update_info)
+ instance = self._instance_update(context, instance['uuid'],
+ **update_info)
+ return instance
def _check_instance_not_already_created(self, context, instance):
"""Ensure an instance with the same name is not already present."""
@@ -932,10 +917,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _allocate_network(self, context, instance, requested_networks):
"""Allocate networks for an instance and return the network info."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.NETWORKING,
- expected_task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.NETWORKING,
+ expected_task_state=None)
is_vpn = pipelib.is_vpn_image(instance['image_ref'])
try:
# allocate and get network info
@@ -954,9 +939,9 @@ class ComputeManager(manager.SchedulerDependentManager):
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
return self._setup_block_device_mapping(context, instance, bdms)
except Exception:
@@ -967,11 +952,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
"""Spawn an instance with error logging and update its power state."""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.SPAWNING,
- expected_task_state=task_states.
- BLOCK_DEVICE_MAPPING)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING,
+ expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
@@ -1198,13 +1182,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.STOPPED,
- expected_task_state=(task_states.POWERING_OFF,
- task_states.STOPPING),
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.STOPPED,
+ expected_task_state=(task_states.POWERING_OFF,
+ task_states.STOPPING),
+ task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
# NOTE(johannes): This is probably better named power_on_instance
@@ -1218,13 +1201,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=(task_states.POWERING_ON,
- task_states.STARTING))
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=(task_states.POWERING_ON,
+ task_states.STARTING))
self._notify_about_instance_usage(context, instance, "power_on.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1241,12 +1223,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the soft_delete method
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SOFT_DELETED,
- expected_task_state=task_states.SOFT_DELETING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1262,12 +1243,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the restore method
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- expected_task_state=task_states.RESTORING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ expected_task_state=task_states.RESTORING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "restore.end")
# NOTE(johannes): In the folsom release, power_off_instance was poorly
@@ -1375,11 +1355,10 @@ class ComputeManager(manager.SchedulerDependentManager):
"rebuild.start", extra_usage_info=extra_usage_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- task_state=task_states.REBUILDING,
- expected_task_state=task_states.REBUILDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ task_state=task_states.REBUILDING,
+ expected_task_state=task_states.REBUILDING)
if recreate:
# Detaching volumes.
@@ -1399,15 +1378,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.destroy(instance,
self._legacy_nw_info(network_info))
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING,
- expected_task_state=task_states.REBUILDING)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ expected_task_state=task_states.REBUILDING)
instance['injected_files'] = injected_files
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ network_info = self._get_instance_nw_info(context, instance)
if bdms is None:
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(
@@ -1415,14 +1391,11 @@ class ComputeManager(manager.SchedulerDependentManager):
device_info = self._setup_block_device_mapping(context, instance,
bdms)
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_SPAWNING,
- expected_task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING)
- # pull in new password here since the original password isn't in
- # the db
+ expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_SPAWNING,
+ expected_task_state=expected_task_state)
+
admin_password = new_pass
self.driver.spawn(context, instance, image_meta,
@@ -1529,9 +1502,8 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
@@ -1554,14 +1526,17 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state = task_states.IMAGE_BACKUP
def update_task_state(task_state, expected_state=expected_task_state):
- self._instance_update(context, instance['uuid'],
- task_state=task_state,
- expected_task_state=expected_state)
+ return self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id, update_task_state)
-        self._instance_update(context, instance['uuid'], task_state=None,
-                  expected_task_state=task_states.IMAGE_UPLOADING)
+        # The driver may have updated the instance in the meantime, but
+        # since we do a fresh update here anyway, we'll pick up changes.
+        instance = self._instance_update(context, instance['uuid'],
+                       task_state=None,
+                       expected_task_state=task_states.IMAGE_UPLOADING)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
@@ -1919,18 +1894,15 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self._instance_update(context,
- instance['uuid'],
- launched_at=timeutils.utcnow(),
- expected_task_state=task_states.
- RESIZE_REVERTING)
+ instance = self._instance_update(context,
+ instance['uuid'], launched_at=timeutils.utcnow(),
+ expected_task_state=task_states.RESIZE_REVERTING)
self.network_api.migrate_instance_finish(context, instance,
migration)
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE, task_state=None)
rt = self._get_resource_tracker(instance.get('node'))
rt.revert_resize(context, migration)
@@ -2336,12 +2308,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SUSPENDED,
- task_state=None,
- expected_task_state=task_states.SUSPENDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None,
+ expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@@ -2361,11 +2332,9 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context,
+ instance['uuid'], power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None)
self._notify_about_instance_usage(context, instance, 'resume')
@@ -2878,23 +2847,20 @@ class ComputeManager(manager.SchedulerDependentManager):
block_migration)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- host=self.host,
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host, power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None,
+ expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
- def _rollback_live_migration(self, context, instance_ref,
+ def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
- :param instance_ref: nova.db.sqlalchemy.models.Instance
+ :param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
@@ -2903,23 +2869,18 @@ class ComputeManager(manager.SchedulerDependentManager):
if not none, contains implementation specific data.
"""
- host = instance_ref['host']
- self._instance_update(context,
- instance_ref['uuid'],
- host=host,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ host = instance['host']
+ instance = self._instance_update(context, instance['uuid'],
+ host=host, vm_state=vm_states.ACTIVE,
+ task_state=None, expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
- self.network_api.setup_networks_on_host(context, instance_ref,
- self.host)
+ self.network_api.setup_networks_on_host(context, instance, self.host)
- for bdm in self._get_instance_volume_bdms(context, instance_ref):
+ for bdm in self._get_instance_volume_bdms(context, instance):
volume_id = bdm['volume_id']
- volume = self.volume_api.get(context, volume_id)
- self.compute_rpcapi.remove_volume_connection(context, instance_ref,
- volume['id'], dest)
+ self.compute_rpcapi.remove_volume_connection(context, instance,
+ volume_id, dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
@@ -2934,7 +2895,7 @@ class ComputeManager(manager.SchedulerDependentManager):
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or (is_volume_backed and not is_shared_storage):
self.compute_rpcapi.rollback_live_migration_at_destination(context,
- instance_ref, dest)
+ instance, dest)
def rollback_live_migration_at_destination(self, context, instance):
"""Cleaning up image directory that is created pre_live_migration.
@@ -3001,7 +2962,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
- self.network_api.get_instance_nw_info(context, instance)
+ self._get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
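
For readers of the _get_instances_on_driver() change above, here is a minimal,
self-contained sketch of the name-map fallback; the instance names and records
are made up for illustration, and the driver/conductor calls are replaced by
plain data:

    # Hypothetical stand-ins for driver.list_instances() and the
    # conductor's instance_get_all() results.
    driver_instances = ['instance-00000001', 'instance-00000007']
    all_instances = [{'name': 'instance-00000001', 'uuid': 'aaa'},
                     {'name': 'instance-00000002', 'uuid': 'bbb'}]

    # Build a name -> record map once, then resolve every driver-reported
    # name against it, instead of parsing an id out of each instance name.
    name_map = dict((inst['name'], inst) for inst in all_instances)
    local_instances = []
    for name in driver_instances:
        inst = name_map.get(name)
        if inst is None:
            # e.g. instance-00000007: on the hypervisor, not in the DB
            continue
        local_instances.append(inst)

    assert [inst['uuid'] for inst in local_instances] == ['aaa']
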
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 256b64979..075d59ec8 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -166,11 +166,8 @@ class ResourceTracker(object):
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
- return db.migration_create(context.elevated(),
- {'instance_uuid': instance['uuid'],
- 'source_compute': instance['host'],
- 'source_node': instance['node'],
- 'dest_compute': self.host,
+ return self.conductor_api.migration_create(context, instance,
+ {'dest_compute': self.host,
'dest_node': self.nodename,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
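
The resource tracker now sends only destination-side values; the conductor
merges in the instance-derived keys (see the nova/conductor/manager.py hunk
below). A small dict-level sketch of that merge, with illustrative data:

    def merge_migration_values(instance, values):
        # The conductor fills in the instance-derived keys, so the
        # compute node no longer needs direct DB access for them.
        merged = dict(values)
        merged.update({'instance_uuid': instance['uuid'],
                       'source_compute': instance['host'],
                       'source_node': instance['node']})
        return merged

    vals = merge_migration_values(
        {'uuid': 'u-1', 'host': 'compute1', 'node': 'node1'},
        {'dest_compute': 'compute2'})
    assert vals['source_compute'] == 'compute1'
    assert vals['dest_compute'] == 'compute2'
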
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index e8592dbe2..0c475d082 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -253,63 +253,3 @@ def usage_volume_info(vol_usage):
vol_usage['curr_write_bytes'])
return usage_info
-
-
-def parse_decimal_id(template, instance_name):
- """Finds instance decimal id from instance name
-
- :param template: template e.g. instance-%03x-james
- :param instance_name: instance name like instance-007-james
-
- :returns: parsed decimal id, e.g. 7 from the input above
- """
-
- # find pattern like %05x, %d..etc.
- reg = re.search('(%\d*)([ioxds])', template)
- format = reg.group(0)
-
- # split template to get prefix and suffix
- tokens = template.split(format)
-
- if tokens[0]:
- if not instance_name.startswith(tokens[0]):
- # template prefix not match
- return -1
- instance_name = instance_name[len(tokens[0]):]
-
- if tokens[1]:
- if not instance_name.endswith(tokens[1]):
- # template suffix not match
- return -1
- instance_name = instance_name[:-len(tokens[1])]
-
- # validate that instance_id length matches
- expected_length = format[1:-1]
-
- # if expected length is empty it means instance_id can be of any length
- if expected_length:
- if len(instance_name) < int(expected_length):
- return -1
- # if instance_id has preciding zeroes it must be of expected length
- if (instance_name[:1] == '0' and
- len(instance_name) != int(expected_length)):
- return -1
-
- # if the minimal expected length empty, there should be no preceding zeros
- elif instance_name[0] == '0':
- return -1
-
- # finding base of the template to convert to decimal
- base_fmt = format[-1:]
- base = 10
- if base_fmt == 'x':
- base = 16
- elif base_fmt == 'o':
- base = 8
-
- try:
- res = int(instance_name, base)
- except ValueError:
- res = -1
-
- return res
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
index 1e31da54c..4c702d037 100644
--- a/nova/conductor/__init__.py
+++ b/nova/conductor/__init__.py
@@ -18,7 +18,8 @@ import nova.openstack.common.importutils
def API(*args, **kwargs):
- if nova.openstack.common.cfg.CONF.conductor.use_local:
+ use_local = kwargs.pop('use_local', False)
+ if nova.openstack.common.cfg.CONF.conductor.use_local or use_local:
api = conductor_api.LocalAPI
else:
api = conductor_api.API
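
A rough model of the factory change above, with a plain boolean standing in
for CONF.conductor.use_local: a caller may now force the local (direct-DB)
implementation even when the global flag is off.

    conf_use_local = False  # stand-in for CONF.conductor.use_local

    class LocalAPI(object):
        pass

    class RpcAPI(object):
        pass

    def conductor_api_factory(*args, **kwargs):
        use_local = kwargs.pop('use_local', False)
        cls = LocalAPI if (conf_use_local or use_local) else RpcAPI
        return cls(*args, **kwargs)

    assert isinstance(conductor_api_factory(), RpcAPI)
    assert isinstance(conductor_api_factory(use_local=True), LocalAPI)
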
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4a72f81e0..4cc10604b 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -20,6 +20,7 @@ from nova.conductor import manager
from nova.conductor import rpcapi
from nova import exception as exc
from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
conductor_opts = [
@@ -39,6 +40,8 @@ CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
+LOG = logging.getLogger(__name__)
+
class ExceptionHelper(object):
"""Class to wrap another and translate the ClientExceptions raised by its
@@ -68,6 +71,10 @@ class LocalAPI(object):
# other/future users of this sort of functionality.
self._manager = ExceptionHelper(manager.ConductorManager())
+ def wait_until_ready(self, context, *args, **kwargs):
+ # nothing to wait for in the local case.
+ pass
+
def ping(self, context, arg, timeout=None):
return self._manager.ping(context, arg)
@@ -107,6 +114,11 @@ class LocalAPI(object):
return self._manager.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_info_cache_update(self, context, instance, values):
+ return self._manager.instance_info_cache_update(context,
+ instance,
+ values)
+
def instance_info_cache_delete(self, context, instance):
return self._manager.instance_info_cache_delete(context, instance)
@@ -122,6 +134,9 @@ class LocalAPI(object):
return self._manager.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
+ def migration_create(self, context, instance, values):
+ return self._manager.migration_create(context, instance, values)
+
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
@@ -237,12 +252,22 @@ class LocalAPI(object):
def service_get_all_compute_by_host(self, context, host):
return self._manager.service_get_all_by(context, 'compute', host)
+ def service_get_by_args(self, context, host, binary):
+ return self._manager.service_get_all_by(context, host=host,
+ binary=binary)
+
def action_event_start(self, context, values):
return self._manager.action_event_start(context, values)
def action_event_finish(self, context, values):
return self._manager.action_event_finish(context, values)
+ def service_create(self, context, values):
+ return self._manager.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self._manager.service_destroy(context, service_id)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -250,6 +275,35 @@ class API(object):
def __init__(self):
self.conductor_rpcapi = rpcapi.ConductorAPI()
+ def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
+ '''Wait until a conductor service is up and running.
+
+        This method calls the remote ping() method on the conductor topic
+        until it gets a response. It uses a shorter timeout (early_timeout)
+        for up to early_attempts tries, then falls back to the globally
+        configured rpc timeout for each subsequent retry.
+ '''
+ attempt = 0
+ timeout = early_timeout
+ while True:
+ # NOTE(danms): Try ten times with a short timeout, and then punt
+ # to the configured RPC timeout after that
+ if attempt == early_attempts:
+ timeout = None
+ attempt += 1
+
+ # NOTE(russellb): This is running during service startup. If we
+ # allow an exception to be raised, the service will shut down.
+ # This may fail the first time around if nova-conductor wasn't
+ # running when this service started.
+ try:
+ self.ping(context, '1.21 GigaWatts', timeout=timeout)
+ break
+ except rpc_common.Timeout as e:
+ LOG.exception(_('Timed out waiting for nova-conductor. '
+ 'Is it running? Or did this service start '
+ 'before nova-conductor?'))
+
def ping(self, context, arg, timeout=None):
return self.conductor_rpcapi.ping(context, arg, timeout)
@@ -291,6 +345,10 @@ class API(object):
return self.conductor_rpcapi.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_info_cache_update(self, context, instance, values):
+ return self.conductor_rpcapi.instance_info_cache_update(context,
+ instance, values)
+
def instance_info_cache_delete(self, context, instance):
return self.conductor_rpcapi.instance_info_cache_delete(context,
instance)
@@ -309,6 +367,10 @@ class API(object):
return crpcapi.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
+ def migration_create(self, context, instance, values):
+ return self.conductor_rpcapi.migration_create(context, instance,
+ values)
+
def migration_update(self, context, migration, status):
return self.conductor_rpcapi.migration_update(context, migration,
status)
@@ -435,8 +497,18 @@ class API(object):
return self.conductor_rpcapi.service_get_all_by(context, 'compute',
host)
+ def service_get_by_args(self, context, host, binary):
+ return self.conductor_rpcapi.service_get_all_by(context, host=host,
+ binary=binary)
+
def action_event_start(self, context, values):
return self.conductor_rpcapi.action_event_start(context, values)
def action_event_finish(self, context, values):
return self.conductor_rpcapi.action_event_finish(context, values)
+
+ def service_create(self, context, values):
+ return self.conductor_rpcapi.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self.conductor_rpcapi.service_destroy(context, service_id)
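
A simplified, runnable model of the wait_until_ready() retry loop added
above: the first early_attempts pings use the short timeout, after which the
loop punts to the default rpc timeout (None). The fake_ping below stands in
for the real RPC call:

    class Timeout(Exception):
        pass

    def wait_until_ready(ping, early_timeout=10, early_attempts=10):
        attempt = 0
        timeout = early_timeout
        while True:
            if attempt == early_attempts:
                timeout = None  # punt to the configured rpc timeout
            attempt += 1
            try:
                ping('1.21 GigaWatts', timeout=timeout)
                return
            except Timeout:
                pass  # the real code logs and retries

    calls = []

    def fake_ping(arg, timeout=None):
        calls.append(timeout)
        if len(calls) < 12:
            raise Timeout()

    wait_until_ready(fake_ping)
    assert calls[:10] == [10] * 10 and calls[10] is None
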
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 3c26f320e..8c6f39f02 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.25'
+ RPC_API_VERSION = '1.30'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -100,6 +100,13 @@ class ConductorManager(manager.SchedulerDependentManager):
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
+ def migration_create(self, context, instance, values):
+ values.update({'instance_uuid': instance['uuid'],
+ 'source_compute': instance['host'],
+ 'source_node': instance['node']})
+ migration_ref = self.db.migration_create(context.elevated(), values)
+ return jsonutils.to_primitive(migration_ref)
+
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
@@ -229,6 +236,10 @@ class ConductorManager(manager.SchedulerDependentManager):
def instance_info_cache_delete(self, context, instance):
self.db.instance_info_cache_delete(context, instance['uuid'])
+ def instance_info_cache_update(self, context, instance, values):
+ self.db.instance_info_cache_update(context, instance['uuid'],
+ values)
+
def instance_type_get(self, context, instance_type_id):
result = self.db.instance_type_get(context, instance_type_id)
return jsonutils.to_primitive(result)
@@ -244,8 +255,9 @@ class ConductorManager(manager.SchedulerDependentManager):
wr_bytes, instance['uuid'], last_refreshed,
update_totals)
- def service_get_all_by(self, context, topic=None, host=None):
- if not any((topic, host)):
+ @rpc_common.client_exceptions(exception.HostBinaryNotFound)
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
@@ -254,6 +266,8 @@ class ConductorManager(manager.SchedulerDependentManager):
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
+ elif all((host, binary)):
+ result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
@@ -262,7 +276,17 @@ class ConductorManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(result)
def action_event_start(self, context, values):
- return self.db.action_event_start(context, values)
+ evt = self.db.action_event_start(context, values)
+ return jsonutils.to_primitive(evt)
def action_event_finish(self, context, values):
- return self.db.action_event_finish(context, values)
+ evt = self.db.action_event_finish(context, values)
+ return jsonutils.to_primitive(evt)
+
+ def service_create(self, context, values):
+ svc = self.db.service_create(context, values)
+ return jsonutils.to_primitive(svc)
+
+ @rpc_common.client_exceptions(exception.ServiceNotFound)
+ def service_destroy(self, context, service_id):
+ self.db.service_destroy(context, service_id)
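
The dispatch in service_get_all_by() above amounts to: most specific key
combination first, then single-key filters, then everything. Roughly, with a
FakeDB stand-in and only one path exercised:

    class FakeDB(object):
        def service_get_by_args(self, context, host, binary):
            return [{'host': host, 'binary': binary}]

    def service_get_all_by(db, context, topic=None, host=None, binary=None):
        if topic and host:
            if topic == 'compute':
                return db.service_get_all_compute_by_host(context, host)
            return db.service_get_by_host_and_topic(context, host, topic)
        if host and binary:
            return db.service_get_by_args(context, host, binary)
        if topic:
            return db.service_get_all_by_topic(context, topic)
        if host:
            return db.service_get_all_by_host(context, host)
        return db.service_get_all(context)

    print(service_get_all_by(FakeDB(), None, host='h1', binary='nova-compute'))
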
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 6b91de167..b7f760cf5 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -58,6 +58,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
+ 1.26 - Added instance_info_cache_update
+ 1.27 - Added service_create
+ 1.28 - Added binary arg to service_get_all_by
+ 1.29 - Added service_destroy
+ 1.30 - Added migration_create
"""
BASE_RPC_API_VERSION = '1.0'
@@ -101,6 +106,12 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
dest_compute=dest_compute)
return self.call(context, msg, version='1.20')
+ def migration_create(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('migration_create', instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.30')
+
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('migration_update', migration=migration_p,
@@ -251,9 +262,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
update_totals=update_totals)
return self.call(context, msg, version='1.19')
- def service_get_all_by(self, context, topic=None, host=None):
- msg = self.make_msg('service_get_all_by', topic=topic, host=host)
- return self.call(context, msg, version='1.21')
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ msg = self.make_msg('service_get_all_by', topic=topic, host=host,
+ binary=binary)
+ return self.call(context, msg, version='1.28')
def instance_get_all(self, context):
msg = self.make_msg('instance_get_all')
@@ -270,3 +282,18 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def action_event_finish(self, context, values):
msg = self.make_msg('action_event_finish', values=values)
return self.call(context, msg, version='1.25')
+
+ def instance_info_cache_update(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('instance_info_cache_update',
+ instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.26')
+
+ def service_create(self, context, values):
+ msg = self.make_msg('service_create', values=values)
+ return self.call(context, msg, version='1.27')
+
+ def service_destroy(self, context, service_id):
+ msg = self.make_msg('service_destroy', service_id=service_id)
+ return self.call(context, msg, version='1.29')
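
Each method added to the rpcapi pins the version at which it appeared, so an
older conductor can cleanly reject calls it does not understand. Schematically,
with make_msg/call reduced to dict builders:

    def make_msg(method, **kwargs):
        return {'method': method, 'args': kwargs}

    def call(context, msg, version):
        # The real RpcProxy.call() attaches the version so the server
        # side can refuse methods newer than it supports.
        return {'msg': msg, 'version': version}

    def service_destroy(context, service_id):
        msg = make_msg('service_destroy', service_id=service_id)
        return call(context, msg, version='1.29')

    assert service_destroy(None, 42)['version'] == '1.29'
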
diff --git a/nova/context.py b/nova/context.py
index 094e2bffb..1a566cb5a 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -124,7 +124,9 @@ class RequestContext(object):
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
- 'instance_lock_checked': self.instance_lock_checked}
+ 'instance_lock_checked': self.instance_lock_checked,
+ 'tenant': self.tenant,
+ 'user': self.user}
@classmethod
def from_dict(cls, values):
@@ -143,6 +145,19 @@ class RequestContext(object):
return context
+ # NOTE(sirp): the openstack/common version of RequestContext uses
+ # tenant/user whereas the Nova version uses project_id/user_id. We need
+ # this shim in order to use context-aware code from openstack/common, like
+ # logging, until we make the switch to using openstack/common's version of
+ # RequestContext.
+ @property
+ def tenant(self):
+ return self.project_id
+
+ @property
+ def user(self):
+ return self.user_id
+
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
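
The shim properties above simply alias Nova's project_id/user_id to the
tenant/user names that openstack-common code expects; a self-contained sketch:

    class Ctx(object):
        def __init__(self, user_id, project_id):
            self.user_id = user_id
            self.project_id = project_id

        @property
        def tenant(self):
            # openstack/common reads ctx.tenant; Nova stores project_id.
            return self.project_id

        @property
        def user(self):
            return self.user_id

    ctx = Ctx('u1', 'p1')
    assert (ctx.user, ctx.tenant) == ('u1', 'p1')
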
diff --git a/nova/db/api.py b/nova/db/api.py
index 27bb6bf35..b1552b480 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -132,6 +132,15 @@ def service_get_all(context, disabled=None):
return IMPL.service_get_all(context, disabled)
+def service_does_host_exist(context, host_name, include_disabled=False):
+ """Returns True if 'host_name' is found in the services table, False
+ otherwise
+ :param: host_name - the name of the host we want to check if it exists
+ :param: include_disabled - Set to True to include hosts from disabled
+ services"""
+ return IMPL.service_does_host_exist(context, host_name, include_disabled)
+
+
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
@@ -1007,20 +1016,22 @@ def reservation_destroy(context, uuid):
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age)
+ until_refresh, max_age, project_id=project_id)
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
- return IMPL.reservation_commit(context, reservations)
+ return IMPL.reservation_commit(context, reservations,
+ project_id=project_id)
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
- return IMPL.reservation_rollback(context, reservations)
+ return IMPL.reservation_rollback(context, reservations,
+ project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
@@ -1622,32 +1633,32 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
def action_start(context, values):
- """Start an action for an instance"""
+ """Start an action for an instance."""
return IMPL.action_start(context, values)
def action_finish(context, values):
- """Finish an action for an instance"""
+ """Finish an action for an instance."""
return IMPL.action_finish(context, values)
def actions_get(context, uuid):
- """Get all instance actions for the provided instance"""
+ """Get all instance actions for the provided instance."""
return IMPL.actions_get(context, uuid)
def action_get_by_id(context, uuid, action_id):
- """Get the action by id and given instance"""
+ """Get the action by id and given instance."""
return IMPL.action_get_by_id(context, uuid, action_id)
def action_event_start(context, values):
- """Start an event on an instance action"""
+ """Start an event on an instance action."""
return IMPL.action_event_start(context, values)
def action_event_finish(context, values):
- """Finish an event on an instance action"""
+ """Finish an event on an instance action."""
return IMPL.action_event_finish(context, values)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index bb6dfc63a..8930f6ccc 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -337,6 +337,15 @@ def service_get_all(context, disabled=None):
@require_admin_context
+def service_does_host_exist(context, host_name, include_disabled):
+ query = get_session().query(func.count(models.Service.host)).\
+ filter_by(host=host_name)
+ if not include_disabled:
+ query = query.filter_by(disabled=False)
+ return query.scalar() > 0
+
+
+@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -2633,12 +2642,12 @@ def reservation_destroy(context, uuid):
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
-def _get_quota_usages(context, session):
+def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
- filter_by(project_id=context.project_id).\
+ filter_by(project_id=project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
@@ -2646,12 +2655,16 @@ def _get_quota_usages(context, session):
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
+
+ if project_id is None:
+ project_id = context.project_id
+
# Get the current usages
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
# Handle usage refresh
work = set(deltas.keys())
@@ -2662,7 +2675,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
- context.project_id,
+ project_id,
resource,
0, 0,
until_refresh or None,
@@ -2685,12 +2698,12 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Grab the sync routine
sync = resources[resource].sync
- updates = sync(elevated, context.project_id, session)
+ updates = sync(elevated, project_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(elevated,
- context.project_id,
+ project_id,
res,
0, 0,
until_refresh or None,
@@ -2740,7 +2753,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
reservation = reservation_create(elevated,
str(uuid.uuid4()),
usages[resource],
- context.project_id,
+ project_id,
resource, delta, expire,
session=session)
reservations.append(reservation.uuid)
@@ -2788,10 +2801,10 @@ def _quota_reservations_query(session, context, reservations):
@require_context
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
@@ -2803,10 +2816,10 @@ def reservation_commit(context, reservations):
@require_context
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
@@ -4595,7 +4608,7 @@ def actions_get(context, instance_uuid):
def action_get_by_id(context, instance_uuid, action_id):
- """Get the action by id and given instance"""
+ """Get the action by id and given instance."""
action = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
filter_by(id=action_id).\
@@ -4614,7 +4627,7 @@ def _action_get_by_request_id(context, instance_uuid, request_id,
def action_event_start(context, values):
- """Start an event on an instance action"""
+ """Start an event on an instance action."""
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
@@ -4634,7 +4647,7 @@ def action_event_start(context, values):
def action_event_finish(context, values):
- """Finish an event on an instance action"""
+ """Finish an event on an instance action."""
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 735ef56e2..52985a3eb 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -48,8 +48,17 @@ class NovaBase(object):
"""Save this object."""
if not session:
session = get_session()
- session.add(self)
- session.flush()
+        # NOTE(boris-42): This part of the code should look like:
+        #                       session.add(self)
+        #                       session.flush()
+        #                 But there is a bug in sqlalchemy and eventlet that
+        #                 raises a NoneType exception if there is no running
+        #                 transaction and rollback is called. As long as
+        #                 sqlalchemy has this bug, we have to create the
+        #                 transaction explicitly.
+ with session.begin(subtransactions=True):
+ session.add(self)
+ session.flush()
def soft_delete(self, session=None):
"""Mark this object as deleted."""
@@ -985,7 +994,7 @@ class InstanceFault(BASE, NovaBase):
class InstanceAction(BASE, NovaBase):
- """Track client actions on an instance"""
+ """Track client actions on an instance."""
__tablename__ = 'instance_actions'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
@@ -1001,7 +1010,7 @@ class InstanceAction(BASE, NovaBase):
class InstanceActionEvent(BASE, NovaBase):
- """Track events that occur during an InstanceAction"""
+ """Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
diff --git a/nova/network/api.py b/nova/network/api.py
index ec58e1101..25680e656 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -50,11 +50,8 @@ def refresh_cache(f):
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
- # get nw_info from return if possible, otherwise call for it
- nw_info = res if isinstance(res, network_model.NetworkInfo) else None
-
- update_instance_cache_with_nw_info(self, context, instance, nw_info,
- *args, **kwargs)
+ update_instance_cache_with_nw_info(self, context, instance,
+ nw_info=res)
# return the original function's return value
return res
@@ -62,20 +59,18 @@ def refresh_cache(f):
def update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None,
- *args,
- **kwargs):
+ nw_info=None):
try:
- nw_info = nw_info or api._get_instance_nw_info(context, instance)
-
+ if not isinstance(nw_info, network_model.NetworkInfo):
+ nw_info = None
+ if not nw_info:
+ nw_info = api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
api.db.instance_info_cache_update(context, instance['uuid'], cache)
- except Exception as e:
+ except Exception:
LOG.exception(_('Failed storing info cache'), instance=instance)
- LOG.debug(_('args: %s') % (args or {}))
- LOG.debug(_('kwargs: %s') % (kwargs or {}))
class API(base.Base):
@@ -243,10 +238,13 @@ class API(base.Base):
associations['project'] = project
self.network_rpcapi.associate(context, network_uuid, associations)
- @refresh_cache
- def get_instance_nw_info(self, context, instance):
+ def get_instance_nw_info(self, context, instance, update_cache=True):
"""Returns all network info related to an instance."""
- return self._get_instance_nw_info(context, instance)
+ result = self._get_instance_nw_info(context, instance)
+ if update_cache:
+ update_instance_cache_with_nw_info(self, context, instance,
+ result)
+ return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index ea09f69b2..e6abde609 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -647,12 +647,18 @@ def remove_floating_forward(floating_ip, fixed_ip, device):
def floating_forward_rules(floating_ip, fixed_ip, device):
+ rules = []
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
- rule += ' -o %s' % device
- return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('float-snat', rule)]
+ rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
+ rules.append(('float-snat', rule + ' -o %s' % device))
+ else:
+ rules.append(('float-snat', rule))
+ rules.append(
+ ('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ rules.append(
+ ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ return rules
def initialize_gateway_device(dev, network_ref):
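
Since the rewritten floating_forward_rules() is a pure function, its effect is
easy to see standalone: with a device, the SNAT rule is split into a hairpin
(-d) variant and an outbound (-o) variant. The addresses below are
illustrative:

    def floating_forward_rules(floating_ip, fixed_ip, device):
        rules = []
        rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
        if device:
            rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
            rules.append(('float-snat', rule + ' -o %s' % device))
        else:
            rules.append(('float-snat', rule))
        rules.append(
            ('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
        rules.append(
            ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
        return rules

    for chain, rule in floating_forward_rules('10.0.0.5', '192.168.0.2', 'eth0'):
        print(chain, rule)
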
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 064ae0427..51386b4fd 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -19,7 +19,7 @@
from nova.compute import api as compute_api
from nova.db import base
from nova import exception
-from nova.network.api import refresh_cache
+from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.openstack.common import cfg
@@ -57,6 +57,9 @@ LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
+refresh_cache = network_api.refresh_cache
+update_instance_info_cache = network_api.update_instance_cache_with_nw_info
+
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
@@ -181,9 +184,12 @@ class API(base.Base):
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
- @refresh_cache
- def get_instance_nw_info(self, context, instance, networks=None):
- return self._get_instance_nw_info(context, instance, networks)
+ def get_instance_nw_info(self, context, instance, networks=None,
+ update_cache=True):
+ result = self._get_instance_nw_info(context, instance, networks)
+ if update_cache:
+ update_instance_info_cache(self, context, instance, result)
+ return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
diff --git a/nova/quota.py b/nova/quota.py
index c2e34cca5..96e612503 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -198,7 +198,7 @@ class DbQuotaDriver(object):
return quotas
- def _get_quotas(self, context, resources, keys, has_sync):
+ def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
"""
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
@@ -211,6 +211,9 @@ class DbQuotaDriver(object):
have a sync attribute; if False, indicates
that the resource must NOT have a sync
attribute.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
# Filter resources
@@ -229,12 +232,12 @@ class DbQuotaDriver(object):
# Grab and return the quotas (without usages)
quotas = self.get_project_quotas(context, sub_resources,
- context.project_id,
+ project_id,
context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
- def limit_check(self, context, resources, values):
+ def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -254,6 +257,9 @@ class DbQuotaDriver(object):
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
# Ensure no value is less than zero
@@ -261,9 +267,13 @@ class DbQuotaDriver(object):
if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys(),
- has_sync=False)
+ has_sync=False, project_id=project_id)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
@@ -273,7 +283,8 @@ class DbQuotaDriver(object):
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
- def reserve(self, context, resources, deltas, expire=None):
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -303,6 +314,9 @@ class DbQuotaDriver(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
# Set up the reservation expiration
@@ -315,12 +329,16 @@ class DbQuotaDriver(object):
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas.
# NOTE(Vek): We're not worried about races at this point.
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
- has_sync=True)
+ has_sync=True, project_id=project_id)
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
@@ -328,27 +346,40 @@ class DbQuotaDriver(object):
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
- CONF.until_refresh, CONF.max_age)
+ CONF.until_refresh, CONF.max_age,
+ project_id=project_id)
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_commit(context, reservations)
+ db.reservation_commit(context, reservations, project_id=project_id)
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_rollback(context, reservations)
+ db.reservation_rollback(context, reservations, project_id=project_id)
def usage_reset(self, context, resources):
"""
@@ -843,7 +874,7 @@ class QuotaEngine(object):
return res.count(context, *args, **kwargs)
- def limit_check(self, context, **values):
+ def limit_check(self, context, project_id=None, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -863,11 +894,15 @@ class QuotaEngine(object):
nothing.
:param context: The request context, for access checks.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
- return self._driver.limit_check(context, self._resources, values)
+ return self._driver.limit_check(context, self._resources, values,
+ project_id=project_id)
- def reserve(self, context, expire=None, **deltas):
+ def reserve(self, context, expire=None, project_id=None, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -897,25 +932,32 @@ class QuotaEngine(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
reservations = self._driver.reserve(context, self._resources, deltas,
- expire=expire)
+ expire=expire,
+ project_id=project_id)
LOG.debug(_("Created reservations %(reservations)s") % locals())
return reservations
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
try:
- self._driver.commit(context, reservations)
+ self._driver.commit(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
@@ -924,16 +966,19 @@ class QuotaEngine(object):
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current
+                           context is admin and the admin wants to
+                           operate on another tenant's quota.
"""
try:
- self._driver.rollback(context, reservations)
+ self._driver.rollback(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
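
The project_id plumbing through the quota driver and engine follows one idiom
everywhere: default to the context's project unless an admin passes another
tenant explicitly. A tiny runnable distillation of that idiom:

    def reserve(context_project_id, deltas, project_id=None):
        # If project_id is None, fall back to the project in the
        # context; an admin may pass another tenant's id explicitly.
        if project_id is None:
            project_id = context_project_id
        return {'project_id': project_id, 'deltas': deltas}

    assert reserve('p1', {'cores': 2})['project_id'] == 'p1'
    assert reserve('p1', {'cores': 2}, project_id='p2')['project_id'] == 'p2'
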
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index a45e21a16..dc494af8f 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -51,8 +51,6 @@ scheduler_driver_opts = [
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
-CONF.import_opt('instances_path', 'nova.compute.manager')
-CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
def handle_schedule_error(context, ex, instance_uuid, request_spec):
diff --git a/nova/service.py b/nova/service.py
index 86f022f61..39e414eb6 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -30,6 +30,7 @@ import time
import eventlet
import greenlet
+from nova import conductor
from nova import context
from nova import db
from nova import exception
@@ -38,6 +39,7 @@ from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
from nova import servicegroup
from nova import utils
from nova import version
@@ -392,7 +394,7 @@ class Service(object):
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
- periodic_interval_max=None,
+ periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
@@ -407,6 +409,9 @@ class Service(object):
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
+ self.db_allowed = db_allowed
+ self.conductor_api = conductor.API(use_local=db_allowed)
+ self.conductor_api.wait_until_ready(context.get_admin_context())
self.servicegroup_api = servicegroup.API()
def start(self):
@@ -417,9 +422,9 @@ class Service(object):
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
- service_ref = db.service_get_by_args(ctxt,
- self.host,
- self.binary)
+ service_ref = self.conductor_api.service_get_by_args(ctxt,
+ self.host,
+ self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
@@ -467,12 +472,14 @@ class Service(object):
self.timers.append(periodic)
def _create_service_ref(self, context):
- service_ref = db.service_create(context,
- {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0})
- self.service_id = service_ref['id']
+ svc_values = {
+ 'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0
+ }
+ service = self.conductor_api.service_create(context, svc_values)
+ self.service_id = service['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
@@ -481,7 +488,8 @@ class Service(object):
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
- periodic_fuzzy_delay=None, periodic_interval_max=None):
+ periodic_fuzzy_delay=None, periodic_interval_max=None,
+ db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
@@ -514,7 +522,8 @@ class Service(object):
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
- periodic_interval_max=periodic_interval_max)
+ periodic_interval_max=periodic_interval_max,
+ db_allowed=db_allowed)
return service_obj
@@ -522,7 +531,8 @@ class Service(object):
"""Destroy the service object in the datastore."""
self.stop()
try:
- db.service_destroy(context.get_admin_context(), self.service_id)
+ self.conductor_api.service_destroy(context.get_admin_context(),
+ self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
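
The db_allowed flag added to Service above selects between the two conductor
flavors at startup: direct DB access (nothing to wait for) versus RPC (block
until nova-conductor answers). A stand-in model, not the Nova classes
themselves:

    class LocalConductor(object):
        def wait_until_ready(self, context):
            pass  # direct DB access; nothing to wait for

    class RpcConductor(object):
        def wait_until_ready(self, context):
            # The real implementation pings nova-conductor until it
            # gets a reply (see nova/conductor/api.py above).
            print('waiting for nova-conductor...')

    def make_conductor(db_allowed):
        return LocalConductor() if db_allowed else RpcConductor()

    make_conductor(db_allowed=False).wait_until_ready(None)
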
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index b9653e1e2..ebd0ee6ac 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -39,7 +39,9 @@ CONF.register_opt(servicegroup_driver_opt)
class API(object):
_driver = None
- _driver_name_class_mapping = {"db": "nova.servicegroup.db_driver.DbDriver"}
+ _driver_name_class_mapping = {
+ 'db': 'nova.servicegroup.drivers.db.DbDriver'
+ }
@lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
def __new__(cls, *args, **kwargs):
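
The updated mapping above reflects the move to a drivers/ package. Resolving such a name-to-dotted-path map is a plain dynamic import; a hedged sketch of the lookup (Nova likely goes through its importutils wrapper, this stand-in uses stdlib importlib):

    import importlib

    _DRIVER_MAP = {'db': 'nova.servicegroup.drivers.db.DbDriver'}

    def load_driver(name):
        path = _DRIVER_MAP[name]
        module_name, _, class_name = path.rpartition('.')
        module = importlib.import_module(module_name)
        return getattr(module, class_name)
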
diff --git a/nova/servicegroup/drivers/__init__.py b/nova/servicegroup/drivers/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/servicegroup/drivers/__init__.py
diff --git a/nova/servicegroup/db_driver.py b/nova/servicegroup/drivers/db.py
index 075db3ed8..075db3ed8 100644
--- a/nova/servicegroup/db_driver.py
+++ b/nova/servicegroup/drivers/db.py
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index ae2ea11c3..b30a3ddeb 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1153,7 +1153,7 @@ class CloudTestCase(test.TestCase):
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
- # test for rootDeviceName and blockDeiceMapping.
+ # test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 0f5761d09..be4465cf9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -19,59 +19,68 @@ import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
-from nova import context
+from nova import context as context_maker
from nova import db
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import fake_hosts
LOG = logging.getLogger(__name__)
-HOST_LIST = {"hosts": [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
- }
-HOST_LIST_NOVA_ZONE = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
-SERVICES_LIST = [
- {"host": "host_c1", "topic": "compute"},
- {"host": "host_c2", "topic": "compute"}]
-def stub_service_get_all(self, req):
- return SERVICES_LIST
+def stub_service_get_all(context, disabled=None):
+ return fake_hosts.SERVICES_LIST
-def stub_set_host_enabled(context, host, enabled):
- if host == "notimplemented":
- raise NotImplementedError()
- # We'll simulate success and failure by assuming
- # that 'host_c1' always succeeds, and 'host_c2'
- # always fails
- fail = (host == "host_c2")
- status = "enabled" if (enabled != fail) else "disabled"
- return status
+def stub_service_does_host_exist(context, host_name):
+ return host_name in [row['host'] for row in stub_service_get_all(context)]
+
+def stub_set_host_enabled(context, host_name, enabled):
+ """
+ Simulates three possible behaviours for VM drivers or compute drivers when
+ enabling or disabling a host.
-def stub_set_host_maintenance(context, host, mode):
- if host == "notimplemented":
+ 'enabled' means new instances can go to this host
+ 'disabled' means they can't
+ """
+ results = {True: "enabled", False: "disabled"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not enabled]
+ else:
+ # Do the right thing
+ return results[enabled]
+
+
+def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
- fail = (host == "host_c2")
- maintenance = "on_maintenance" if (mode != fail) else "off_maintenance"
- return maintenance
+ results = {True: "on_maintenance", False: "off_maintenance"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not mode]
+ else:
+ # Do the right thing
+ return results[mode]
-def stub_host_power_action(context, host, action):
- if host == "notimplemented":
+def stub_host_power_action(context, host_name, action):
+ if host_name == "notimplemented":
raise NotImplementedError()
return action
def _create_instance(**kwargs):
"""Create a test instance."""
- ctxt = context.get_admin_context()
+ ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
@@ -99,12 +108,12 @@ def _create_instance_dict(**kwargs):
class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {}
class FakeRequestWithNovaZone(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
@@ -114,14 +123,22 @@ class HostTestCase(test.TestCase):
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
+ self.hosts_api = self.controller.api
self.req = FakeRequest()
+
+ # Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
- self.stubs.Set(self.controller.api, 'set_host_enabled',
+ # Only hosts in our fake DB exist
+ self.stubs.Set(db, 'service_does_host_exist',
+ stub_service_does_host_exist)
+        # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
- self.stubs.Set(self.controller.api, 'set_host_maintenance',
+        # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
- self.stubs.Set(self.controller.api, 'host_power_action',
+ self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def _test_host_update(self, host, key, val, expected_value):
@@ -130,14 +147,17 @@ class HostTestCase(test.TestCase):
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
- # Verify that the compute hosts are returned.
- hosts = os_hosts._list_hosts(self.req)
- self.assertEqual(hosts, HOST_LIST['hosts'])
+ """Verify that the compute hosts are returned."""
+ result = self.controller.index(self.req)
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_list_hosts_with_zone(self):
- req = FakeRequestWithNovaZone()
- hosts = os_hosts._list_hosts(req)
- self.assertEqual(hosts, HOST_LIST_NOVA_ZONE)
+ result = self.controller.index(FakeRequestWithNovaZone())
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
@@ -222,10 +242,6 @@ class HostTestCase(test.TestCase):
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
- def test_bad_host(self):
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- self.req, "bogus_host_name", {"status": "disable"})
-
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
@@ -244,7 +260,7 @@ class HostTestCase(test.TestCase):
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
- ctxt = context.get_admin_context()
+ ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
@@ -259,8 +275,8 @@ class HostTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
- # No instance are running on the given host.
- ctxt = context.get_admin_context()
+ """No instances are running on the given host."""
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
@@ -275,8 +291,8 @@ class HostTestCase(test.TestCase):
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
- # show() works correctly as expected.
- ctxt = context.get_admin_context()
+ """show() works correctly as expected."""
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
@@ -303,17 +319,17 @@ class HostSerializerTest(test.TestCase):
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
- text = serializer.serialize(HOST_LIST)
+ text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
- self.assertEqual(len(HOST_LIST['hosts']), len(tree))
- for i in range(len(HOST_LIST)):
+ self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
+ for i in range(len(fake_hosts.HOST_LIST)):
self.assertEqual('host', tree[i].tag)
- self.assertEqual(HOST_LIST['hosts'][i]['host_name'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
tree[i].get('host_name'))
- self.assertEqual(HOST_LIST['hosts'][i]['service'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
tree[i].get('service'))
def test_update_serializer_with_status(self):
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index a5810fb21..9973716f6 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -794,7 +794,9 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'other': 'moo'}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
'other': 'meow',
'uuid': 'fake_uuid'}
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 460366833..08d9451b3 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -53,6 +53,7 @@ from nova.openstack.common.notifier import test_notifier
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import test
@@ -2367,80 +2368,60 @@ class ComputeTestCase(BaseTestCase):
# cleanup
db.instance_destroy(c, instance['uuid'])
- def test_live_migration_dest_raises_exception(self):
+ def test_live_migration_exception_rolls_back(self):
# Confirm exception when pre_live_migration fails.
- # creating instance testdata
- instance_ref = self._create_fake_instance({'host': 'dummy'})
- instance = jsonutils.to_primitive(instance_ref)
- inst_uuid = instance['uuid']
- inst_id = instance['id']
-
c = context.get_admin_context()
- topic = rpc.queue_get_for(c, CONF.compute_topic, instance['host'])
-
- # creating volume testdata
- volume_id = 'fake'
- values = {'instance_uuid': inst_uuid, 'device_name': '/dev/vdc',
- 'delete_on_termination': False, 'volume_id': volume_id}
- db.block_device_mapping_create(c, values)
-
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
-
- def fake_instance_update(context, instance_uuid, **updates):
- return db.instance_update_and_get_original(context, instance_uuid,
- updates)
- self.stubs.Set(self.compute, '_instance_update',
- fake_instance_update)
+ src_host = 'fake-src-host'
+ dest_host = 'fake-dest-host'
+ instance = dict(uuid='fake_instance', host=src_host,
+ name='fake-name')
+ updated_instance = 'fake_updated_instance'
+ fake_bdms = [dict(volume_id='vol1-id'), dict(volume_id='vol2-id')]
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
-
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
- self.compute.driver.get_instance_disk_info(instance['name'])
-
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
- self.compute.compute_rpcapi.pre_live_migration(c,
- mox.IsA(instance), True, None, instance['host'],
- None).AndRaise(rpc.common.RemoteError('', '', ''))
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdms')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'remove_volume_connection')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'rollback_live_migration_at_destination')
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.MIGRATING})
- # mocks for rollback
- rpc.call(c, 'network', {'method': 'setup_networks_on_host',
- 'args': {'instance_id': inst_id,
- 'host': self.compute.host,
- 'teardown': False},
- 'version': '1.0'}, None)
- rpcinst = jsonutils.to_primitive(
- db.instance_get_by_uuid(self.context, instance['uuid']))
- rpc.call(c, topic,
- {"method": "remove_volume_connection",
- "args": {'instance': rpcinst,
- 'volume_id': volume_id},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
- None)
- rpc.cast(c, topic,
- {"method": "rollback_live_migration_at_destination",
- "args": {'instance': rpcinst},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.compute.driver.get_instance_disk_info(
+ instance['name']).AndReturn('fake_disk')
+ self.compute.compute_rpcapi.pre_live_migration(c,
+ instance, True, 'fake_disk', dest_host,
+ None).AndRaise(test.TestingException())
+
+ self.compute._instance_update(c, instance['uuid'],
+ host=src_host, vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(c,
+ updated_instance, self.compute.host)
+ self.compute._get_instance_volume_bdms(c,
+ updated_instance).AndReturn(fake_bdms)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol1-id', dest_host)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol2-id', dest_host)
+ self.compute.compute_rpcapi.rollback_live_migration_at_destination(
+ c, updated_instance, dest_host)
# start test
self.mox.ReplayAll()
- self.assertRaises(rpc_common.RemoteError,
+ self.assertRaises(test.TestingException,
self.compute.live_migration,
- c, dest=instance['host'], block_migration=True,
- instance=rpcinst)
-
- # cleanup
- for bdms in db.block_device_mapping_get_all_by_instance(
- c, inst_uuid):
- db.block_device_mapping_destroy(c, bdms['id'])
- db.instance_destroy(c, inst_uuid)
+ c, dest=dest_host, block_migration=True,
+ instance=instance)
def test_live_migration_works_correctly(self):
        # Confirm live_migration() works as expected.
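
The rewritten rollback test above is a straight mox record/replay cycle: stub out each collaborator, record the expected calls (including the AndRaise failure), replay, then verify. The same cycle in isolation, against a hypothetical collaborator:

    import mox

    class Volume(object):
        """Hypothetical collaborator; the real method is never called."""
        def detach(self, volume_id):
            raise AssertionError("should be stubbed out")

    m = mox.Mox()
    vol = Volume()
    m.StubOutWithMock(vol, 'detach')
    vol.detach('vol1-id').AndRaise(RuntimeError('boom'))  # record
    m.ReplayAll()                                         # replay
    try:
        vol.detach('vol1-id')
    except RuntimeError:
        pass
    m.VerifyAll()                                         # verify
    m.UnsetStubs()
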
@@ -2558,38 +2539,50 @@ class ComputeTestCase(BaseTestCase):
self.compute._post_live_migration(c, inst_ref, dest)
def test_post_live_migration_at_destination(self):
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'post_live_migration_at_destination')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+
params = {'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED, }
instance = jsonutils.to_primitive(self._create_fake_instance(params))
admin_ctxt = context.get_admin_context()
instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
+
self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
self.compute.host)
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
migration = {'source_compute': instance['host'],
'dest_compute': self.compute.host, }
self.compute.network_api.migrate_instance_finish(admin_ctxt,
instance, migration)
- self.mox.StubOutWithMock(self.compute.driver,
- 'post_live_migration_at_destination')
fake_net_info = []
self.compute.driver.post_live_migration_at_destination(admin_ctxt,
instance,
fake_net_info,
False)
- self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
- self.compute.host)
+ self.compute._get_power_state(admin_ctxt, instance).AndReturn(
+ 'fake_power_state')
+
+ updated_instance = 'fake_updated_instance'
+ self.compute._instance_update(admin_ctxt, instance['uuid'],
+ host=self.compute.host,
+ power_state='fake_power_state',
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(admin_ctxt,
+ updated_instance, self.compute.host)
self.mox.ReplayAll()
+
self.compute.post_live_migration_at_destination(admin_ctxt, instance)
- instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
- self.assertEqual(instance['host'], self.compute.host)
- self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
- self.assertEqual(instance['task_state'], None)
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
@@ -2781,6 +2774,31 @@ class ComputeTestCase(BaseTestCase):
val = self.compute._running_deleted_instances('context')
self.assertEqual(val, [instance1])
+ def test_get_instance_nw_info(self):
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ fake_instance = 'fake-instance'
+ fake_nw_info = network_model.NetworkInfo()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'get_instance_nw_info')
+ self.mox.StubOutWithMock(fake_nw_info, 'json')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_info_cache_update')
+
+ self.compute.network_api.get_instance_nw_info(self.context,
+ fake_instance, update_cache=False).AndReturn(fake_nw_info)
+ fake_nw_info.json().AndReturn('fake-nw-info')
+ expected_cache = {'network_info': 'fake-nw-info'}
+ self.compute.conductor_api.instance_info_cache_update(self.context,
+ fake_instance, expected_cache)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instance_nw_info(self.context,
+ fake_instance)
+ self.assertEqual(fake_nw_info, result)
+
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
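
test_get_instance_nw_info above pins down a two-step flow: ask the network API for the info without letting it update the cache, then persist the JSON form through the conductor. A hedged sketch of that flow (signatures are illustrative):

    def get_instance_nw_info(network_api, conductor_api, context, instance):
        nw_info = network_api.get_instance_nw_info(context, instance,
                                                   update_cache=False)
        cache = {'network_info': nw_info.json()}
        conductor_api.instance_info_cache_update(context, instance, cache)
        return nw_info
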
@@ -2812,27 +2830,27 @@ class ComputeTestCase(BaseTestCase):
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
- self.assertEqual(instance, call_info['expected_instance'])
+ self.assertEqual(call_info['expected_instance'], instance)
call_info['get_nw_info'] += 1
self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stubs.Set(self.compute.conductor_api, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
+ self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 0)
- self.assertEqual(call_info['get_nw_info'], 1)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(0, call_info['get_by_uuid'])
+ self.assertEqual(1, call_info['get_nw_info'])
call_info['expected_instance'] = instances[1]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 1)
- self.assertEqual(call_info['get_nw_info'], 2)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(1, call_info['get_by_uuid'])
+ self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
instances[2]['host'] = 'not-me'
@@ -3095,24 +3113,8 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance(params)
self.compute._instance_update(self.context, instance['uuid'])
- def test_startup_conductor_ping(self):
- timeouts = []
- calls = dict(count=0)
-
- def fake_ping(context, message, timeout):
- timeouts.append(timeout)
- calls['count'] += 1
- if calls['count'] < 15:
- raise rpc_common.Timeout("fake")
-
- self.stubs.Set(self.compute.conductor_api, 'ping', fake_ping)
- self.compute._get_instances_at_startup(self.context)
- self.assertEqual(timeouts.count(10), 10)
- self.assertTrue(None in timeouts)
-
- def test_init_host_with_evacuated_instances_uuid_list(self):
- # creating testdata
- c = context.get_admin_context()
+ def test_destroy_evacuated_instances(self):
+ fake_context = context.get_admin_context()
# instances in central db
instances = [
@@ -3128,130 +3130,146 @@ class ComputeTestCase(BaseTestCase):
    # these have already been evacuated to another host
evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
- # creating mocks
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_volume_block_device_info')
+ self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(fake_context).AndReturn(
+ instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_volume_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute._legacy_nw_info('fake_network_info').AndReturn(
+ 'fake_legacy_network_info')
+ self.compute.driver.destroy(evacuated_instance,
+ 'fake_legacy_network_info',
+ 'fake_bdi',
+ False)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_init_host(self):
+
+ our_host = self.compute.host
+ fake_context = 'fake-context'
+ startup_instances = ['inst1', 'inst2', 'inst3']
+
+ def _do_mock_calls(defer_iptables_apply):
+ self.compute.driver.init_host(host=our_host)
+ context.get_admin_context().AndReturn(fake_context)
+ self.compute.conductor_api.instance_get_all_by_host(
+ fake_context, our_host).AndReturn(startup_instances)
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_on()
+ self.compute._destroy_evacuated_instances(fake_context)
+ self.compute._init_instance(fake_context, startup_instances[0])
+ self.compute._init_instance(fake_context, startup_instances[1])
+ self.compute._init_instance(fake_context, startup_instances[2])
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_off()
+ self.compute._report_driver_status(fake_context)
+ self.compute.publish_service_capabilities(fake_context)
+
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_on')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_off')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.compute,
+ '_destroy_evacuated_instances')
+ self.mox.StubOutWithMock(self.compute,
+ '_init_instance')
+ self.mox.StubOutWithMock(self.compute,
+ '_report_driver_status')
+ self.mox.StubOutWithMock(self.compute,
+ 'publish_service_capabilities')
- self.compute.driver.init_host(host=self.compute.host)
+ # Test with defer_iptables_apply
+ self.flags(defer_iptables_apply=True)
+ _do_mock_calls(True)
- def fake_get_admin_context():
- return c
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ self.mox.VerifyAll()
- def fake_all(*args, **kwargs):
- pass
+ # Test without defer_iptables_apply
+ self.mox.ResetAll()
+ self.flags(defer_iptables_apply=False)
+ _do_mock_calls(False)
- def fake_list_instance_uuids():
- return [
- # those are still related to this host
- instances[0]['uuid'],
- instances[1]['uuid'],
- instances[2]['uuid'],
- # and this one already been evacuated to other host
- evacuated_instance['uuid']
- ]
-
- def fake_destroy(instance, nw, bdi, destroyDisks):
- self.assertFalse(destroyDisks)
- self.assertEqual(instance['uuid'], evacuated_instance['uuid'])
-
- self.stubs.Set(nova.context,
- 'get_admin_context',
- fake_get_admin_context)
- self.stubs.Set(self.compute.driver, 'filter_defer_apply_on', fake_all)
- self.stubs.Set(self.compute.driver,
- 'list_instance_uuids',
- fake_list_instance_uuids)
- self.stubs.Set(self.compute, '_get_instance_nw_info', fake_all)
- self.stubs.Set(self.compute, '_get_instance_volume_block_device_info',
- fake_all)
- self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_all)
- self.stubs.Set(self.compute, '_init_instance', fake_all)
-
- self.stubs.Set(self.compute.driver, 'filter_defer_apply_off', fake_all)
- self.stubs.Set(self.compute, '_report_driver_status', fake_all)
- self.stubs.Set(self.compute, 'publish_service_capabilities', fake_all)
- # start test
self.mox.ReplayAll()
self.compute.init_host()
+        # VerifyAll done by tearDown
- db.instance_destroy(c, evacuated_instance['uuid'])
- for instance in instances:
- db.instance_destroy(c, instance['uuid'])
+ def test_get_instances_on_driver(self):
+ fake_context = context.get_admin_context()
- def test_init_host_with_evacuated_instances_names_list(self):
- # creating testdata
- c = context.get_admin_context()
+ driver_instances = []
+ for x in xrange(10):
+ instance = dict(uuid=uuidutils.generate_uuid())
+ driver_instances.append(instance)
- # instances in central db
- instances = [
- # those are still related to this host
- jsonutils.to_primitive(self._create_fake_instance(
- {'host': self.compute.host})),
- jsonutils.to_primitive(self._create_fake_instance(
- {'host': self.compute.host})),
- jsonutils.to_primitive(self._create_fake_instance(
- {'host': self.compute.host}))
- ]
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_by_uuid')
- # those are already been evacuated to other host
- evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
+ self.compute.driver.list_instance_uuids().AndReturn(
+ [inst['uuid'] for inst in driver_instances])
+ for x in xrange(len(driver_instances)):
+ self.compute.conductor_api.instance_get_by_uuid(fake_context,
+ driver_instances[x]['uuid']).AndReturn(
+ driver_instances[x])
- # creating mocks
- self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+ self.mox.ReplayAll()
- self.compute.driver.init_host(host=self.compute.host)
+ result = self.compute._get_instances_on_driver(fake_context)
+ self.assertEqual(driver_instances, result)
- def fake_get_admin_context():
- return c
+ def test_get_instances_on_driver_fallback(self):
+ # Test getting instances when driver doesn't support
+ # 'list_instance_uuids'
+ fake_context = context.get_admin_context()
- def fake_all(*args, **kwargs):
- pass
+ all_instances = []
+ driver_instances = []
+ for x in xrange(10):
+ instance = dict(name=uuidutils.generate_uuid())
+ if x % 2:
+ driver_instances.append(instance)
+ all_instances.append(instance)
- def fake_list_instances():
- return [
- # those are still related to this host
- CONF.instance_name_template % instances[0]['id'],
- CONF.instance_name_template % instances[1]['id'],
- CONF.instance_name_template % instances[2]['id'],
- # and this one already been evacuated to other host
- CONF.instance_name_template % evacuated_instance['id']
- ]
-
- def fake_list_instance_uuids():
- raise NotImplementedError()
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instances')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_all')
- def fake_destroy(instance, nw, bdi, destroyDisks):
- self.assertFalse(destroyDisks)
- self.assertEqual(instance['uuid'], evacuated_instance['uuid'])
+ self.compute.driver.list_instance_uuids().AndRaise(
+ NotImplementedError())
+ self.compute.driver.list_instances().AndReturn(
+ [inst['name'] for inst in driver_instances])
+ self.compute.conductor_api.instance_get_all(
+ fake_context).AndReturn(all_instances)
- self.stubs.Set(nova.context,
- 'get_admin_context',
- fake_get_admin_context)
- self.stubs.Set(self.compute.driver, 'filter_defer_apply_on', fake_all)
- self.stubs.Set(self.compute.driver,
- 'list_instances',
- fake_list_instances)
- self.stubs.Set(self.compute.driver,
- 'list_instance_uuids',
- fake_list_instance_uuids)
-
- self.stubs.Set(self.compute, '_get_instance_nw_info', fake_all)
- self.stubs.Set(self.compute, '_get_instance_volume_block_device_info',
- fake_all)
- self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_all)
- self.stubs.Set(self.compute, '_init_instance', fake_all)
-
- self.stubs.Set(self.compute.driver, 'filter_defer_apply_off', fake_all)
- self.stubs.Set(self.compute, '_report_driver_status', fake_all)
- self.stubs.Set(self.compute, 'publish_service_capabilities', fake_all)
- # start test
self.mox.ReplayAll()
- self.compute.init_host()
- db.instance_destroy(c, evacuated_instance['uuid'])
- for instance in instances:
- db.instance_destroy(c, instance['uuid'])
+ result = self.compute._get_instances_on_driver(fake_context)
+ self.assertEqual(driver_instances, result)
def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host."""
@@ -3475,7 +3493,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, inst_uuid)
def test_rebuild_on_host_instance_exists(self):
- """Rebuild if instance exists raise an exception"""
+ """Rebuild if instance exists raise an exception."""
# creating testdata
c = self.context.elevated()
@@ -3893,12 +3911,12 @@ class ComputeAPITestCase(BaseTestCase):
def test_repeated_delete_quota(self):
in_use = {'instances': 1}
- def fake_reserve(context, **deltas):
+ def fake_reserve(context, expire=None, project_id=None, **deltas):
return dict(deltas.iteritems())
self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
- def fake_commit(context, deltas):
+ def fake_commit(context, deltas, project_id=None):
for k, v in deltas.iteritems():
in_use[k] = in_use.get(k, 0) + v
@@ -3952,7 +3970,8 @@ class ComputeAPITestCase(BaseTestCase):
'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.soft_delete(self.context, instance)
@@ -3980,7 +3999,8 @@ class ComputeAPITestCase(BaseTestCase):
'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
- nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
def fail(*args, **kwargs):
@@ -5754,7 +5774,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
- host='fake_dest_host')
+ host_name='fake_dest_host')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
@@ -6050,7 +6070,7 @@ class ComputePolicyTestCase(BaseTestCase):
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
- self.context, instances)
+ context.get_admin_context(), instances)
def test_force_host_fail(self):
rules = {"compute:create": [],
@@ -6082,11 +6102,19 @@ class ComputeHostAPITestCase(BaseTestCase):
call_info['msg'] = msg
self.stubs.Set(rpc, 'call', fake_rpc_call)
+ def _pretend_fake_host_exists(self, ctxt):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists"""
+ self.mox.StubOutWithMock(self.host_api, 'does_host_exist')
+ self.host_api.does_host_exist(ctxt, 'fake_host').AndReturn(True)
+ self.mox.ReplayAll()
+
def test_set_host_enabled(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6100,6 +6128,7 @@ class ComputeHostAPITestCase(BaseTestCase):
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.get_host_uptime(ctxt, 'fake_host')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6109,9 +6138,10 @@ class ComputeHostAPITestCase(BaseTestCase):
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_host_power_action(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6122,9 +6152,10 @@ class ComputeHostAPITestCase(BaseTestCase):
compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_set_host_maintenance(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
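
The _pretend_fake_host_exists helper above mirrors a new guard in the host API: verify the host exists before issuing the RPC, raising HostNotFound otherwise. A minimal sketch of that guard (rpc_set_host_enabled is a hypothetical stand-in for the actual RPC call):

    class HostNotFound(Exception):
        """Stand-in for nova.exception.HostNotFound."""

    def set_host_enabled(host_api, context, host_name, enabled):
        if not host_api.does_host_exist(context, host_name):
            raise HostNotFound(host_name)
        # Only now is the RPC issued.
        return host_api.rpc_set_host_enabled(context, host_name, enabled)
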
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 9417be79a..f29c68627 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -390,97 +390,3 @@ class MetadataToDictTestCase(test.TestCase):
def test_metadata_to_dict_empty(self):
self.assertEqual(compute_utils.metadata_to_dict([]), {})
-
-
-class ParseDecimalIDTestCase(test.TestCase):
-
- def setUp(self):
- super(ParseDecimalIDTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
-
- self.templates = [
- CONF.instance_name_template,
- 'instance-%08x',
- 'instance-%08o',
- 'instance-%08d',
- 'instance-%04x',
- 'instance-%04o',
- 'instance-%04d',
- 'instance-%x',
- 'instance-%o',
- 'instance-%d',
- 'james-%07x-bond',
- 'james-%07o-bond',
- 'james-%07d-bond',
- 'xxxx%xxxx',
- 'oooo%oooo',
- 'dddd%dddd',
- '%02x',
- '%02o',
- '%02d',
- '%x',
- '%o',
- '%d',
- '%07x-bond',
- '%07o-bond',
- '%07d-bond',
- '123%xxxx',
- '123%oooo',
- '123%dddd',
- '007%02x',
- '007%02o',
- '007%02d',
- '42%x',
- '42%o',
- '42%d',
- '700%07x007',
- '700%07o007',
- '700%07d007']
-
- self.ids = [
- 1,
- 5,
- 10,
- 42,
- 90,
- 100,
- 256,
- 500,
- 1000,
- 2500,
- 19294,
- 100500,
- 21093404
- ]
-
- def _validate_id(self, template, name):
- return compute_utils.parse_decimal_id(template, name)
-
- def test_name_template_based(self):
- for template in self.templates:
- for id in self.ids:
- self.assertEqual(id, self._validate_id(template,
- template % id))
-
- def test_name_not_template_based(self):
-
- for template in self.templates:
- for id in self.ids:
- name = template % id
-
- self.assertEqual(-1, self._validate_id(template,
- 'n%s' % name))
- self.assertEqual(-1, self._validate_id(template,
- '%sw' % name))
- self.assertEqual(-1, self._validate_id(template,
- 'reg%s' % name))
- self.assertEqual(-1, self._validate_id(template,
- '%sex' % name))
- self.assertEqual(-1, self._validate_id(template, '%s%s%s' % (
- name[:1],
- 'abr',
- name[-1:])))
- self.assertEqual(-1, self._validate_id(template, '%s%s%s' % (
- name[:1],
- 'qwer23456ert',
- name[-1:])))
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
new file mode 100644
index 000000000..f00245d1e
--- /dev/null
+++ b/nova/tests/compute/test_host_api.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests import fake_hosts
+
+
+class HostApiTestCase(test.TestCase):
+ """
+ Tests 'host' subset of the compute api
+ """
+
+ def setUp(self):
+ super(HostApiTestCase, self).setUp()
+ self.compute_rpcapi = api.compute_rpcapi
+ self.api = api.HostAPI()
+
+ def test_bad_host_set_enabled(self):
+ """
+ Tests that actions on single hosts that don't exist blow up without
+ having to reach the host via rpc. Should raise HostNotFound if you
+ try to update a host that is not in the DB
+ """
+ self.assertRaises(exception.HostNotFound, self.api.set_host_enabled,
+ context.get_admin_context(), "bogus_host_name", False)
+
+ def test_list_compute_hosts(self):
+ ctx = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ db.service_get_all(ctx, False).AndReturn(fake_hosts.SERVICES_LIST)
+ self.mox.ReplayAll()
+ compute_hosts = self.api.list_hosts(ctx, service="compute")
+ self.mox.VerifyAll()
+ expected = [host for host in fake_hosts.HOST_LIST
+ if host["service"] == "compute"]
+ self.assertEqual(expected, compute_hosts)
+
+ def test_describe_host(self):
+ """
+ Makes sure that describe_host returns the correct information
+ given our fake input.
+ """
+ ctx = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ host_name = 'host_c1'
+ db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
+ [{'host': 'fake_host',
+ 'compute_node': [
+ {'vcpus': 4,
+ 'vcpus_used': 1,
+ 'memory_mb': 8192,
+ 'memory_mb_used': 2048,
+ 'local_gb': 1024,
+ 'local_gb_used': 648}
+ ]
+ }])
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
+ [{'project_id': 42,
+ 'vcpus': 1,
+ 'memory_mb': 2048,
+ 'root_gb': 648,
+ 'ephemeral_gb': 0,
+ }])
+ self.mox.ReplayAll()
+ result = self.api.describe_host(ctx, host_name)
+ self.assertEqual(result,
+ [{'resource': {'cpu': 4,
+ 'disk_gb': 1024,
+ 'host': 'host_c1',
+ 'memory_mb': 8192,
+ 'project': '(total)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': '(used_now)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': '(used_max)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': 42}}]
+ )
+ self.mox.VerifyAll()
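
The expected output above encodes describe_host's rollup: a (total) row from the compute node record, a (used_now) row from current usage, a (used_max) row summed from instance sizes, then one row per project. A hedged sketch of the (used_max) aggregation:

    def used_max(instances):
        return {
            'cpu': sum(i['vcpus'] for i in instances),
            'memory_mb': sum(i['memory_mb'] for i in instances),
            'disk_gb': sum(i['root_gb'] + i['ephemeral_gb']
                           for i in instances),
        }

    # With the single fake instance above this yields
    # {'cpu': 1, 'memory_mb': 2048, 'disk_gb': 648},
    # matching the (used_max) row in the assertion.
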
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index afe05abe0..3bfd51461 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -24,6 +24,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
@@ -35,6 +36,7 @@ LOG = logging.getLogger(__name__)
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_LOCAL_GB = 6
FAKE_VIRT_VCPUS = 1
+CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
@@ -50,6 +52,9 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
# no support for getting resource usage info
return {}
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtDriver(driver.ComputeDriver):
@@ -80,6 +85,9 @@ class FakeVirtDriver(driver.ComputeDriver):
}
return d
+ def legacy_nwinfo(self):
+ return True
+
class BaseTestCase(test.TestCase):
@@ -91,14 +99,21 @@ class BaseTestCase(test.TestCase):
self.context = context.get_admin_context()
+ self.flags(use_local=True, group='conductor')
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+
self._instances = {}
self._instance_types = {}
- self.stubs.Set(db, 'instance_get_all_by_host_and_node',
+ self.stubs.Set(self.conductor.db,
+ 'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self.stubs.Set(self.conductor.db,
+ 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
- self.stubs.Set(db, 'instance_type_get', self._fake_instance_type_get)
+ self.stubs.Set(self.conductor.db,
+ 'instance_type_get', self._fake_instance_type_get)
self.host = 'fakehost'
@@ -616,7 +631,8 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
- self.stubs.Set(db, 'migration_create', self._fake_migration_create)
+ self.stubs.Set(self.conductor.db,
+ 'migration_create', self._fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_instance_type_create()
@@ -639,7 +655,7 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
if values:
migration.update(values)
- self._migrations[instance_uuid] = migration
+ self._migrations[migration['instance_uuid']] = migration
return migration
def test_claim(self):
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 3e7f33e85..46fadf4f0 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -35,14 +35,21 @@ from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
+class FakeContext(context.RequestContext):
+ def elevated(self):
+ """Return a consistent elevated context so we can detect it."""
+ if not hasattr(self, '_elevated'):
+ self._elevated = super(FakeContext, self).elevated()
+ return self._elevated
+
+
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id)
+ self.context = FakeContext(self.user_id, self.project_id)
def stub_out_client_exceptions(self):
def passthru(exceptions, func, *args, **kwargs):
@@ -123,6 +130,21 @@ class _BaseTestCase(object):
'fake-window',
'fake-host')
+ def test_migration_create(self):
+ inst = {'uuid': 'fake-uuid',
+ 'host': 'fake-host',
+ 'node': 'fake-node'}
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': inst['uuid'],
+ 'source_compute': inst['host'],
+ 'source_node': inst['node'],
+ 'fake-key': 'fake-value'}).AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.migration_create(self.context, inst,
+ {'fake-key': 'fake-value'})
+ self.assertEqual(result, 'result')
+
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
@@ -324,6 +346,17 @@ class _BaseTestCase(object):
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
+ def test_instance_info_cache_update(self):
+ fake_values = {'key1': 'val1', 'key2': 'val2'}
+ fake_instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_info_cache_update(self.context, 'fake-uuid',
+ fake_values)
+ self.mox.ReplayAll()
+ self.conductor.instance_info_cache_update(self.context,
+ fake_instance,
+ fake_values)
+
def test_instance_type_get(self):
self.mox.StubOutWithMock(db, 'instance_type_get')
db.instance_type_get(self.context, 'fake-id').AndReturn('fake-type')
@@ -448,6 +481,11 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host',),
dict(topic='compute', host='host'))
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'))
+
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
@@ -617,12 +655,19 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
{'name': 'fake-inst'},
'updated_at', 'asc')
- def _test_stubbed(self, name, *args):
+ def _test_stubbed(self, name, *args, **kwargs):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *args).AndReturn('fake-result')
+ if name == 'service_destroy':
+ # TODO(russellb) This is a hack ... SetUp() starts the conductor()
+ # service. There is a cleanup step that runs after this test which
+ # also deletes the associated service record. This involves a call
+ # to db.service_destroy(), which we have stubbed out.
+ db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
result = getattr(self.conductor, name)(self.context, *args)
- self.assertEqual(result, 'fake-result')
+ self.assertEqual(
+ result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
@@ -639,6 +684,29 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_get_all_compute_by_host(self):
self._test_stubbed('service_get_all_compute_by_host', 'host')
+ def test_service_create(self):
+ self._test_stubbed('service_create', {})
+
+ def test_service_destroy(self):
+ self._test_stubbed('service_destroy', '', returns=False)
+
+ def test_ping(self):
+ timeouts = []
+ calls = dict(count=0)
+
+ def fake_ping(_self, context, message, timeout):
+ timeouts.append(timeout)
+ calls['count'] += 1
+ if calls['count'] < 15:
+ raise rpc_common.Timeout("fake")
+
+ self.stubs.Set(conductor_api.API, 'ping', fake_ping)
+
+ self.conductor.wait_until_ready(self.context)
+
+ self.assertEqual(timeouts.count(10), 10)
+ self.assertTrue(None in timeouts)
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
@@ -656,6 +724,10 @@ class ConductorLocalAPITestCase(ConductorAPITestCase):
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
+ def test_ping(self):
+ # Override test in ConductorAPITestCase
+ pass
+
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
@@ -668,6 +740,11 @@ class ConductorImportTest(test.TestCase):
self.assertTrue(isinstance(conductor.API(),
conductor_api.API))
+ def test_import_conductor_override_to_local(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(use_local=True),
+ conductor_api.LocalAPI))
+
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
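
test_ping above encodes wait_until_ready's retry policy: ten bounded ping attempts with a 10-second timeout, then unbounded (timeout=None) retries until the conductor answers. A hedged sketch of that loop (Timeout and the ping callable are stand-ins):

    class Timeout(Exception):
        """Stand-in for rpc_common.Timeout."""

    def wait_until_ready(ping, context, early_attempts=10, early_timeout=10):
        attempt = 0
        while True:
            timeout = early_timeout if attempt < early_attempts else None
            attempt += 1
            try:
                return ping(context, 'ping', timeout=timeout)
            except Timeout:
                continue
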
diff --git a/nova/tests/fake_hosts.py b/nova/tests/fake_hosts.py
new file mode 100644
index 000000000..e6831d124
--- /dev/null
+++ b/nova/tests/fake_hosts.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides some fake hosts to test host- and service-related functions.
+"""
+
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+OS_API_HOST_LIST = {"hosts": HOST_LIST}
+
+HOST_LIST_NOVA_ZONE = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+SERVICES_LIST = [
+ {"host": "host_c1", "topic": "compute"},
+ {"host": "host_c2", "topic": "compute"}]
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
new file mode 100644
index 000000000..be9afe012
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
new file mode 100644
index 000000000..53afae086
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
new file mode 100644
index 000000000..c46a1695d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
new file mode 100644
index 000000000..ced8e1779
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
new file mode 100644
index 000000000..ca86aeb4e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "swap": 5
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
new file mode 100644
index 000000000..5f54df5cd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ swap="5" />
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
new file mode 100644
index 000000000..e61a08dc1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
new file mode 100644
index 000000000..e8c69ecee
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" swap="5">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 7853d1429..0cbc1352b 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -367,7 +367,6 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-flavor-access')
do_not_approve_additions.append('os-flavor-extra-specs')
- do_not_approve_additions.append('os-flavor-swap')
do_not_approve_additions.append('os-floating-ip-dns')
do_not_approve_additions.append('os-floating-ip-pools')
do_not_approve_additions.append('os-fping')
@@ -1030,6 +1029,55 @@ class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
ctype = 'xml'
+class FlavorSwapJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
+ 'Flavor_swap')
+
+ def _get_flags(self):
+ f = super(FlavorSwapJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorSwap extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_swap_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-get-resp', subs,
+ response)
+
+ def test_flavor_swap_list(self):
+ response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('flavor-swap-list-resp', subs,
+ response)
+
+ def test_flavor_swap_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-swap-post-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-post-resp',
+ subs, response)
+
+
+class FlavorSwapXmlTest(FlavorSwapJsonTest):
+ ctype = 'xml'
+
+
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index 4f07d9de9..5a82e0033 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -71,18 +71,24 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
self.pid = pid
- # Wait for up to a second for workers to get started
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- if len(workers) == self.workers:
- break
-
- time.sleep(.1)
+ # Wait at most 10 seconds to spawn workers
+ cond = lambda: self.workers == len(self._get_workers())
+ timeout = 10
+ self._wait(cond, timeout)
+ workers = self._get_workers()
self.assertEqual(len(workers), self.workers)
return workers
+ def _wait(self, cond, timeout):
+ start = time.time()
+ while True:
+ if cond():
+ break
+ if time.time() - start > timeout:
+ break
+ time.sleep(.1)
+
def tearDown(self):
if self.pid:
# Make sure all processes are stopped
@@ -114,18 +120,14 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
LOG.info('pid of first child is %s' % start_workers[0])
os.kill(start_workers[0], signal.SIGTERM)
- # loop and check if new worker is spawned (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- end_workers = self._get_workers()
- LOG.info('workers: %r' % end_workers)
-
- if start_workers != end_workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to respawn a worker
+ cond = lambda: start_workers != self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
# Make sure worker pids don't match
+ end_workers = self._get_workers()
+ LOG.info('workers: %r' % end_workers)
self.assertNotEqual(start_workers, end_workers)
# check if api service still works
@@ -141,17 +143,13 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
os.kill(self.pid, sig)
- # loop and check if all processes are killed (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- LOG.info('workers: %r' % workers)
-
- if not workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to kill all workers
+ cond = lambda: not self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
+ workers = self._get_workers()
+ LOG.info('workers: %r' % workers)
self.assertFalse(workers, 'No OS processes left.')
def test_terminate_sigkill(self):
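The three inline poll loops above collapse into the shared _wait() helper. The underlying pattern, isolated as a hedged standalone sketch (names illustrative):

    import time

    def wait_for(cond, timeout, interval=0.1):
        # Poll cond() until it returns true or `timeout` seconds elapse;
        # mirrors the _wait() helper added above, but also reports whether
        # the condition was met in time.
        start = time.time()
        while not cond() and time.time() - start < timeout:
            time.sleep(interval)
        return cond()

    # e.g. wait up to 10 seconds for the worker count to settle:
    # wait_for(lambda: len(get_workers()) == expected_workers, timeout=10)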
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index 91c2a4e5e..260ab28c7 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -45,7 +45,7 @@ class ConfigDriveTestCase(test.TestCase):
self.mox.ReplayAll()
- with configdrive.config_drive_helper() as c:
+ with configdrive.ConfigDriveBuilder() as c:
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
os.close(fd)
@@ -77,7 +77,7 @@ class ConfigDriveTestCase(test.TestCase):
self.mox.ReplayAll()
- with configdrive.config_drive_helper() as c:
+ with configdrive.ConfigDriveBuilder() as c:
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
os.close(fd)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index acc290991..7df28bfcb 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -540,7 +540,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(expected, instance_faults)
def test_instance_action_start(self):
- """Create an instance action"""
+ """Create an instance action."""
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -563,7 +563,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(ctxt.project_id, actions[0]['project_id'])
def test_instance_action_finish(self):
- """Create an instance action"""
+ """Create an instance action."""
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -593,7 +593,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(ctxt.project_id, actions[0]['project_id'])
def test_instance_actions_get_by_instance(self):
- """Ensure we can get actions by UUID"""
+ """Ensure we can get actions by UUID."""
ctxt1 = context.get_admin_context()
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
@@ -625,7 +625,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual('run_instance', actions[1]['action'])
def test_instance_action_get_by_instance_and_action(self):
- """Ensure we can get an action by instance UUID and action id"""
+ """Ensure we can get an action by instance UUID and action id."""
ctxt1 = context.get_admin_context()
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
@@ -657,7 +657,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(ctxt1.request_id, action['request_id'])
def test_instance_action_event_start(self):
- """Create an instance action event"""
+ """Create an instance action event."""
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -683,7 +683,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(start_time, events[0]['start_time'])
def test_instance_action_event_finish(self):
- """Finish an instance action event"""
+ """Finish an instance action event."""
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -717,7 +717,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(finish_time, events[0]['finish_time'])
def test_instance_action_event_get_by_id(self):
- """Get a specific instance action event"""
+ """Get a specific instance action event."""
ctxt1 = context.get_admin_context()
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index b6759de54..08b33e201 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -281,18 +281,21 @@ class FakeDriver(object):
project_id, quota_class, defaults, usages))
return resources
- def limit_check(self, context, resources, values):
- self.called.append(('limit_check', context, resources, values))
-
- def reserve(self, context, resources, deltas, expire=None):
- self.called.append(('reserve', context, resources, deltas, expire))
+ def limit_check(self, context, resources, values, project_id=None):
+ self.called.append(('limit_check', context, resources,
+ values, project_id))
+
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
+ self.called.append(('reserve', context, resources, deltas,
+ expire, project_id))
return self.reservations
- def commit(self, context, reservations):
- self.called.append(('commit', context, reservations))
+ def commit(self, context, reservations, project_id=None):
+ self.called.append(('commit', context, reservations, project_id))
- def rollback(self, context, reservations):
- self.called.append(('rollback', context, reservations))
+ def rollback(self, context, reservations, project_id=None):
+ self.called.append(('rollback', context, reservations, project_id))
def usage_reset(self, context, resources):
self.called.append(('usage_reset', context, resources))
@@ -600,7 +603,7 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- )),
+ ), None),
])
def test_reserve(self):
@@ -615,6 +618,9 @@ class QuotaEngineTestCase(test.TestCase):
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
+ result3 = quota_obj.reserve(context, project_id='fake_project',
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
@@ -622,13 +628,19 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- ), None),
+ ), None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), 3600, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
- ), 3600),
+ ), None, 'fake_project'),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
@@ -636,6 +648,9 @@ class QuotaEngineTestCase(test.TestCase):
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
+ self.assertEqual(result3, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
def test_commit(self):
context = FakeContext(None, None)
@@ -644,7 +659,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('commit', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_rollback(self):
@@ -654,7 +669,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_usage_reset(self):
@@ -1205,7 +1220,7 @@ class DbQuotaDriverTestCase(test.TestCase):
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
@@ -1389,7 +1404,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_get_session():
return FakeSession()
- def fake_get_quota_usages(context, session):
+ def fake_get_quota_usages(context, session, project_id):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
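The FakeDriver and engine tests above track the new optional project_id argument threaded through limit_check/reserve/commit/rollback. From the caller's side, sketched with an illustrative tenant id and delta:

    from nova import context
    from nova import quota

    QUOTAS = quota.QUOTAS
    ctxt = context.get_admin_context()

    # project_id=None keeps the old behaviour (the context's own project);
    # passing it explicitly lets an admin operate on another tenant's quota.
    reservations = QUOTAS.reserve(ctxt, project_id='other-tenant', instances=1)
    try:
        # ... do the work the reservation covers ...
        QUOTAS.commit(ctxt, reservations, project_id='other-tenant')
    except Exception:
        QUOTAS.rollback(ctxt, reservations, project_id='other-tenant')
        raise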
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 2d98a2641..4873714f3 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -113,6 +113,9 @@ class ServiceTestCase(test.TestCase):
self.binary = 'nova-fake'
self.topic = 'fake'
self.mox.StubOutWithMock(service, 'db')
+ self.mox.StubOutWithMock(db, 'service_create')
+ self.mox.StubOutWithMock(db, 'service_get_by_args')
+ self.flags(use_local=True, group='conductor')
def test_create(self):
@@ -134,9 +137,9 @@ class ServiceTestCase(test.TestCase):
'report_count': 0,
'id': 1}
- service.db.service_get_by_args(mox.IgnoreArg(),
+ db.service_get_by_args(mox.IgnoreArg(),
self.host, self.binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
+ db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
return service_ref
@@ -182,6 +185,14 @@ class TestWSGIService(test.TestCase):
self.assertNotEqual(0, test_service.port)
test_service.stop()
+ def test_service_random_port_with_ipv6(self):
+ CONF.set_default("test_service_listen", "::1")
+ test_service = service.WSGIService("test_service")
+ test_service.start()
+ self.assertEqual("::1", test_service.host)
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
class TestLauncher(test.TestCase):
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 9e9309dfe..199ae30b1 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -114,7 +114,7 @@ class _FakeDriverBackendTestCase(object):
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
- self.stubs.Set(nova.virt.configdrive._ConfigDriveBuilder,
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def _teardown_fakelibvirt(self):
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
index eda735cae..b4b25ed97 100644
--- a/nova/tests/test_wsgi.py
+++ b/nova/tests/test_wsgi.py
@@ -90,3 +90,12 @@ class TestWSGIServer(test.TestCase):
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
+
+ def test_start_random_port_with_ipv6(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="::1", port=0)
+ server.start()
+ self.assertEqual("::1", server.host)
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b3437db62..0b1c5d0e7 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1780,19 +1780,12 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
}
def test_get_all_bw_counters(self):
- class testinstance(object):
- def __init__(self, name, uuid):
- self.name = name
- self.uuid = uuid
+ instances = [dict(name='test1', uuid='1-2-3'),
+ dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth)
- result = self.conn.get_all_bw_counters([testinstance(
- name='test1',
- uuid='1-2-3'),
- testinstance(
- name='test2',
- uuid='4-5-6')])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
@@ -1816,14 +1809,11 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
- class testinstance(object):
- def __init__(self):
- self.name = "instance-0001"
- self.uuid = "1-2-3-4-5"
+ instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
- result = self.conn.get_all_bw_counters([testinstance()])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
new file mode 100644
index 000000000..275088af0
--- /dev/null
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import fixtures
+import mox
+import uuid
+
+from nova import test
+from nova.tests.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi import vm_utils
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class GenerateConfigDriveTestCase(test.TestCase):
+ def test_no_admin_pass(self):
+ # This is here to avoid masking errors; it shouldn't be used normally.
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.xenapi.vm_utils.destroy_vdi', _fake_noop))
+
+ # Mocks
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * 1024 * 1024).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(self, instance, content=None, extra_md=None):
+ pass
+
+ def metadata_for_config_drive(self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice')
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
index 5b5c38139..7cc5c70da 100644
--- a/nova/tests/virt/xenapi/test_volumeops.py
+++ b/nova/tests/virt/xenapi/test_volumeops.py
@@ -20,9 +20,47 @@ from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
+ def test_detach_volume_call(self):
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, '_is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.vm_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils._is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref')
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
- self.mox.StubOutWithMock(ops, 'connect_volume')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
@@ -32,7 +70,7 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
- ops.connect_volume(
+ ops._connect_volume(
'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=True)
self.mox.ReplayAll()
@@ -42,7 +80,7 @@ class VolumeAttachTestCase(test.TestCase):
def test_attach_volume_no_hotplug(self):
ops = volumeops.VolumeOps('session')
- self.mox.StubOutWithMock(ops, 'connect_volume')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
@@ -52,7 +90,7 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
- ops.connect_volume(
+ ops._connect_volume(
'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=False)
self.mox.ReplayAll()
@@ -85,7 +123,8 @@ class VolumeAttachTestCase(test.TestCase):
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
- self.mox.StubOutWithMock(ops, 'introduce_sr')
+ self.mox.StubOutWithMock(
+ volumeops.volume_utils, 'introduce_sr_unless_present')
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
@@ -93,7 +132,8 @@ class VolumeAttachTestCase(test.TestCase):
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
- ops.introduce_sr(sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
+ volumeops.volume_utils.introduce_sr_unless_present(
+ session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
@@ -104,7 +144,7 @@ class VolumeAttachTestCase(test.TestCase):
self.mox.ReplayAll()
- ops.connect_volume(connection_data, dev_number, instance_name,
+ ops._connect_volume(connection_data, dev_number, instance_name,
vm_ref, hotplug=False)
self.assertEquals(False, called['xenapi'])
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 3659da711..462e0c444 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -166,6 +166,9 @@ class BareMetalDriver(driver.ComputeDriver):
# TODO(deva): define the version properly elsewhere
return 1
+ def legacy_nwinfo(self):
+ return True
+
def list_instances(self):
l = []
ctx = nova_context.get_admin_context()
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index f97126e72..b94ac9032 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -154,17 +154,17 @@ def get_deploy_ari_id(instance):
def get_image_dir_path(instance):
- """Generate the dir for an instances disk"""
+ """Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
- """Generate the full path for an instances disk"""
+ """Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_pxe_config_file_path(instance):
- """Generate the path for an instances PXE config file"""
+ """Generate the path for an instances PXE config file."""
return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
@@ -185,7 +185,7 @@ def get_partition_sizes(instance):
def get_pxe_mac_path(mac):
- """Convert a MAC address into a PXE config file name"""
+ """Convert a MAC address into a PXE config file name."""
return os.path.join(
CONF.baremetal.tftp_root,
'pxelinux.cfg',
@@ -232,7 +232,7 @@ def get_tftp_image_info(instance):
class PXE(base.NodeDriver):
- """PXE bare metal driver"""
+ """PXE bare metal driver."""
def __init__(self):
super(PXE, self).__init__()
@@ -352,7 +352,7 @@ class PXE(base.NodeDriver):
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
- """Prepare all the images for this instance"""
+ """Prepare all the images for this instance."""
tftp_image_info = get_tftp_image_info(instance)
self._cache_tftp_images(context, instance, tftp_image_info)
@@ -361,7 +361,7 @@ class PXE(base.NodeDriver):
injected_files, admin_password)
def destroy_images(self, context, node, instance):
- """Delete instance's image file"""
+ """Delete instance's image file."""
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.unlink_without_raise(get_image_dir_path(instance))
@@ -420,7 +420,7 @@ class PXE(base.NodeDriver):
bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
def deactivate_bootloader(self, context, node, instance):
- """Delete PXE bootloader images and config"""
+ """Delete PXE bootloader images and config."""
try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 321bf8389..d4352c5e6 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -17,7 +17,6 @@
"""Config Drive v2 helper."""
-import contextlib
import os
import shutil
import tempfile
@@ -54,18 +53,12 @@ configdrive_opts = [
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
+# Config drives are 64 MB when we can't size to the exact size of the data.
+CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
-@contextlib.contextmanager
-def config_drive_helper(instance_md=None):
- cdb = _ConfigDriveBuilder(instance_md=instance_md)
- try:
- yield cdb
- finally:
- cdb.cleanup()
-
-class _ConfigDriveBuilder(object):
- """Don't use this directly, use the fancy pants contextlib helper above!"""
+class ConfigDriveBuilder(object):
+ """Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
@@ -79,6 +72,17 @@ class _ConfigDriveBuilder(object):
if instance_md is not None:
self.add_instance_metadata(instance_md)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exctype, excval, exctb):
+ if exctype is not None:
+ # NOTE(mikal): this means we're being cleaned up because an
+ # exception was thrown. All bets are off now, and we should not
+ # swallow the exception
+ return False
+ self.cleanup()
+
def _add_file(self, path, data):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
@@ -116,10 +120,9 @@ class _ConfigDriveBuilder(object):
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
- # equivalent to genisoimage for vfat filesystems. vfat images are
- # always 64mb.
+ # equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
- f.truncate(64 * 1024 * 1024)
+ f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
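With the contextlib wrapper gone, ConfigDriveBuilder is used directly as a context manager: on a clean exit __exit__ runs cleanup(), while an exception raised inside the block propagates (the False return) instead of being masked by teardown. A hedged usage sketch (path illustrative; make_drive shells out to genisoimage or mkfs, so those tools must be present):

    from nova.virt import configdrive

    # instance_md is normally an InstanceMetadata object; None is accepted
    # by __init__ and simply skips the metadata injection step.
    with configdrive.ConfigDriveBuilder(instance_md=None) as cdb:
        cdb._add_file('openstack/latest/meta_data.json', '{}')
        cdb.make_drive('/tmp/disk.config')
    # On this clean-exit path, cleanup() has already removed the tempdir.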
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index b6a8a91ad..e396de6a0 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -727,11 +727,10 @@ class ComputeDriver(object):
raise NotImplementedError()
def legacy_nwinfo(self):
- """
- Indicate if the driver requires the legacy network_info format.
- """
- # TODO(tr3buchet): update all subclasses and remove this
- return True
+ """True if the driver requires the legacy network_info format."""
+ # TODO(tr3buchet): update all subclasses and remove this method and
+ # related helpers.
+ raise NotImplementedError(self.legacy_nwinfo)
def manage_image_cache(self, context, all_instances):
"""
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 88346cc3a..0a29a6d67 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -403,6 +403,9 @@ class FakeDriver(driver.ComputeDriver):
def list_instance_uuids(self):
return []
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 83493f7ff..1fba15506 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -192,7 +192,7 @@ class VMOps(baseops.BaseOps):
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
- with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except exception.ProcessExecutionError, e:
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index ea6e0e6a0..42d9dd99b 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -433,6 +433,9 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.NovaException:
return False
+ def legacy_nwinfo(self):
+ return True
+
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
@@ -964,8 +967,7 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
- xml = self._get_domain_xml(instance, network_info,
- block_device_info=None)
+ xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -1383,7 +1385,7 @@ class LibvirtDriver(driver.ComputeDriver):
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
- with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
@@ -2032,8 +2034,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_total = libvirt_utils.volume_group_total_space(
+ CONF.libvirt_images_volume_group)
+ return vg_total / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
@@ -2043,15 +2050,26 @@ class LibvirtDriver(driver.ComputeDriver):
"""
total = 0
- for dom_id in self.list_instance_ids():
- dom = self._conn.lookupByID(dom_id)
- vcpus = dom.vcpus()
- if vcpus is None:
- # dom.vcpus is not implemented for lxc, but returning 0 for
- # a used count is hardly useful for something measuring usage
- total += 1
- else:
- total += len(vcpus[1])
+ dom_ids = self.list_instance_ids()
+ for dom_id in dom_ids:
+ try:
+ dom = self._conn.lookupByID(dom_id)
+ vcpus = dom.vcpus()
+ if vcpus is None:
+ # dom.vcpus is not implemented for lxc, but returning 0 for
+ # a used count is hardly useful for something measuring
+ # usage
+ total += 1
+ else:
+ total += len(vcpus[1])
+ except libvirt.libvirtError as err:
+ if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ LOG.debug(_("List of domains returned by libVirt: %s")
+ % dom_ids)
+ LOG.warn(_("libVirt can't find a domain with id: %s")
+ % dom_id)
+ continue
+ raise
# NOTE(gtt116): give a chance to do other tasks.
greenthread.sleep(0)
return total
@@ -2101,8 +2119,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_used = libvirt_utils.volume_group_used_space(
+ CONF.libvirt_images_volume_group)
+ return vg_used / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
"""Get hypervisor type.
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 73c3b552b..9c8d192c7 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -144,6 +144,36 @@ def volume_group_free_space(vg):
return int(out.strip())
+def volume_group_total_space(vg):
+ """Return total space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--units', 'b', '-o', 'vg_size', vg,
+ run_as_root=True)
+ return int(out.strip())
+
+
+def volume_group_used_space(vg):
+ """Return available space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--separator', '|',
+ '--units', 'b', '-o', 'vg_size,vg_free', vg,
+ run_as_root=True)
+
+ info = out.split('|')
+ if len(info) != 2:
+ raise RuntimeError(_("vg %s must be an LVM volume group") % vg)
+
+ return int(info[0]) - int(info[1])
+
+
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index c883d1edb..8734df1f6 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -118,6 +118,9 @@ class VMWareESXDriver(driver.ComputeDriver):
# FIXME(sateesh): implement this
pass
+ def legacy_nwinfo(self):
+ return True
+
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index bdb73b28f..0acc360e8 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -311,7 +311,7 @@ class XenAPIDriver(driver.ComputeDriver):
# we only care about VMs that correspond to a nova-managed
# instance:
- imap = dict([(inst.name, inst.uuid) for inst in instances])
+ imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 40d43da8d..6a0116098 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -34,6 +34,7 @@ from xml.parsers import expat
from eventlet import greenthread
+from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
@@ -43,6 +44,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import driver
@@ -153,6 +155,7 @@ class ImageType(object):
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
+ | 6 - config drive
"""
KERNEL = 0
@@ -161,7 +164,9 @@ class ImageType(object):
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
- _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
+ DISK_CONFIGDRIVE = 6
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
+ DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
@@ -169,8 +174,9 @@ class ImageType(object):
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
+ DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
- DISK_ISO_STR)
+ DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
@@ -178,14 +184,15 @@ class ImageType(object):
@classmethod
def get_role(cls, image_type_id):
- " Get the role played by the image, based on its type "
+ """Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
- cls.DISK_ISO: 'iso'
+ cls.DISK_ISO: 'iso',
+ cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
@@ -868,6 +875,42 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
CONF.default_ephemeral_format)
+def generate_configdrive(session, instance, vm_ref, userdevice,
+ admin_password=None, files=None):
+ sr_ref = safe_find_sr(session)
+ vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
+ 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
+
+ try:
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ dev_path = utils.make_dev_path(dev)
+
+ # NOTE(mikal): libvirt supports injecting the admin password as
+ # well. This is not currently implemented for xenapi as it is not
+ # supported by the existing file injection
+ extra_md = {}
+ if admin_password:
+ extra_md['admin_pass'] = admin_password
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=files,
+ extra_md=extra_md)
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
+ with utils.tempdir() as tmp_path:
+ tmp_file = os.path.join(tmp_path, 'configdrive')
+ cdb.make_drive(tmp_file)
+
+ utils.execute('dd',
+ 'if=%s' % tmp_file,
+ 'of=%s' % dev_path,
+ run_as_root=True)
+
+ create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
+ read_only=True)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ destroy_vdi(session, vdi_ref)
+
+
def create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e8e0f3cb0..4a8372cda 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -40,6 +40,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
+from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
@@ -77,6 +78,7 @@ DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
+DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
@@ -344,7 +346,8 @@ class VMOps(object):
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
- disk_image_type)
+ disk_image_type, admin_password,
+ injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
@@ -437,7 +440,12 @@ class VMOps(object):
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
- self.inject_instance_metadata(instance, vm_ref)
+
+ # NOTE(mikal): file injection only happens if we are _not_ using a
+ # configdrive.
+ if not configdrive.required_by(instance):
+ self.inject_instance_metadata(instance, vm_ref)
+
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
@@ -491,7 +499,7 @@ class VMOps(object):
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
- disk_image_type):
+ disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance['instance_type']
@@ -537,6 +545,13 @@ class VMOps(object):
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
+ # Attach (optional) configdrive v2 disk
+ if configdrive.required_by(instance):
+ vm_utils.generate_configdrive(self._session, instance, vm_ref,
+ DEVICE_CONFIGDRIVE,
+ admin_password=admin_password,
+ files=files)
+
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index e584bac67..7921e3e87 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -22,6 +22,7 @@ and storage repositories
import re
import string
+from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -381,3 +382,28 @@ def _get_target_port(iscsi_string):
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
+
+
+def introduce_sr_unless_present(session, sr_uuid, label, params):
+ LOG.debug(_("Introducing SR %s") % label)
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref:
+ LOG.debug(_('SR found in xapi database. No need to introduce'))
+ return sr_ref
+ sr_ref = introduce_sr(session, sr_uuid, label, params)
+
+ if sr_ref is None:
+ raise exception.NovaException(_('Could not introduce SR'))
+ return sr_ref
+
+
+def forget_sr_if_present(session, sr_uuid):
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref is None:
+ LOG.debug(_('SR %s not found in the xapi database') % sr_uuid)
+ return
+ try:
+ forget_sr(session, sr_uuid)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise exception.NovaException(_('Could not forget SR'))
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 51c97c9de..5f79b6c3a 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -35,76 +35,6 @@ class VolumeOps(object):
def __init__(self, session):
self._session = session
- def create_volume_for_sm(self, volume, sr_uuid):
- LOG.debug("Creating volume for Storage Manager")
-
- sm_vol_rec = {}
- try:
- sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(_('Unable to get SR using uuid'))
- #Create VDI
- label = 'vol-' + volume['id']
- desc = 'xensm volume for ' + volume['id']
- # size presented to xenapi is in bytes, while euca api is in GB
- vdi_size = volume['size'] * 1024 * 1024 * 1024
- vdi_ref = vm_utils.create_vdi(self._session, sr_ref,
- None, label, desc,
- vdi_size, False)
- vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
- sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
- return sm_vol_rec
-
- def delete_volume_for_sm(self, vdi_uuid):
- vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
- if vdi_ref is None:
- raise exception.NovaException(_('Could not find VDI ref'))
-
- vm_utils.destroy_vdi(self._session, vdi_ref)
-
- def create_sr(self, label, params):
- LOG.debug(_("Creating SR %s") % label)
- sr_ref = volume_utils.create_sr(self._session, label, params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not create SR'))
- sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
- if sr_rec is None:
- raise exception.NovaException(_('Could not retrieve SR record'))
- return sr_rec['uuid']
-
- # Checks if sr has already been introduced to this host
- def introduce_sr(self, sr_uuid, label, params):
- LOG.debug(_("Introducing SR %s") % label)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- LOG.debug(_('SR found in xapi database. No need to introduce'))
- return sr_ref
- sr_ref = volume_utils.introduce_sr(self._session, sr_uuid, label,
- params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not introduce SR'))
- return sr_ref
-
- def is_sr_on_host(self, sr_uuid):
- LOG.debug(_('Checking for SR %s') % sr_uuid)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- return True
- return False
-
- # Checks if sr has been introduced
- def forget_sr(self, sr_uuid):
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref is None:
- LOG.INFO(_('SR %s not found in the xapi database') % sr_uuid)
- return
- try:
- volume_utils.forget_sr(self._session, sr_uuid)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise exception.NovaException(_('Could not forget SR'))
-
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""Attach volume storage to VM instance."""
@@ -122,13 +52,13 @@ class VolumeOps(object):
connection_data = connection_info['data']
dev_number = volume_utils.get_device_number(mountpoint)
- self.connect_volume(connection_data, dev_number, instance_name,
+ self._connect_volume(connection_data, dev_number, instance_name,
vm_ref, hotplug=hotplug)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
- def connect_volume(self, connection_data, dev_number, instance_name,
+ def _connect_volume(self, connection_data, dev_number, instance_name,
vm_ref, hotplug=True):
description = 'Disk-for:%s' % instance_name
@@ -137,7 +67,8 @@ class VolumeOps(object):
# Introduce SR
try:
- sr_ref = self.introduce_sr(uuid, label, sr_params)
+ sr_ref = volume_utils.introduce_sr_unless_present(
+ self._session, uuid, label, sr_params)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -159,7 +90,7 @@ class VolumeOps(object):
vdi_uuid, target_lun)
except volume_utils.StorageError, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
@@ -169,7 +100,7 @@ class VolumeOps(object):
osvol=True)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
@@ -178,7 +109,7 @@ class VolumeOps(object):
self._session.call_xenapi("VBD.plug", vbd_ref)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
@@ -190,7 +121,7 @@ class VolumeOps(object):
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.mountpoint_to_number(mountpoint)
+ device_number = volume_utils.get_device_number(mountpoint)
try:
vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
@@ -199,9 +130,7 @@ class VolumeOps(object):
raise Exception(_('Unable to locate volume %s') % mountpoint)
try:
- vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
- if vm_rec['power_state'] != 'Halted':
+ if not vm_utils._is_vm_shutdown(self._session, vm_ref):
vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
@@ -214,6 +143,7 @@ class VolumeOps(object):
# Forget SR only if no other volumes on this host are using it
try:
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
volume_utils.purge_sr(self._session, sr_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index e7e731523..c103526da 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -20,6 +20,7 @@
"""Utility methods for working with WSGI servers."""
import os.path
+import socket
import sys
import eventlet
@@ -82,8 +83,14 @@ class Server(object):
raise exception.InvalidInput(
reason='The backlog must be more than 1')
- self._socket = eventlet.listen((host, port), backlog=backlog)
- (self.host, self.port) = self._socket.getsockname()
+ try:
+ socket.inet_pton(socket.AF_INET6, host)
+ family = socket.AF_INET6
+ except Exception:
+ family = socket.AF_INET
+
+ self._socket = eventlet.listen((host, port), family, backlog=backlog)
+ (self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
def start(self):
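The family selection here is the usual inet_pton probe: a host string that parses as a literal IPv6 address gets AF_INET6; everything else (IPv4 literals, hostnames) falls back to AF_INET. Isolated as a hedged sketch:

    import socket

    def address_family(host):
        # Mirrors the probe added to nova.wsgi.Server: try to parse the
        # host as an IPv6 literal and choose the socket family from that.
        try:
            socket.inet_pton(socket.AF_INET6, host)
            return socket.AF_INET6
        except socket.error:
            return socket.AF_INET

    assert address_family('::1') == socket.AF_INET6
    assert address_family('127.0.0.1') == socket.AF_INET
    assert address_family('localhost') == socket.AF_INET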
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
index dcbee3ded..0f88e52bb 100644
--- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
@@ -22,7 +22,7 @@ import subprocess
def execute_get_output(*command):
- """Execute and return stdout"""
+ """Execute and return stdout."""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
@@ -32,7 +32,7 @@ def execute_get_output(*command):
def execute(*command):
- """Execute without returning stdout"""
+ """Execute without returning stdout."""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
index 5cfd32dbd..be873a7e8 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -38,7 +38,7 @@ pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
- """Errors that occur when calling xenstore-* through subprocesses"""
+ """Errors that occur when calling xenstore-* through subprocesses."""
def __init__(self, cmd, return_code, stderr, stdout):
msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
diff --git a/run_tests.sh b/run_tests.sh
index c4a1d9efc..1a54c1bef 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -81,7 +81,7 @@ function run_tests {
if [ $coverage -eq 1 ]; then
# Do not test test_coverage_ext when gathering coverage.
if [ "x$testrargs" = "x" ]; then
- testrargs = "^(?!.*test_coverage_ext).*$"
+ testrargs="^(?!.*test_coverage_ext).*$"
fi
export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
fi
@@ -116,18 +116,24 @@ function run_pep8 {
srcfiles=`find nova -type f -name "*.py" ! -wholename "nova\/openstack*"`
srcfiles+=" `find bin -type f ! -name "nova.conf*" ! -name "*api-paste.ini*"`"
srcfiles+=" `find tools -type f -name "*.py"`"
+ srcfiles+=" `find plugins -type f -name "*.py"`"
+ srcfiles+=" `find smoketests -type f -name "*.py"`"
srcfiles+=" setup.py"
# Until all these issues get fixed, ignore.
- ignore='--ignore=N402,E12,E711,E721,E712'
+ ignore='--ignore=E12,E711,E721,E712'
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
+ # NOTE(sdague): as of grizzly-2 these are passing, but leaving the comment
+ # in here in case we need to break it out again once more of our hacking
+ # checks are working.
+ #
# NOTE(sirp): Dom0 plugins are written for Python 2.4, meaning some HACKING
# checks are too strict.
- pep8onlyfiles=`find plugins -type f -name "*.py"`
- pep8onlyfiles+=" `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`"
- ${wrapper} pep8 ${ignore} ${pep8onlyfiles}
+ # pep8onlyfiles=`find plugins -type f -name "*.py"`
+ # pep8onlyfiles+=" `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`"
+ # ${wrapper} pep8 ${ignore} ${pep8onlyfiles}
}
diff --git a/smoketests/base.py b/smoketests/base.py
index 69222878b..f6cec3168 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -63,7 +63,7 @@ class SmokeTestCase(unittest.TestCase):
return status == 0
def wait_for_running(self, instance, tries=60, wait=1):
- """Wait for instance to be running"""
+ """Wait for instance to be running."""
for x in xrange(tries):
instance.update()
if instance.state.startswith('running'):
@@ -73,7 +73,7 @@ class SmokeTestCase(unittest.TestCase):
return False
def wait_for_deleted(self, instance, tries=60, wait=1):
- """Wait for instance to be deleted"""
+ """Wait for instance to be deleted."""
for x in xrange(tries):
try:
#NOTE(dprince): raises exception when instance id disappears
@@ -85,7 +85,7 @@ class SmokeTestCase(unittest.TestCase):
return False
def wait_for_ping(self, ip, command="ping", tries=120):
- """Wait for ip to be pingable"""
+ """Wait for ip to be pingable."""
for x in xrange(tries):
if self.can_ping(ip, command):
return True
@@ -93,7 +93,7 @@ class SmokeTestCase(unittest.TestCase):
return False
def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
- """Wait for ip to be sshable"""
+ """Wait for ip to be sshable."""
for x in xrange(tries):
try:
conn = self.connect_ssh(ip, key_name)
@@ -141,9 +141,7 @@ class SmokeTestCase(unittest.TestCase):
**kwargs)
def split_clc_url(self, clc_url):
- """
- Splits a cloud controller endpoint url.
- """
+ """Splits a cloud controller endpoint url."""
parts = httplib.urlsplit(clc_url)
is_secure = parts.scheme == 'https'
ip, port = parts.netloc.split(':')
diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py
index 836e48578..3185cb93d 100644
--- a/tools/conf/extract_opts.py
+++ b/tools/conf/extract_opts.py
@@ -39,7 +39,6 @@ OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
-OPTION_HELP_INDENT = "####"
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
@@ -47,10 +46,6 @@ WORDWRAP_WIDTH = 60
def main(srcfiles):
- print '\n'.join(['#' * 20, '# nova.conf sample #', '#' * 20,
- '', '[DEFAULT]', ''])
- _list_opts(cfg.CommonConfigOpts,
- cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__)
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
@@ -63,31 +58,94 @@ def main(srcfiles):
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
+
+ # opts_by_group is a mapping of group name to an options list
+ # The options list is a list of (module, options) tuples
+ opts_by_group = {'DEFAULT': []}
+
+ opts_by_group['DEFAULT'].append(
+ (cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__,
+ _list_opts(cfg.CommonConfigOpts)[0][1]))
+
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
- _print_module(mod_str)
+ if mod_str.endswith('.__init__'):
+ mod_str = mod_str[:mod_str.rfind(".")]
+
+ mod_obj = _import_module(mod_str)
+ if not mod_obj:
+ continue
+
+ for group, opts in _list_opts(mod_obj):
+ opts_by_group.setdefault(group, []).append((mod_str, opts))
+
+ print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
+ for group, opts in opts_by_group.items():
+ print_group_opts(group, opts)
+
print "# Total option count: %d" % OPTION_COUNT
-def _print_module(mod_str):
- mod_obj = None
- if mod_str.endswith('.__init__'):
- mod_str = mod_str[:mod_str.rfind(".")]
+def _import_module(mod_str):
try:
- mod_obj = importutils.import_module(mod_str)
+ return importutils.import_module(mod_str)
except (ValueError, AttributeError), err:
- return
+ return None
except ImportError, ie:
sys.stderr.write("%s\n" % str(ie))
- return
+ return None
except Exception, e:
- return
- _list_opts(mod_obj, mod_str)
+ return None
+
+
+def _guess_groups(opt, mod_obj):
+ groups = []
+
+ # is it in the DEFAULT group?
+ if (opt.dest in cfg.CONF and
+ not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)):
+ groups.append('DEFAULT')
+ # what other groups is it in?
+ for key, value in cfg.CONF.items():
+ if not isinstance(value, cfg.CONF.GroupAttr):
+ continue
+ if opt.dest not in value:
+ continue
+ groups.append(key)
-def _list_opts(obj, name):
+ if len(groups) == 1:
+ return groups[0]
+
+ group = None
+ for g in groups:
+ if g in mod_obj.__name__:
+ group = g
+ break
+
+ if group is None and 'DEFAULT' in groups:
+ sys.stderr.write("Guessing that " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in DEFAULT group out of " +
+ ','.join(groups) + "\n")
+ return 'DEFAULT'
+
+ if group is None:
+ sys.stderr("Unable to guess what group " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in out of " + ','.join(groups) + "\n")
+ sys.exit(1)
+
+ sys.stderr.write("Guessing that " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in the " + group +
+ " group out of " + ','.join(groups) + "\n")
+ return group
+
+
+def _list_opts(obj):
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
@@ -96,14 +154,26 @@ def _list_opts(obj, name):
elif (isinstance(attr_obj, list) and
all(map(lambda x: isinstance(x, cfg.Opt), attr_obj))):
opts.extend(attr_obj)
- if not opts:
- return
- global OPTION_COUNT
- OPTION_COUNT += len(opts)
- print '######## defined in %s ########\n' % name
+
+ ret = {}
for opt in opts:
- _print_opt(opt)
+ ret.setdefault(_guess_groups(opt, obj), []).append(opt)
+ return ret.items()
+
+
+def print_group_opts(group, opts_by_module):
+ print "[%s]" % group
print
+ global OPTION_COUNT
+ for mod, opts in opts_by_module:
+ OPTION_COUNT += len(opts)
+ print '#'
+ print '# Options defined in %s' % mod
+ print '#'
+ print
+ for opt in opts:
+ _print_opt(opt)
+ print
def _get_my_ip():
@@ -134,10 +204,14 @@ def _sanitize_default(s):
return s
-def _wrap(msg, indent):
- padding = ' ' * indent
- prefix = "\n%s %s " % (OPTION_HELP_INDENT, padding)
- return prefix.join(textwrap.wrap(msg, WORDWRAP_WIDTH))
+OPT_TYPES = {
+ 'StrOpt': 'string value',
+ 'BoolOpt': 'boolean value',
+ 'IntOpt': 'integer value',
+ 'FloatOpt': 'floating point value',
+ 'ListOpt': 'list value',
+ 'MultiStrOpt': 'multi valued',
+}
def _print_opt(opt):
@@ -150,35 +224,35 @@ def _print_opt(opt):
except (ValueError, AttributeError), err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
+ opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+ print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
try:
if opt_default is None:
- print '# %s=<None>' % opt_name
+ print '#%s=<None>' % opt_name
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
- print '# %s=%s' % (opt_name, _sanitize_default(opt_default))
+ print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
- print '# %s=%s' % (opt_name, str(opt_default).lower())
+ print '#%s=%s' % (opt_name, str(opt_default).lower())
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
- print '# %s=%s' % (opt_name, opt_default)
+ print '#%s=%s' % (opt_name, opt_default)
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
- print '# %s=%s' % (opt_name, opt_default)
+ print '#%s=%s' % (opt_name, opt_default)
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
- print '# %s=%s' % (opt_name, ','.join(opt_default))
+ print '#%s=%s' % (opt_name, ','.join(opt_default))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
for default in opt_default:
- print '# %s=%s' % (opt_name, default)
+ print '#%s=%s' % (opt_name, default)
+ print
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
- opt_type_tag = "(%s)" % opt_type
- print OPTION_HELP_INDENT, opt_type_tag, _wrap(opt_help, len(opt_type_tag))
- print
if __name__ == '__main__':
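The heart of this change is the group-guessing heuristic in _guess_groups. As a minimal standalone sketch of the same idea, using plain dicts and the hypothetical names conf and guess_group in place of oslo-config's cfg.CONF and its GroupAttr objects:

# A simplified stand-in for cfg.CONF: section name -> registered options.
conf = {
    'DEFAULT': {'verbose': False, 'host': 'localhost'},
    'baremetal': {'host': 'ironic0'},
}


def guess_group(opt_name, module_name):
    # Collect every section that registers an option with this name.
    groups = [g for g, opts in conf.items() if opt_name in opts]
    if len(groups) == 1:
        return groups[0]
    # Tie-break 1: prefer a group whose name appears in the module path.
    for g in groups:
        if g in module_name:
            return g
    # Tie-break 2: fall back to DEFAULT, as the script above does.
    if 'DEFAULT' in groups:
        return 'DEFAULT'
    raise SystemExit("Unable to guess a group for %s" % opt_name)


print(guess_group('host', 'nova.virt.baremetal.driver'))  # baremetal
print(guess_group('host', 'nova.netconf'))                # DEFAULT
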
diff --git a/tools/lintstack.sh b/tools/lintstack.sh
index 848a16fa5..42c6a60b3 100755
--- a/tools/lintstack.sh
+++ b/tools/lintstack.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
+# Copyright (c) 2012-2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,13 +15,31 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Use lintstack.py to compare pylint errors between HEAD and HEAD~1
-
+# Use lintstack.py to compare pylint errors.
+# We run pylint twice: once on HEAD, and once on the code as it was
+# before the patch under review.
set -e
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
GITHEAD=`git rev-parse HEAD`
cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py
-git checkout HEAD~1
+
+if git rev-parse HEAD^2 2>/dev/null; then
+ # HEAD is a merge commit. Here, the patch to review is
+ # HEAD^2, the master branch is at HEAD^1, and the patch was
+ # written based on HEAD^2~1.
+ PREV_COMMIT=`git rev-parse HEAD^2~1`
+ git checkout HEAD~1
+ # The git merge is necessary for reviews with a series of patches.
+ # For a single patch the merge is a no-op, so it does no harm.
+ git merge $PREV_COMMIT
+else
+ # HEAD is not a merge commit. This won't happen on gerrit.
+ # Most likely you are running against your own patch locally.
+ # We assume the patch to examine is HEAD, and we compare it
+ # against HEAD~1.
+ git checkout HEAD~1
+fi
+
# First generate tools/pylint_exceptions from HEAD~1
$TOOLS_DIR/lintstack.head.py generate
# Then use that as a reference to compare against HEAD
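The new branch logic hinges on the fact that `git rev-parse HEAD^2` only succeeds when HEAD has a second parent. A sketch of the same check in Python (illustrative only, not part of the patch):

import os
import subprocess


def is_merge_commit(rev='HEAD'):
    # rev^2 names the second parent, which exists only for merges,
    # so rev-parse exits non-zero for an ordinary commit.
    with open(os.devnull, 'w') as devnull:
        return subprocess.call(['git', 'rev-parse', '%s^2' % rev],
                               stdout=devnull, stderr=devnull) == 0
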
diff --git a/tools/pip-requires b/tools/pip-requires
index 1eb09ff65..1845ba7dd 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -8,7 +8,7 @@ eventlet>=0.9.17
kombu>=1.0.4
lxml>=2.3
routes==1.12.3
-WebOb>=1.0.8
+WebOb==1.2.3
greenlet>=0.3.1
PasteDeploy==1.5.0
paste
diff --git a/tools/test-requires b/tools/test-requires
index 6ee42d31c..c1683fe27 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -11,5 +11,5 @@ pep8==1.3.3
pylint==0.25.2
python-subunit
sphinx>=1.1.2
-testrepository>=0.0.8
+testrepository>=0.0.12
testtools>=0.9.22
diff --git a/tox.ini b/tox.ini
index ca5e6e778..cf565c19f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,8 +8,7 @@ setenv = VIRTUAL_ENV={envdir}
LC_ALL=C
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
-commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
- bash -c 'testr run --parallel {posargs} ; RET=$? ; echo "Slowest Tests" ; testr slowest && exit $RET'
+commands = python setup.py testr --slowest --testr-args='{posargs}'
[tox:jenkins]
sitepackages = True
@@ -18,9 +17,9 @@ downloadcache = ~/cache/pip
[testenv:pep8]
deps=pep8==1.3.3
commands =
- python tools/hacking.py --ignore=N402,E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
- python tools/hacking.py --ignore=N402,E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
--filename=nova* bin
[testenv:pylint]
@@ -34,13 +33,11 @@ deps = pyflakes
commands = python tools/flakes.py nova
[testenv:cover]
-# Need to omit DynamicallyCompiledCheetahTemplate.py from coverage because
-# it ceases to exist post test run. Also do not run test_coverage_ext tests
-# while gathering coverage as those tests conflict with coverage.
-setenv = OMIT=--omit=DynamicallyCompiledCheetahTemplate.py
- PYTHON=coverage run --source nova --parallel-mode
-commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
- bash -c 'testr run --parallel \^\(\?\!\.\*test_coverage_ext\)\.\*\$ ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
+# Also do not run test_coverage_ext tests while gathering coverage as those
+# tests conflict with coverage.
+commands =
+ python setup.py testr --coverage \
+ --testr-args='^(?!.*test_coverage_ext).*$'
[testenv:venv]
commands = {posargs}