summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-rootwrap40
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-resp.xml4
-rw-r--r--doc/api_samples/os-deferred-delete/force-delete-post-req.json3
-rw-r--r--doc/api_samples/os-deferred-delete/force-delete-post-req.xml2
-rw-r--r--doc/api_samples/os-deferred-delete/restore-post-req.json3
-rw-r--r--doc/api_samples/os-deferred-delete/restore-post-req.xml2
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-req.json16
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-req.xml19
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-resp.json16
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json16
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml14
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.json16
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.xml14
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-req.json5
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-req.xml4
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.json15
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.xml14
-rw-r--r--etc/nova/rootwrap.conf6
-rw-r--r--etc/nova/rootwrap.d/api-metadata.filters12
-rw-r--r--etc/nova/rootwrap.d/compute.filters30
-rw-r--r--etc/nova/rootwrap.d/network.filters18
-rw-r--r--nova/compute/manager.py90
-rw-r--r--nova/compute/resource_tracker.py7
-rw-r--r--nova/compute/utils.py23
-rw-r--r--nova/conductor/manager.py2
-rw-r--r--nova/db/api.py18
-rw-r--r--nova/db/sqlalchemy/api.py79
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py69
-rw-r--r--nova/db/sqlalchemy/models.py18
-rw-r--r--nova/network/minidns.py33
-rw-r--r--nova/rootwrap/filters.py29
-rw-r--r--nova/rootwrap/wrapper.py38
-rw-r--r--nova/test.py4
-rw-r--r--nova/tests/api/ec2/test_cloud.py3
-rw-r--r--nova/tests/compute/test_compute.py32
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/fake_policy.py (renamed from nova/tests/policy.json)19
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/test_api_samples.py62
-rw-r--r--nova/tests/network/test_manager.py12
-rw-r--r--nova/tests/policy_fixture.py44
-rw-r--r--nova/tests/test_db_api.py86
-rw-r--r--nova/tests/test_libvirt.py50
-rw-r--r--nova/tests/test_nova_rootwrap.py20
-rw-r--r--nova/tests/test_policy.py24
-rw-r--r--nova/tests/test_powervm.py42
-rw-r--r--nova/tests/test_xenapi.py88
-rw-r--r--nova/tests/utils.py18
-rw-r--r--nova/virt/driver.py5
-rw-r--r--nova/virt/fake.py7
-rw-r--r--nova/virt/firewall.py4
-rw-r--r--nova/virt/libvirt/driver.py63
-rw-r--r--nova/virt/powervm/blockdev.py302
-rw-r--r--nova/virt/powervm/constants.py2
-rw-r--r--nova/virt/powervm/operator.py208
-rw-r--r--nova/virt/xenapi/firewall.py8
-rw-r--r--nova/virt/xenapi/pool.py54
-rw-r--r--tools/flakes.py22
-rw-r--r--tools/pip-requires2
-rw-r--r--tox.ini4
77 files changed, 1619 insertions, 418 deletions
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index a28205a80..3322bc815 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -42,6 +42,7 @@ import sys
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
+RC_NOEXECFOUND = 96
def _subprocess_setup():
@@ -65,6 +66,11 @@ if __name__ == '__main__':
config.read(configfile)
try:
filters_path = config.get("DEFAULT", "filters_path").split(",")
+ if config.has_option("DEFAULT", "exec_dirs"):
+ exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
+ else:
+ # Use system PATH if exec_dirs is not specified
+ exec_dirs = os.environ["PATH"].split(':')
except ConfigParser.Error:
print "%s: Incorrect configuration file: %s" % (execname, configfile)
sys.exit(RC_BADCONFIG)
@@ -79,16 +85,24 @@ if __name__ == '__main__':
# Execute command if it matches any of the loaded filters
filters = wrapper.load_filters(filters_path)
- filtermatch = wrapper.match_filter(filters, userargs)
- if filtermatch:
- obj = subprocess.Popen(filtermatch.get_command(userargs),
- stdin=sys.stdin,
- stdout=sys.stdout,
- stderr=sys.stderr,
- preexec_fn=_subprocess_setup,
- env=filtermatch.get_environment(userargs))
- obj.wait()
- sys.exit(obj.returncode)
-
- print "Unauthorized command: %s" % ' '.join(userargs)
- sys.exit(RC_UNAUTHORIZED)
+ try:
+ filtermatch = wrapper.match_filter(filters, userargs,
+ exec_dirs=exec_dirs)
+ if filtermatch:
+ obj = subprocess.Popen(filtermatch.get_command(userargs,
+ exec_dirs=exec_dirs),
+ stdin=sys.stdin,
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ preexec_fn=_subprocess_setup,
+ env=filtermatch.get_environment(userargs))
+ obj.wait()
+ sys.exit(obj.returncode)
+
+ except wrapper.FilterMatchNotExecutable as exc:
+ print "Executable not found: %s" % exc.match.exec_path
+ sys.exit(RC_NOEXECFOUND)
+
+ except wrapper.NoFilterMatched:
+ print "Unauthorized command: %s" % ' '.join(userargs)
+ sys.exit(RC_UNAUTHORIZED)
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
index 2d74fffa8..3c7e0c4a3 100644
--- a/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
<console>
- <type>novnc</type>
- <url>http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3&title=dafa(75ecef58-3b8e-4659-ab3b-5501454188e9)</url>
+ <type>novnc</type>
+ <url>http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3</url>
</console>
diff --git a/doc/api_samples/os-deferred-delete/force-delete-post-req.json b/doc/api_samples/os-deferred-delete/force-delete-post-req.json
new file mode 100644
index 000000000..3f1abb676
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/force-delete-post-req.json
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/force-delete-post-req.xml b/doc/api_samples/os-deferred-delete/force-delete-post-req.xml
new file mode 100644
index 000000000..ab3477c9c
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/force-delete-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<forceDelete /> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/restore-post-req.json b/doc/api_samples/os-deferred-delete/restore-post-req.json
new file mode 100644
index 000000000..0e526ff64
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/restore-post-req.json
@@ -0,0 +1,3 @@
+{
+ "restore": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/restore-post-req.xml b/doc/api_samples/os-deferred-delete/restore-post-req.xml
new file mode 100644
index 000000000..a43bef2f5
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/restore-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<restore /> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-req.json b/doc/api_samples/os-deferred-delete/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-req.xml b/doc/api_samples/os-deferred-delete/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-resp.json b/doc/api_samples/os-deferred-delete/server-post-resp.json
new file mode 100644
index 000000000..a81b795e1
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "jDje6SdBHGfQ",
+ "id": "e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-resp.xml b/doc/api_samples/os-deferred-delete/server-post-resp.xml
new file mode 100644
index 000000000..1562cf716
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="43008037-cd16-436e-948d-e084d17c37eb" adminPass="eDu5JojvmLQC">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/43008037-cd16-436e-948d-e084d17c37eb" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/43008037-cd16-436e-948d-e084d17c37eb" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
new file mode 100644
index 000000000..70d4b66eb
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
new file mode 100644
index 000000000..7dfdddeb2
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
new file mode 100644
index 000000000..70d4b66eb
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
new file mode 100644
index 000000000..7dfdddeb2
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.json b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
new file mode 100644
index 000000000..1f12caa04
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.xml b/doc/api_samples/os-quota-sets/quotas-update-post-req.xml
new file mode 100644
index 000000000..4bb7b3a47
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
new file mode 100644
index 000000000..6581c6354
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
@@ -0,0 +1,15 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
new file mode 100644
index 000000000..aef4761f8
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/etc/nova/rootwrap.conf b/etc/nova/rootwrap.conf
index 730f71695..5d6034eb9 100644
--- a/etc/nova/rootwrap.conf
+++ b/etc/nova/rootwrap.conf
@@ -5,3 +5,9 @@
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
diff --git a/etc/nova/rootwrap.d/api-metadata.filters b/etc/nova/rootwrap.d/api-metadata.filters
index ef454cbff..1aa6f83e6 100644
--- a/etc/nova/rootwrap.d/api-metadata.filters
+++ b/etc/nova/rootwrap.d/api-metadata.filters
@@ -5,13 +5,9 @@
[Filters]
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index 62fbcff67..cb7ad7487 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -72,8 +72,7 @@ ip: CommandFilter, /sbin/ip, root
# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
-tunctl: CommandFilter, /bin/tunctl, root
-tunctl_usr: CommandFilter, /usr/sbin/tunctl, root
+tunctl: CommandFilter, tunctl, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
@@ -87,13 +86,11 @@ ovs-ofctl: CommandFilter, /usr/bin/ovs-ofctl, root
dd: CommandFilter, /bin/dd, root
# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
-iscsiadm: CommandFilter, /sbin/iscsiadm, root
-iscsiadm_usr: CommandFilter, /usr/bin/iscsiadm, root
+iscsiadm: CommandFilter, iscsiadm, root
# nova/virt/xenapi/vm_utils.py: parted, --script, ...
# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
-parted: CommandFilter, /sbin/parted, root
-parted_usr: CommandFilter, /usr/sbin/parted, root
+parted: CommandFilter, parted, root
# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
fdisk: CommandFilter, /sbin/fdisk, root
@@ -105,21 +102,16 @@ e2fsck: CommandFilter, /sbin/e2fsck, root
resize2fs: CommandFilter, /sbin/resize2fs, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
-arping: CommandFilter, /usr/bin/arping, root
-arping_sbin: CommandFilter, /sbin/arping, root
+arping: CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
@@ -142,8 +134,7 @@ radvd: CommandFilter, /usr/sbin/radvd, root
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
-brctl: CommandFilter, /sbin/brctl, root
-brctl_usr: CommandFilter, /usr/sbin/brctl, root
+brctl: CommandFilter, brctl, root
# nova/virt/libvirt/utils.py: 'mkswap'
# nova/virt/xenapi/vm_utils.py: 'mkswap'
@@ -156,8 +147,7 @@ mkfs: CommandFilter, /sbin/mkfs, root
qemu-img: CommandFilter, /usr/bin/qemu-img, root
# nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
-readlink: CommandFilter, /bin/readlink, root
-readlink_usr: CommandFilter, /usr/bin/readlink, root
+readlink: CommandFilter, readlink, root
# nova/virt/disk/api.py: 'touch', target
touch: CommandFilter, /usr/bin/touch, root
diff --git a/etc/nova/rootwrap.d/network.filters b/etc/nova/rootwrap.d/network.filters
index 133475500..c58bc77e7 100644
--- a/etc/nova/rootwrap.d/network.filters
+++ b/etc/nova/rootwrap.d/network.filters
@@ -40,21 +40,16 @@ ebtables: CommandFilter, /sbin/ebtables, root
ebtables_usr: CommandFilter, /usr/sbin/ebtables, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
-arping: CommandFilter, /usr/bin/arping, root
-arping_sbin: CommandFilter, /sbin/arping, root
+arping: CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
@@ -77,8 +72,7 @@ radvd: CommandFilter, /usr/sbin/radvd, root
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
-brctl: CommandFilter, /sbin/brctl, root
-brctl_usr: CommandFilter, /usr/sbin/brctl, root
+brctl: CommandFilter, brctl, root
# nova/network/linux_net.py: 'sysctl', ....
sysctl: CommandFilter, /sbin/sysctl, root
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 87b500689..fd8dc7d17 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -122,6 +122,9 @@ interval_opts = [
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
+ cfg.IntOpt('volume_usage_poll_interval',
+ default=0,
+ help='Interval in seconds for gathering volume usages'),
]
timeout_opts = [
@@ -312,6 +315,7 @@ class ComputeManager(manager.SchedulerDependentManager):
CONF.network_manager, host=kwargs.get('host', None))
self._last_host_check = 0
self._last_bw_usage_poll = 0
+ self._last_vol_usage_poll = 0
self._last_info_cache_heal = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -1637,7 +1641,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.confirm_migration(migration, instance,
self._legacy_nw_info(network_info))
- rt = self._get_resource_tracker(instance.get('node'))
+ rt = self._get_resource_tracker(migration['source_node'])
rt.confirm_resize(context, migration)
self._notify_about_instance_usage(
@@ -2401,6 +2405,24 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Detach a volume from an instance."""
bdm = self._get_instance_volume_bdm(context, instance['uuid'],
volume_id)
+ if CONF.volume_usage_poll_interval > 0:
+ vol_stats = []
+ mp = bdm['device_name']
+ # Handle bootable volumes which will not contain /dev/
+ if '/dev/' in mp:
+ mp = mp[5:]
+ try:
+ vol_stats = self.driver.block_stats(instance['name'], mp)
+ except NotImplementedError:
+ pass
+
+ if vol_stats:
+ LOG.debug(_("Updating volume usage cache with totals"))
+ rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+ self.db.vol_usage_update(context, volume_id, rd_req, rd_bytes,
+ wr_req, wr_bytes, instance['id'],
+ update_totals=True)
+
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
@@ -2966,6 +2988,72 @@ class ComputeManager(manager.SchedulerDependentManager):
bw_ctr['bw_out'],
last_refreshed=refreshed)
+ def _get_host_volume_bdms(self, context, host):
+ """Return all block device mappings on a compute host"""
+ compute_host_bdms = []
+ instances = self.db.instance_get_all_by_host(context, self.host)
+ for instance in instances:
+ instance_bdms = self._get_instance_volume_bdms(context,
+ instance['uuid'])
+ compute_host_bdms.append(dict(instance=instance,
+ instance_bdms=instance_bdms))
+
+ return compute_host_bdms
+
+ def _update_volume_usage_cache(self, context, vol_usages, refreshed):
+ """Updates the volume usage cache table with a list of stats"""
+ for usage in vol_usages:
+ # Allow switching of greenthreads between queries.
+ greenthread.sleep(0)
+ self.db.vol_usage_update(context, usage['volume'], usage['rd_req'],
+ usage['rd_bytes'], usage['wr_req'],
+ usage['wr_bytes'], usage['instance_id'],
+ last_refreshed=refreshed)
+
+ def _send_volume_usage_notifications(self, context, start_time):
+ """Queries vol usage cache table and sends a vol usage notification"""
+ # We might have had a quick attach/detach that we missed in
+ # the last run of get_all_volume_usage and this one
+ # but detach stats will be recorded in db and returned from
+ # vol_get_usage_by_time
+ vol_usages = self.db.vol_get_usage_by_time(context, start_time)
+ for vol_usage in vol_usages:
+ notifier.notify(context, 'volume.%s' % self.host, 'volume.usage',
+ notifier.INFO,
+ compute_utils.usage_volume_info(vol_usage))
+
+ @manager.periodic_task
+ def _poll_volume_usage(self, context, start_time=None):
+ if CONF.volume_usage_poll_interval == 0:
+ return
+ else:
+ if not start_time:
+ start_time = utils.last_completed_audit_period()[1]
+
+ curr_time = time.time()
+ if (curr_time - self._last_vol_usage_poll) < \
+ CONF.volume_usage_poll_interval:
+ return
+ else:
+ self._last_vol_usage_poll = curr_time
+ compute_host_bdms = self._get_host_volume_bdms(context,
+ self.host)
+ if not compute_host_bdms:
+ return
+ else:
+ LOG.debug(_("Updating volume usage cache"))
+ try:
+ vol_usages = self.driver.get_all_volume_usage(context,
+ compute_host_bdms)
+ except NotImplementedError:
+ return
+
+ refreshed = timeutils.utcnow()
+ self._update_volume_usage_cache(context, vol_usages,
+ refreshed)
+
+ self._send_volume_usage_notifications(context, start_time)
+
@manager.periodic_task
def _report_driver_status(self, context):
curr_time = time.time()
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 07e839857..0767b4e61 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -23,6 +23,7 @@ from nova.compute import claims
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
+from nova import conductor
from nova import context
from nova import db
from nova import exception
@@ -63,6 +64,7 @@ class ResourceTracker(object):
self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
self.tracked_migrations = {}
+ self.conductor_api = conductor.API()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def instance_claim(self, context, instance_ref, limits=None):
@@ -183,9 +185,8 @@ class ResourceTracker(object):
"""
values = {'host': self.host, 'node': self.nodename,
'launched_on': self.host}
- (old_ref, new_ref) = db.instance_update_and_get_original(context,
- instance_ref['uuid'], values)
- notifications.send_update(context, old_ref, new_ref)
+ self.conductor_api.instance_update(context, instance_ref['uuid'],
+ **values)
instance_ref['host'] = self.host
instance_ref['launched_on'] = self.host
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index cc25ac6cf..a0dfbea8d 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -227,3 +227,26 @@ def start_instance_usage_audit(context, begin, end, host, num_instances):
def finish_instance_usage_audit(context, begin, end, host, errors, message):
db.task_log_end_task(context, "instance_usage_audit", begin, end, host,
errors, message)
+
+
+def usage_volume_info(vol_usage):
+ def null_safe_str(s):
+ return str(s) if s else ''
+
+ tot_refreshed = vol_usage['tot_last_refreshed']
+ curr_refreshed = vol_usage['curr_last_refreshed']
+ last_refreshed_time = (tot_refreshed if tot_refreshed > curr_refreshed
+ else curr_refreshed)
+
+ usage_info = dict(
+ volume_id=vol_usage['volume_id'],
+ instance_id=vol_usage['instance_id'],
+ last_refreshed=null_safe_str(last_refreshed_time),
+ reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
+ read_bytes=vol_usage['tot_read_bytes'] +
+ vol_usage['curr_read_bytes'],
+ writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
+ write_bytes=vol_usage['tot_write_bytes'] +
+ vol_usage['curr_write_bytes'])
+
+ return usage_info
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 53a3dc745..824875b3c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -29,7 +29,7 @@ allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
- 'instance_type_id', 'root_device_name', 'host',
+ 'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
]
diff --git a/nova/db/api.py b/nova/db/api.py
index b496e4bd0..ad928f585 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1433,6 +1433,24 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
###################
+def vol_get_usage_by_time(context, begin):
+    """Return usage records for volumes updated after the specified time."""
+ return IMPL.vol_get_usage_by_time(context, begin)
+
+
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+ instance_id, last_refreshed=None, update_totals=False):
+    """Update the cached usage record for a volume.
+    Creates a new record if needed."""
+ return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance_id,
+ last_refreshed=last_refreshed,
+ update_totals=update_totals)
+
+
+###################
+
+
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
return IMPL.s3_image_get(context, image_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index c4558a84c..c1b6e66dd 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4134,6 +4134,85 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
####################
+@require_context
+def vol_get_usage_by_time(context, begin):
+    """Return usage records for volumes updated after the specified time."""
+ return model_query(context, models.VolumeUsage, read_deleted="yes").\
+ filter(or_(models.VolumeUsage.tot_last_refreshed == None,
+ models.VolumeUsage.tot_last_refreshed > begin,
+ models.VolumeUsage.curr_last_refreshed == None,
+ models.VolumeUsage.curr_last_refreshed > begin,
+ )).\
+ all()
+
+
+@require_context
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+ instance_id, last_refreshed=None, update_totals=False,
+ session=None):
+ if not session:
+ session = get_session()
+
+ if last_refreshed is None:
+ last_refreshed = timeutils.utcnow()
+
+ with session.begin():
+ values = {}
+ # NOTE(dricco): We will be mostly updating current usage records vs
+ # updating total or creating records. Optimize accordingly.
+ if not update_totals:
+ values = {'curr_last_refreshed': last_refreshed,
+ 'curr_reads': rd_req,
+ 'curr_read_bytes': rd_bytes,
+ 'curr_writes': wr_req,
+ 'curr_write_bytes': wr_bytes,
+ 'instance_id': instance_id}
+ else:
+ values = {'tot_last_refreshed': last_refreshed,
+ 'tot_reads': models.VolumeUsage.tot_reads + rd_req,
+ 'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
+ rd_bytes,
+ 'tot_writes': models.VolumeUsage.tot_writes + wr_req,
+ 'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
+ wr_bytes,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0,
+ 'instance_id': instance_id}
+
+ rows = model_query(context, models.VolumeUsage,
+ session=session, read_deleted="yes").\
+ filter_by(volume_id=id).\
+ update(values, synchronize_session=False)
+
+ if rows:
+ return
+
+ vol_usage = models.VolumeUsage()
+ vol_usage.tot_last_refreshed = timeutils.utcnow()
+ vol_usage.curr_last_refreshed = timeutils.utcnow()
+ vol_usage.volume_id = id
+
+ if not update_totals:
+ vol_usage.curr_reads = rd_req
+ vol_usage.curr_read_bytes = rd_bytes
+ vol_usage.curr_writes = wr_req
+ vol_usage.curr_write_bytes = wr_bytes
+ else:
+ vol_usage.tot_reads = rd_req
+ vol_usage.tot_read_bytes = rd_bytes
+ vol_usage.tot_writes = wr_req
+ vol_usage.tot_write_bytes = wr_bytes
+
+ vol_usage.save(session=session)
+
+ return
+
+
+####################
+
+
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
result = model_query(context, models.S3Image, read_deleted="yes").\
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
new file mode 100644
index 000000000..7adbcb938
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
@@ -0,0 +1,69 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime
+from sqlalchemy import Boolean, BigInteger, MetaData, Integer, String, Table
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # Create new table
+ volume_usage_cache = Table('volume_usage_cache', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id', String(36), nullable=False),
+ Column("instance_id", Integer()),
+ Column('tot_last_refreshed', DateTime(timezone=False)),
+ Column('tot_reads', BigInteger(), default=0),
+ Column('tot_read_bytes', BigInteger(), default=0),
+ Column('tot_writes', BigInteger(), default=0),
+ Column('tot_write_bytes', BigInteger(), default=0),
+ Column('curr_last_refreshed', DateTime(timezone=False)),
+ Column('curr_reads', BigInteger(), default=0),
+ Column('curr_read_bytes', BigInteger(), default=0),
+ Column('curr_writes', BigInteger(), default=0),
+ Column('curr_write_bytes', BigInteger(), default=0),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ try:
+ volume_usage_cache.create()
+ except Exception:
+        LOG.exception(_("Exception creating table 'volume_usage_cache'"))
+ meta.drop_all(tables=[volume_usage_cache])
+ raise
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volume_usage_cache = Table('volume_usage_cache', meta, autoload=True)
+ try:
+ volume_usage_cache.drop()
+ except Exception:
+ LOG.error(_("volume_usage_cache table not dropped"))
+ raise
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index a09517b67..a038b6745 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -938,6 +938,24 @@ class BandwidthUsage(BASE, NovaBase):
last_ctr_out = Column(BigInteger)
+class VolumeUsage(BASE, NovaBase):
+    """Cache for volume usage data pulled from the hypervisor."""
+ __tablename__ = 'volume_usage_cache'
+ id = Column(Integer, primary_key=True, nullable=False)
+ volume_id = Column(String(36), nullable=False)
+ instance_id = Column(Integer)
+ tot_last_refreshed = Column(DateTime)
+ tot_reads = Column(BigInteger, default=0)
+ tot_read_bytes = Column(BigInteger, default=0)
+ tot_writes = Column(BigInteger, default=0)
+ tot_write_bytes = Column(BigInteger, default=0)
+ curr_last_refreshed = Column(DateTime)
+ curr_reads = Column(BigInteger, default=0)
+ curr_read_bytes = Column(BigInteger, default=0)
+ curr_writes = Column(BigInteger, default=0)
+ curr_write_bytes = Column(BigInteger, default=0)
+
+
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance"""
__tablename__ = 's3_images'
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index a4a945c15..4f0eab1d4 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -18,8 +18,10 @@ import tempfile
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
class MiniDNS(object):
@@ -35,8 +37,11 @@ class MiniDNS(object):
def __init__(self):
if CONF.logdir:
self.filename = os.path.join(CONF.logdir, "dnstest.txt")
+ self.tempdir = None
else:
- self.filename = "dnstest.txt"
+ self.tempdir = tempfile.mkdtemp()
+ self.filename = os.path.join(self.tempdir, "dnstest.txt")
+ LOG.debug(_('minidns file is |%s|'), self.filename)
if not os.path.exists(self.filename):
f = open(self.filename, "w+")
@@ -62,6 +67,8 @@ class MiniDNS(object):
return qualified
def create_entry(self, name, address, type, domain):
+ if name is None:
+ raise exception.InvalidInput(_("Invalid name"))
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
@@ -91,6 +98,9 @@ class MiniDNS(object):
return entry
def delete_entry(self, name, domain):
+ if name is None:
+ raise exception.InvalidInput(_("Invalid name"))
+
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
@@ -105,6 +115,8 @@ class MiniDNS(object):
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
+ LOG.warn(_('Cannot delete entry |%s|'),
+ self.qualify(name, domain).lower())
raise exception.NotFound
def modify_address(self, name, address, domain):
@@ -133,8 +145,10 @@ class MiniDNS(object):
entry = self.parse_line(line)
if entry and entry['address'].lower() == address.lower():
if entry['name'].lower().endswith(domain.lower()):
- domain_index = entry['name'].lower().find(domain.lower())
- entries.append(entry['name'][0:domain_index - 1])
+ name = entry['name'].split(".")[0]
+ if name not in entries:
+ entries.append(name)
+
infile.close()
return entries
@@ -150,7 +164,17 @@ class MiniDNS(object):
return entries
def delete_dns_file(self):
- os.remove(self.filename)
+ LOG.warn(_("This shouldn't be getting called except during testing."))
+ if os.path.exists(self.filename):
+ try:
+ os.remove(self.filename)
+ except OSError:
+ pass
+ if self.tempdir and os.path.exists(self.tempdir):
+ try:
+ shutil.rmtree(self.tempdir)
+ except OSError:
+ pass
def create_domain(self, fqdomain):
if self.get_entries_by_name(fqdomain, ''):
@@ -177,4 +201,5 @@ class MiniDNS(object):
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
+ LOG.warn(_('Cannot delete domain |%s|'), fqdomain)
raise exception.NotFound
diff --git a/nova/rootwrap/filters.py b/nova/rootwrap/filters.py
index 46a812e5d..a3e5f1c3c 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/rootwrap/filters.py
@@ -26,6 +26,23 @@ class CommandFilter(object):
self.exec_path = exec_path
self.run_as = run_as
self.args = args
+ self.real_exec = None
+
+ def get_exec(self, exec_dirs=[]):
+        """Return the executable path, or '' if none is found."""
+ if self.real_exec is not None:
+ return self.real_exec
+ self.real_exec = ""
+ if self.exec_path.startswith('/'):
+ if os.access(self.exec_path, os.X_OK):
+ self.real_exec = self.exec_path
+ else:
+ for binary_path in exec_dirs:
+ expanded_path = os.path.join(binary_path, self.exec_path)
+ if os.access(expanded_path, os.X_OK):
+ self.real_exec = expanded_path
+ break
+ return self.real_exec
def match(self, userargs):
"""Only check that the first argument (command) matches exec_path"""
@@ -33,12 +50,13 @@ class CommandFilter(object):
return True
return False
- def get_command(self, userargs):
+ def get_command(self, userargs, exec_dirs=[]):
"""Returns command to execute (with sudo -u if run_as != root)."""
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
if (self.run_as != 'root'):
# Used to run commands at lesser privileges
- return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:]
- return [self.exec_path] + userargs[1:]
+ return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
+ return [to_exec] + userargs[1:]
def get_environment(self, userargs):
"""Returns specific environment to set, None if none"""
@@ -82,9 +100,10 @@ class DnsmasqFilter(CommandFilter):
return True
return False
- def get_command(self, userargs):
+ def get_command(self, userargs, exec_dirs=[]):
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
dnsmasq_pos = userargs.index('dnsmasq')
- return [self.exec_path] + userargs[dnsmasq_pos + 1:]
+ return [to_exec] + userargs[dnsmasq_pos + 1:]
def get_environment(self, userargs):
env = os.environ.copy()
diff --git a/nova/rootwrap/wrapper.py b/nova/rootwrap/wrapper.py
index 3dd7ee7e3..742f23b14 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/rootwrap/wrapper.py
@@ -23,6 +23,20 @@ import string
from nova.rootwrap import filters
+class NoFilterMatched(Exception):
+ """This exception is raised when no filter matched."""
+ pass
+
+
+class FilterMatchNotExecutable(Exception):
+ """
+ This exception is raised when a filter matched but no executable was
+ found.
+ """
+ def __init__(self, match=None, **kwargs):
+ self.match = match
+
+
def build_filter(class_name, *args):
"""Returns a filter object of class class_name"""
if not hasattr(filters, class_name):
@@ -50,23 +64,29 @@ def load_filters(filters_path):
return filterlist
-def match_filter(filters, userargs):
+def match_filter(filters, userargs, exec_dirs=[]):
"""
Checks user command and arguments through command filters and
- returns the first matching filter, or None is none matched.
+ returns the first matching filter.
+ Raises NoFilterMatched if no filter matched.
+ Raises FilterMatchNotExecutable if no executable was found for the
+ best filter match.
"""
-
- found_filter = None
+ first_not_executable_filter = None
for f in filters:
if f.match(userargs):
# Try other filters if executable is absent
- if not os.access(f.exec_path, os.X_OK):
- if not found_filter:
- found_filter = f
+ if not f.get_exec(exec_dirs=exec_dirs):
+ if not first_not_executable_filter:
+ first_not_executable_filter = f
continue
# Otherwise return matching filter for execution
return f
- # No filter matched or first missing executable
- return found_filter
+ if first_not_executable_filter:
+ # A filter matched, but no executable was found for it
+ raise FilterMatchNotExecutable(match=first_not_executable_filter)
+
+ # No filter matched
+ raise NoFilterMatched()
diff --git a/nova/test.py b/nova/test.py
index 4ea1d3023..1e9945bbc 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -46,6 +46,8 @@ from nova.openstack.common import timeutils
from nova import service
from nova import tests
from nova.tests import fake_flags
+from nova.tests import policy_fixture
+from nova.tests import utils
test_opts = [
@@ -156,10 +158,12 @@ class TestCase(testtools.TestCase):
self._services = []
self._modules = {}
self.useFixture(EnvironmentVariable('http_proxy'))
+ self.policy = self.useFixture(policy_fixture.PolicyFixture())
def tearDown(self):
"""Runs after each test method to tear down test environment."""
try:
+ utils.cleanup_dns_managers()
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index bed4ac25b..0a694bbb7 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -54,6 +54,8 @@ CONF.import_opt('default_instance_type', 'nova.config')
CONF.import_opt('use_ipv6', 'nova.config')
LOG = logging.getLogger(__name__)
+HOST = "testhost"
+
def get_fake_cache():
def _ip(ip, fixed=True, floats=None):
@@ -248,6 +250,7 @@ class CloudTestCase(test.TestCase):
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index f99dc5281..346c44f89 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -48,7 +48,6 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
-from nova.openstack.common import policy as common_policy
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
@@ -5392,20 +5391,9 @@ class ComputePolicyTestCase(BaseTestCase):
def setUp(self):
super(ComputePolicyTestCase, self).setUp()
- nova.policy.reset()
- nova.policy.init()
self.compute_api = compute.API()
- def tearDown(self):
- super(ComputePolicyTestCase, self).tearDown()
- nova.policy.reset()
-
- def _set_rules(self, rules):
- common_policy.set_rules(common_policy.Rules(
- dict((k, common_policy.parse_rule(v))
- for k, v in rules.items())))
-
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
@@ -5417,20 +5405,20 @@ class ComputePolicyTestCase(BaseTestCase):
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.delete, self.context, instance)
# reset rules to allow deletion
rules = {"compute:delete": []}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.compute_api.delete(self.context, instance)
def test_create_fail(self):
rules = {"compute:create": [["false:false"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1')
@@ -5441,7 +5429,7 @@ class ComputePolicyTestCase(BaseTestCase):
"compute:create:attach_network": [["false:false"]],
"compute:create:attach_volume": [],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
@@ -5454,7 +5442,7 @@ class ComputePolicyTestCase(BaseTestCase):
"compute:create:attach_network": [],
"compute:create:attach_volume": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
@@ -5467,7 +5455,7 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance['uuid'])
@@ -5476,7 +5464,7 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get_all": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
@@ -5489,7 +5477,7 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get_instance_faults": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
@@ -5498,7 +5486,7 @@ class ComputePolicyTestCase(BaseTestCase):
def test_force_host_fail(self):
rules = {"compute:create": [],
"compute:create:forced_host": [["role:fake"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, '1',
@@ -5507,7 +5495,7 @@ class ComputePolicyTestCase(BaseTestCase):
def test_force_host_pass(self):
rules = {"compute:create": [],
"compute:create:forced_host": []}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.compute_api.create(self.context, None, '1',
availability_zone='1:1')
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 3fcc9fac4..83ec33cab 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -45,3 +45,5 @@ def set_defaults(conf):
conf.set_default('rpc_response_timeout', 5)
conf.set_default('rpc_cast_timeout', 5)
conf.set_default('lock_path', None)
+ conf.set_default('floating_ip_dns_manager', 'nova.tests.utils.dns_manager')
+ conf.set_default('instance_dns_manager', 'nova.tests.utils.dns_manager')
diff --git a/nova/tests/policy.json b/nova/tests/fake_policy.py
index 517ba2a55..b3ae0fa17 100644
--- a/nova/tests/policy.json
+++ b/nova/tests/fake_policy.py
@@ -1,3 +1,21 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+policy_data = """
{
"admin_api": "role:admin",
@@ -205,3 +223,4 @@
"network:create_public_dns_domain": "",
"network:delete_dns_domain": ""
}
+"""
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
index 00f32c6b9..c1f73180e 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
@@ -1,2 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
-<os-getVNCConsole type="novnc" />
+<os-getVNCConsole>
+ <type>novnc</type>
+</os-getVNCConsole>
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
new file mode 100644
index 000000000..d3562d390
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
new file mode 100644
index 000000000..31928207e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<forceDelete />
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
new file mode 100644
index 000000000..d38291fe0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "restore": null
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
new file mode 100644
index 000000000..8a95b4fcc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<restore />
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
new file mode 100644
index 000000000..70d4b66eb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
new file mode 100644
index 000000000..7dfdddeb2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
new file mode 100644
index 000000000..70d4b66eb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
new file mode 100644
index 000000000..7dfdddeb2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
new file mode 100644
index 000000000..1f12caa04
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
new file mode 100644
index 000000000..596ce56ac
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
new file mode 100644
index 000000000..6581c6354
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
new file mode 100644
index 000000000..aef4761f8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 841faeb00..4936ff2cb 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -1660,5 +1660,65 @@ class ConsolesSampleJsonTests(ServersSampleBase):
subs, response)
-class ConsoleOutputSampleXmlTests(ConsoleOutputSampleJsonTest):
+class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
ctype = 'xml'
+
+
+class DeferredDeleteSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".deferred_delete.Deferred_delete")
+
+ def setUp(self):
+ super(DeferredDeleteSampleJsonTests, self).setUp()
+ self.flags(reclaim_instance_interval=1)
+
+ def test_restore(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'restore-post-req', {})
+ self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), '')
+
+ def test_force_delete(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'force-delete-post-req', {})
+ self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), '')
+
+
+class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
+ ctype = 'xml'
+
+
+class QuotasSampleJsonTests(ApiSampleTestBase):
+ extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+ def test_show_quotas(self):
+ """Get api sample to show quotas"""
+ response = self._do_get('os-quota-sets/fake_tenant')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('quotas-show-get-resp', {}, response)
+
+ def test_show_quotas_defaults(self):
+ """Get api sample to show quotas defaults"""
+ response = self._do_get('os-quota-sets/fake_tenant/defaults')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('quotas-show-defaults-get-resp',
+ {}, response)
+
+ def test_update_quotas(self):
+ """Get api sample to update quotas"""
+ response = self._do_put('os-quota-sets/fake_tenant',
+ 'quotas-update-post-req',
+ {})
+ self.assertEqual(response.status, 200)
+ return self._verify_response('quotas-update-post-resp', {}, response)
+
+
+class QuotasSampleXmlTests(QuotasSampleJsonTests):
+ ctype = "xml"
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 8d4a511b6..b3ba161c6 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -139,8 +139,6 @@ class FlatNetworkTestCase(test.TestCase):
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
- self.network.instance_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
@@ -1573,6 +1571,7 @@ class AllocateTestCase(test.TestCase):
{'address': address,
'pool': 'nova'})
inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
@@ -1600,8 +1599,6 @@ class FloatingIPTestCase(test.TestCase):
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = TestFloatingIPManager()
- self.network.floating_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
@@ -1931,9 +1928,6 @@ class NetworkPolicyTestCase(test.TestCase):
super(NetworkPolicyTestCase, self).tearDown()
nova.policy.reset()
- def _set_rules(self, rules):
- nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
-
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
@@ -1952,10 +1946,6 @@ class InstanceDNSTestCase(test.TestCase):
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = TestFloatingIPManager()
- self.network.instance_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
- self.network.floating_dns_manager = importutils.import_object(
- 'nova.network.dns_driver.DNSDriver')
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py
new file mode 100644
index 000000000..282a28b44
--- /dev/null
+++ b/nova/tests/policy_fixture.py
@@ -0,0 +1,44 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+
+from nova.openstack.common import cfg
+from nova.openstack.common import policy as common_policy
+import nova.policy
+from nova.tests import fake_policy
+
+CONF = cfg.CONF
+
+
+class PolicyFixture(fixtures.Fixture):
+
+ def setUp(self):
+ super(PolicyFixture, self).setUp()
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ policy_file.write(fake_policy.policy_data)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
+
+ def set_rules(self, rules):
+ common_policy.set_rules(common_policy.Rules(
+ dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items())))
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 546aeaa97..f2124c021 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -1315,3 +1315,89 @@ class InstanceDestroyConstraints(test.TestCase):
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
+
+
+class VolumeUsageDBApiTestCase(test.TestCase):
+ def setUp(self):
+ super(VolumeUsageDBApiTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def test_vol_usage_update_no_totals_update(self):
+ ctxt = context.get_admin_context()
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ start_time = now - datetime.timedelta(seconds=10)
+ refreshed_time = now - datetime.timedelta(seconds=5)
+
+ expected_vol_usages = [{'volume_id': u'1',
+ 'curr_reads': 1000,
+ 'curr_read_bytes': 2000,
+ 'curr_writes': 3000,
+ 'curr_write_bytes': 4000},
+ {'volume_id': u'2',
+ 'curr_reads': 100,
+ 'curr_read_bytes': 200,
+ 'curr_writes': 300,
+ 'curr_write_bytes': 400}]
+
+ def _compare(vol_usage, expected):
+ for key, value in expected.items():
+ self.assertEqual(vol_usage[key], value)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 0)
+
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
+ wr_req=30, wr_bytes=40, instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
+ wr_req=3000, wr_bytes=4000,
+ instance_id=1,
+ last_refreshed=refreshed_time)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 2)
+ _compare(vol_usages[0], expected_vol_usages[0])
+ _compare(vol_usages[1], expected_vol_usages[1])
+ timeutils.clear_time_override()
+
+ def test_vol_usage_update_totals_update(self):
+ ctxt = context.get_admin_context()
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ start_time = now - datetime.timedelta(seconds=10)
+ expected_vol_usages = {'volume_id': u'1',
+ 'tot_reads': 600,
+ 'tot_read_bytes': 800,
+ 'tot_writes': 1000,
+ 'tot_write_bytes': 1200,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0}
+
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
+ wr_req=400, wr_bytes=500,
+ instance_id=1,
+ update_totals=True)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
+ wr_req=500, wr_bytes=600,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
+ wr_req=600, wr_bytes=700,
+ instance_id=1,
+ update_totals=True)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+
+ self.assertEquals(1, len(vol_usages))
+ for key, value in expected_vol_usages.items():
+ self.assertEqual(vol_usages[0][key], value)
+ timeutils.clear_time_override()
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 5c90ebcd1..5df7d920d 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -4213,6 +4213,56 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
+class LibvirtVolumeUsageTestCase(test.TestCase):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
+ .get_all_volume_usage"""
+
+ def setUp(self):
+ super(LibvirtVolumeUsageTestCase, self).setUp()
+ self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.c = context.get_admin_context()
+
+ # creating instance
+ inst = {}
+ inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ self.ins_ref = db.instance_create(self.c, inst)
+
+ # also verify the device path of a bootable volume
+ self.bdms = [{'volume_id': 1,
+ 'device_name': '/dev/vde'},
+ {'volume_id': 2,
+ 'device_name': 'vda'}]
+
+ def test_get_all_volume_usage(self):
+ def fake_block_stats(instance_name, disk):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+
+ expected_usage = [{'volume': 1,
+ 'instance_id': 1,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L},
+ {'volume': 2,
+ 'instance_id': 1,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L}]
+ self.assertEqual(vol_usage, expected_usage)
+
+ def test_get_all_volume_usage_device_not_found(self):
+ def fake_lookup(instance_name):
+ raise libvirt.libvirtError('invalid path')
+
+ self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+ self.assertEqual(vol_usage, [])
+
+
class LibvirtNonblockingTestCase(test.TestCase):
"""Test libvirt_nonblocking option"""
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index 135a5e46e..1dfd57a72 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -43,16 +43,16 @@ class RootwrapTestCase(test.TestCase):
def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertTrue(filtermatch is None)
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, usercmd)
def test_missing_command(self):
valid_but_missing = ["foo_bar_not_exist"]
invalid = ["foo_bar_not_exist_and_not_matched"]
- filtermatch = wrapper.match_filter(self.filters, valid_but_missing)
- self.assertTrue(filtermatch is not None)
- filtermatch = wrapper.match_filter(self.filters, invalid)
- self.assertTrue(filtermatch is None)
+ self.assertRaises(wrapper.FilterMatchNotExecutable,
+ wrapper.match_filter, self.filters, valid_but_missing)
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, invalid)
def _test_DnsmasqFilter(self, filter_class, config_file_arg):
usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
@@ -136,6 +136,14 @@ class RootwrapTestCase(test.TestCase):
self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
self.assertTrue(f.match(usercmd))
+ def test_exec_dirs_search(self):
+ # This test assumes /bin/cat or /usr/bin/cat exists locally
+ f = filters.CommandFilter("cat", "root")
+ usercmd = ['cat', '/f']
+ self.assertTrue(f.match(usercmd))
+ self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin',
+ '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f']))
+
def test_skips(self):
# Check that all filters are skipped and that the last matches
usercmd = ["cat", "/"]
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index f90854f08..ba11c07f9 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -32,17 +32,13 @@ from nova import utils
class PolicyFileTestCase(test.TestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
- policy.reset()
self.context = context.RequestContext('fake', 'fake')
self.target = {}
- def tearDown(self):
- super(PolicyFileTestCase, self).tearDown()
- policy.reset()
-
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
+
self.flags(policy_file=tmpfilename)
# NOTE(uni): context construction invokes policy check to determin
@@ -66,9 +62,6 @@ class PolicyFileTestCase(test.TestCase):
class PolicyTestCase(test.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
- policy.reset()
- # NOTE(vish): preload rules to circumvent reloading from file
- policy.init()
rules = {
"true": '@',
"example:allowed": '@',
@@ -81,17 +74,10 @@ class PolicyTestCase(test.TestCase):
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
- # NOTE(vish): then overload underlying brain
- common_policy.set_rules(common_policy.Rules(
- dict((k, common_policy.parse_rule(v))
- for k, v in rules.items())))
+ self.policy.set_rules(rules)
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
- def tearDown(self):
- policy.reset()
- super(PolicyTestCase, self).tearDown()
-
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
@@ -165,8 +151,6 @@ class DefaultPolicyTestCase(test.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
- policy.reset()
- policy.init()
self.rules = {
"default": '',
@@ -183,10 +167,6 @@ class DefaultPolicyTestCase(test.TestCase):
for k, v in self.rules.items()), default_rule)
common_policy.set_rules(rules)
- def tearDown(self):
- super(DefaultPolicyTestCase, self).tearDown()
- policy.reset()
-
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index b84bd9fbd..02d3a5a3f 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -25,7 +25,7 @@ from nova import test
from nova.compute import power_state
from nova.openstack.common import log as logging
from nova.virt import images
-
+from nova.virt.powervm import blockdev as powervm_blockdev
from nova.virt.powervm import driver as powervm_driver
from nova.virt.powervm import exception
from nova.virt.powervm import lpar
@@ -73,20 +73,6 @@ class FakeIVMOperator(object):
def remove_disk(self, disk_name):
pass
- def create_logical_volume(self, size):
- return 'lvfake01'
-
- def remove_logical_volume(self, lv_name):
- pass
-
- def copy_file_to_device(self, sourcePath, device):
- pass
-
- def copy_image_file(self, sourcePath, remotePath):
- finalPath = '/home/images/rhel62.raw.7e358754160433febd6f3318b7c9e335'
- size = 4294967296
- return finalPath, size
-
def run_cfg_dev(self, device_name):
pass
@@ -108,6 +94,26 @@ class FakeIVMOperator(object):
return 'fake-powervm'
+class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
+
+ def __init__(self):
+ pass
+
+ def _create_logical_volume(self, size):
+ return 'lvfake01'
+
+ def _remove_logical_volume(self, lv_name):
+ pass
+
+ def _copy_file_to_device(self, sourcePath, device, decrompress=True):
+ pass
+
+ def _copy_image_file(self, sourcePath, remotePath, decompress=False):
+ finalPath = '/home/images/rhel62.raw.7e358754160433febd6f3318b7c9e335'
+ size = 4294967296
+ return finalPath, size
+
+
def fake_get_powervm_operator():
return FakeIVMOperator()
@@ -119,6 +125,8 @@ class PowerVMDriverTestCase(test.TestCase):
super(PowerVMDriverTestCase, self).setUp()
self.stubs.Set(operator, 'get_powervm_operator',
fake_get_powervm_operator)
+ self.stubs.Set(operator, 'get_powervm_disk_adapter',
+ lambda: FakeBlockAdapter())
self.powervm_connection = powervm_driver.PowerVMDriver(None)
self.instance = self._create_instance()
@@ -161,8 +169,8 @@ class PowerVMDriverTestCase(test.TestCase):
self.flags(powervm_img_local_path='/images/')
self.stubs.Set(images, 'fetch_to_raw', lambda *x, **y: None)
self.stubs.Set(
- self.powervm_connection._powervm._operator,
- 'copy_image_file',
+ self.powervm_connection._powervm._disk_adapter,
+ 'create_volume_from_image',
lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
self.stubs.Set(
self.powervm_connection._powervm, '_cleanup',
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 6d0686258..f570e9959 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -242,9 +242,9 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
- vm = xenapi_fake.create_vm(instance.name, 'Running')
+ vm = xenapi_fake.create_vm(instance['name'], 'Running')
result = conn.attach_volume(self._make_connection_info(),
- instance.name, '/dev/sdc')
+ instance['name'], '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
@@ -259,11 +259,11 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{'driver_volume_type': 'nonexist'},
- instance.name,
+ instance['name'],
'/dev/sdc')
@@ -410,7 +410,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
- self.assertEquals(vm_labels, [instance.name])
+ self.assertEquals(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
@@ -418,7 +418,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
- self.assertEquals(vbd_labels, [instance.name])
+ self.assertEquals(vbd_labels, [instance['name']])
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -588,8 +588,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
'herp', network_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, check_injection)
- self.assertTrue(instance.os_type)
- self.assertTrue(instance.architecture)
+ self.assertTrue(instance['os_type'])
+ self.assertTrue(instance['architecture'])
def test_spawn_empty_dns(self):
"""Test spawning with an empty dns list"""
@@ -826,7 +826,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
- vm_ref = vm_utils.lookup(session, instance.name)
+ vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
@@ -854,7 +854,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
- rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
+ rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+ 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
@@ -895,16 +896,16 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
fake.FakeVirtAPI())
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance.name, 'Halted')
+ xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(instance, None, "SOFT")
- vm_ref = vm_utils.lookup(session, instance.name)
+ vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEquals(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance.name, 'Unknown')
+ xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
@@ -982,6 +983,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance_values = {
'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
+ 'display_name': 'host-%d' % instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
@@ -1129,7 +1131,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
@@ -1137,7 +1139,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
def fake_raise(*args, **kwargs):
@@ -1177,7 +1179,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
@@ -1212,7 +1214,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
@@ -1234,7 +1236,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
@@ -1251,7 +1253,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@@ -1262,7 +1264,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
instance_values['root_gb'] = 40
instance_values['auto_disk_config'] = False
instance = db.instance_create(self.context, instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
@@ -2160,10 +2162,10 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate.id)
+ result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
- matchers.DictMatches(result.metadetails))
+ matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
"""Ensure join_slave gets called when the request gets to master."""
@@ -2193,12 +2195,12 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
result = db.aggregate_create(self.context, values)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
- db.aggregate_metadata_add(self.context, result.id, metadata)
+ db.aggregate_metadata_add(self.context, result['id'], metadata)
- db.aggregate_host_add(self.context, result.id, "host")
- aggregate = db.aggregate_get(self.context, result.id)
- self.assertEqual(["host"], aggregate.hosts)
- self.assertEqual(metadata, aggregate.metadetails)
+ db.aggregate_host_add(self.context, result['id'], "host")
+ aggregate = db.aggregate_get(self.context, result['id'])
+ self.assertEqual(["host"], aggregate['hosts'])
+ self.assertEqual(metadata, aggregate['metadetails'])
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
@@ -2239,11 +2241,11 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate.id)
+ result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
self.assertThat({pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
- matchers.DictMatches(result.metadetails))
+ matchers.DictMatches(result['metadetails']))
def test_remote_master_non_empty_pool(self):
"""Ensure AggregateError is raised if removing the master."""
@@ -2263,13 +2265,13 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
result = db.aggregate_create(self.context, values)
pool_flag = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: aggr_state}
- db.aggregate_metadata_add(self.context, result.id, pool_flag)
+ db.aggregate_metadata_add(self.context, result['id'], pool_flag)
for host in hosts:
- db.aggregate_host_add(self.context, result.id, host)
+ db.aggregate_host_add(self.context, result['id'], host)
if metadata:
- db.aggregate_metadata_add(self.context, result.id, metadata)
- return db.aggregate_get(self.context, result.id)
+ db.aggregate_metadata_add(self.context, result['id'], metadata)
+ return db.aggregate_get(self.context, result['id'])
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when adding host while
@@ -2342,23 +2344,17 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(self.context, self.aggr.id, metadata)
- db.aggregate_host_add(self.context, self.aggr.id, 'fake_host')
+ db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+ db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, "fake_host",
aggregate=jsonutils.to_primitive(self.aggr))
- excepted = db.aggregate_get(self.context, self.aggr.id)
- self.assertEqual(excepted.metadetails[pool_states.KEY],
+ excepted = db.aggregate_get(self.context, self.aggr['id'])
+ self.assertEqual(excepted['metadetails'][pool_states.KEY],
pool_states.ERROR)
- self.assertEqual(excepted.hosts, [])
-
-
-class Aggregate(object):
- def __init__(self, id=None, hosts=None):
- self.id = id
- self.hosts = hosts or []
+ self.assertEqual(excepted['hosts'], [])
class MockComputeAPI(object):
@@ -2405,7 +2401,7 @@ class HypervisorPoolTestCase(test.TestCase):
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
- aggregate = Aggregate(id=98, hosts=[])
+ aggregate = {'id': 98, 'hosts': []}
slave.add_to_aggregate("CONTEXT", aggregate, "slave")
@@ -2417,7 +2413,7 @@ class HypervisorPoolTestCase(test.TestCase):
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
- aggregate = Aggregate(id=98, hosts=[])
+ aggregate = {'id': 98, 'hosts': []}
slave.remove_from_aggregate("CONTEXT", aggregate, "slave")
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index f88135e3c..9fabab593 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -19,6 +19,7 @@ import platform
import nova.context
import nova.db
from nova.image import glance
+from nova.network import minidns
from nova.openstack.common import cfg
CONF = cfg.CONF
@@ -102,3 +103,20 @@ def get_test_network_info(count=1):
def is_osx():
return platform.mac_ver()[0] != ''
+
+
+test_dns_managers = []
+
+
+def dns_manager():
+ global test_dns_managers
+ manager = minidns.MiniDNS()
+ test_dns_managers.append(manager)
+ return manager
+
+
+def cleanup_dns_managers():
+ global test_dns_managers
+ for manager in test_dns_managers:
+ manager.delete_dns_file()
+ test_dns_managers = []
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index ee775bc6b..005012c7f 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -251,6 +251,11 @@ class ComputeDriver(object):
running VM"""
raise NotImplementedError()
+ def get_all_volume_usage(self, context, compute_host_bdms):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ raise NotImplementedError()
+
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 9f5956b0d..fb1ed5558 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -248,6 +248,13 @@ class FakeDriver(driver.ComputeDriver):
bw = []
return bw
+ def get_all_volume_usage(self, context, instances, start_time,
+ stop_time=None):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ volusage = []
+ return volusage
+
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index ee39b0b8c..035c38080 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -297,8 +297,8 @@ class IptablesFirewallDriver(FirewallDriver):
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
def _build_icmp_rule(self, rule, version):
- icmp_type = rule.from_port
- icmp_code = rule.to_port
+ icmp_type = rule['from_port']
+ icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b39fd773b..750ffb0a2 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -67,6 +67,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common.notifier import api as notifier
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
@@ -399,10 +400,22 @@ class LibvirtDriver(driver.ComputeDriver):
_connect_auth_cb,
None]
- if read_only:
- return libvirt.openReadOnly(uri)
- else:
- return libvirt.openAuth(uri, auth, 0)
+ try:
+ if read_only:
+ return libvirt.openReadOnly(uri)
+ else:
+ return libvirt.openAuth(uri, auth, 0)
+ except libvirt.libvirtError as ex:
+ LOG.exception(_("Connection to libvirt failed: %s"), ex)
+ payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
+ method='_connect',
+ reason=ex)
+ notifier.notify(nova_context.get_admin_context(),
+ notifier.publisher_id('compute'),
+ 'compute.libvirt.error',
+ notifier.ERROR,
+ payload)
+ pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
@@ -2190,12 +2203,50 @@ class LibvirtDriver(driver.ComputeDriver):
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
+ def get_all_volume_usage(self, context, compute_host_bdms):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ vol_usage = []
+
+ for instance_bdms in compute_host_bdms:
+ instance = instance_bdms['instance']
+
+ for bdm in instance_bdms['instance_bdms']:
+ vol_stats = []
+ mountpoint = bdm['device_name']
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+
+ LOG.debug(_("Trying to get stats for the volume %s"),
+ bdm['volume_id'])
+ vol_stats = self.block_stats(instance['name'], mountpoint)
+
+ if vol_stats:
+ rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+ vol_usage.append(dict(volume=bdm['volume_id'],
+ instance_id=instance['id'],
+ rd_req=rd_req,
+ rd_bytes=rd_bytes,
+ wr_req=wr_req,
+ wr_bytes=wr_bytes,
+ flush_operations=flush_ops))
+ return vol_usage
+
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
- domain = self._lookup_by_name(instance_name)
- return domain.blockStats(disk)
+ try:
+ domain = self._lookup_by_name(instance_name)
+ return domain.blockStats(disk)
+ except libvirt.libvirtError as e:
+ errcode = e.get_error_code()
+ LOG.info(_("Getting block stats failed, device might have "
+ "been detached. Code=%(errcode)s Error=%(e)s")
+ % locals())
+ except exception.InstanceNotFound:
+ LOG.info(_("Could not find domain in libvirt for instance %s. "
+ "Cannot get block stats for device") % instance_name)
def interface_stats(self, instance_name, interface):
"""
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
new file mode 100644
index 000000000..16b09d51f
--- /dev/null
+++ b/nova/virt/powervm/blockdev.py
@@ -0,0 +1,302 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import os
+import re
+
+from nova import exception as nova_exception
+from nova import utils
+
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+from nova.virt import images
+from nova.virt.powervm import command
+from nova.virt.powervm import common
+from nova.virt.powervm import constants
+from nova.virt.powervm import exception
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class PowerVMDiskAdapter(object):
+ pass
+
+
+class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
+    """Default block device provider for PowerVM
+
+ This disk adapter uses logical volumes on the hosting VIOS
+ to provide backing block devices for instances/LPARs
+ """
+
+ def __init__(self, connection):
+ super(PowerVMLocalVolumeAdapter, self).__init__()
+
+ self.command = command.IVMCommand()
+
+ self._connection = None
+ self.connection_data = connection
+
+ def _set_connection(self):
+ if self._connection is None:
+ self._connection = common.ssh_connect(self.connection_data)
+
+ def create_volume(self, size):
+ """Creates a logical volume with a minimum size
+
+ :param size: size of the logical volume in bytes
+ :returns: string -- the name of the new logical volume.
+ :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ """
+ return self._create_logical_volume(size)
+
+ def delete_volume(self, disk_name):
+ """Removes the Logical Volume and its associated vSCSI connection
+
+ :param disk_name: name of Logical Volume device in /dev/
+ """
+ LOG.debug(_("Removing the logical volume '%s'") % disk_name)
+ self._remove_logical_volume(disk_name)
+
+ def create_volume_from_image(self, context, instance, image_id):
+ """Creates a Logical Volume and copies the specified image to it
+
+ :param context: nova context used to retrieve image from glance
+ :param instance: instance to create the volume for
+        :param image_id: image_id reference used to locate image in glance
+ :returns: dictionary with the name of the created
+ Logical Volume device in 'device_name' key
+ """
+
+ file_name = '.'.join([image_id, 'gz'])
+ file_path = os.path.join(CONF.powervm_img_local_path,
+ file_name)
+
+ if not os.path.isfile(file_path):
+ LOG.debug(_("Fetching image '%s' from glance") % image_id)
+ images.fetch_to_raw(context, image_id, file_path,
+ instance['user_id'],
+ project_id=instance['project_id'])
+ else:
+ LOG.debug((_("Using image found at '%s'") % file_path))
+
+ LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
+ remote_path = CONF.powervm_img_remote_path
+ remote_file_name, size = self._copy_image_file(file_path, remote_path)
+
+ # calculate root device size in bytes
+ # we respect the minimum root device size in constants
+ size_gb = max(instance['instance_type']['root_gb'],
+ constants.POWERVM_MIN_ROOT_GB)
+ size = size_gb * 1024 * 1024 * 1024
+
+ try:
+ LOG.debug(_("Creating logical volume of size %s bytes") % size)
+ disk_name = self._create_logical_volume(size)
+
+ LOG.debug(_("Copying image to the device '%s'") % disk_name)
+ self._copy_file_to_device(remote_file_name, disk_name)
+ except Exception:
+ LOG.error(_("Error while creating logical volume from image. "
+ "Will attempt cleanup."))
+ # attempt cleanup of logical volume before re-raising exception
+ with excutils.save_and_reraise_exception():
+ try:
+ self.delete_volume(disk_name)
+ except Exception:
+ msg = _('Error while attempting cleanup of failed '
+ 'deploy to logical volume.')
+ LOG.exception(msg)
+
+ return {'device_name': disk_name}
+
+ def create_image_from_volume(self):
+ raise NotImplementedError()
+
+ def migrate_volume(self):
+ raise NotImplementedError()
+
+ def attach_volume_to_host(self, *args, **kargs):
+ pass
+
+ def detach_volume_from_host(self, *args, **kargs):
+ pass
+
+ def _create_logical_volume(self, size):
+ """Creates a logical volume with a minimum size.
+
+ :param size: size of the logical volume in bytes
+ :returns: string -- the name of the new logical volume.
+ :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ """
+ vgs = self.run_command(self.command.lsvg())
+ cmd = self.command.lsvg('%s -field vgname freepps -fmt :' %
+ ' '.join(vgs))
+ output = self.run_command(cmd)
+ found_vg = None
+
+ # If it's not a multiple of 1MB we get the next
+ # multiple and use it as the megabyte_size.
+ megabyte = 1024 * 1024
+ if (size % megabyte) != 0:
+ megabyte_size = int(size / megabyte) + 1
+ else:
+ megabyte_size = size / megabyte
+
+ # Search for a volume group with enough free space for
+ # the new logical volume.
+ for vg in output:
+ # Returned output example: 'rootvg:396 (25344 megabytes)'
+ match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
+ if match is None:
+ continue
+ vg_name, avail_size = match.groups()
+ if megabyte_size <= int(avail_size):
+ found_vg = vg_name
+ break
+
+ if not found_vg:
+ LOG.error(_('Could not create logical volume. '
+ 'No space left on any volume group.'))
+ raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
+
+ cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
+ lv_name = self.run_command(cmd)[0]
+ return lv_name
+
+ def _remove_logical_volume(self, lv_name):
+ """Removes the lv and the connection between its associated vscsi.
+
+ :param lv_name: a logical volume name
+ """
+ cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
+ self.run_command(cmd)
+
+ def _copy_file_to_device(self, source_path, device, decompress=True):
+ """Copy file to device.
+
+ :param source_path: path to input source file
+ :param device: output device name
+ :param decompress: if True (default) the file will be decompressed
+ on the fly while being copied to the drive
+ """
+ if decompress:
+ cmd = ('gunzip -c %s | dd of=/dev/%s bs=1024k' %
+ (source_path, device))
+ else:
+ cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
+ self.run_command_as_root(cmd)
+
+ def _copy_image_file(self, source_path, remote_path, decompress=False):
+ """Copy file to VIOS, decompress it, and return its new size and name.
+
+ :param source_path: source file path
+        :param remote_path: remote file path
+        :param decompress: if True, decompresses the file after copying;
+ if False (default), just copies the file
+ """
+ # Calculate source image checksum
+ hasher = hashlib.md5()
+ block_size = 0x10000
+ img_file = file(source_path, 'r')
+ buf = img_file.read(block_size)
+ while len(buf) > 0:
+ hasher.update(buf)
+ buf = img_file.read(block_size)
+ source_cksum = hasher.hexdigest()
+
+ comp_path = os.path.join(remote_path, os.path.basename(source_path))
+ uncomp_path = comp_path.rstrip(".gz")
+ if not decompress:
+ final_path = comp_path
+ else:
+ final_path = "%s.%s" % (uncomp_path, source_cksum)
+
+ # Check whether the image is already on IVM
+ output = self.run_command("ls %s" % final_path, check_exit_code=False)
+
+ # If the image does not exist already
+ if not len(output):
+ # Copy file to IVM
+ common.ftp_put_command(self.connection_data, source_path,
+ remote_path)
+
+ # Verify image file checksums match
+ cmd = ("/usr/bin/csum -h MD5 %s |"
+ "/usr/bin/awk '{print $1}'" % comp_path)
+ output = self.run_command_as_root(cmd)
+ if not len(output):
+ LOG.error(_("Unable to get checksum"))
+ raise exception.PowerVMFileTransferFailed()
+ if source_cksum != output[0]:
+ LOG.error(_("Image checksums do not match"))
+ raise exception.PowerVMFileTransferFailed()
+
+ if decompress:
+ # Unzip the image
+ cmd = "/usr/bin/gunzip %s" % comp_path
+ output = self.run_command_as_root(cmd)
+
+ # Remove existing image file
+ cmd = "/usr/bin/rm -f %s.*" % uncomp_path
+ output = self.run_command_as_root(cmd)
+
+ # Rename unzipped image
+ cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
+ output = self.run_command_as_root(cmd)
+
+ # Remove compressed image file
+ cmd = "/usr/bin/rm -f %s" % comp_path
+ output = self.run_command_as_root(cmd)
+
+ else:
+ LOG.debug(_("Image found on host at '%s'") % final_path)
+
+ # Calculate file size in multiples of 512 bytes
+ output = self.run_command("ls -o %s|awk '{print $4}'" %
+ final_path, check_exit_code=False)
+ if len(output):
+ size = int(output[0])
+ else:
+ LOG.error(_("Uncompressed image file not found"))
+ raise exception.PowerVMFileTransferFailed()
+ if (size % 512 != 0):
+ size = (int(size / 512) + 1) * 512
+
+ return final_path, size
+
+ def run_command(self, cmd, check_exit_code=True):
+ """Run a remote command using an active ssh connection.
+
+        :param cmd: String with the command to run.
+ """
+ self._set_connection()
+ stdout, stderr = utils.ssh_execute(self._connection, cmd,
+ check_exit_code=check_exit_code)
+ return stdout.strip().splitlines()
+
+ def run_command_as_root(self, command, check_exit_code=True):
+ """Run a remote command as root using an active ssh connection.
+
+        :param command: String with the command to run as root.
+ """
+ self._set_connection()
+ stdout, stderr = common.ssh_command_as_root(
+ self._connection, command, check_exit_code=check_exit_code)
+ return stdout.read().splitlines()
diff --git a/nova/virt/powervm/constants.py b/nova/virt/powervm/constants.py
index f1d091586..0d1e0892e 100644
--- a/nova/virt/powervm/constants.py
+++ b/nova/virt/powervm/constants.py
@@ -31,6 +31,8 @@ POWERVM_CPU_INFO = ('ppc64', 'powervm', '3940')
POWERVM_HYPERVISOR_TYPE = 'powervm'
POWERVM_HYPERVISOR_VERSION = '7.1'
+POWERVM_MIN_ROOT_GB = 10
+
POWERVM_MIN_MEM = 512
POWERVM_MAX_MEM = 1024
POWERVM_MAX_CPUS = 1
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index c977f7687..ad6b17035 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -15,8 +15,6 @@
# under the License.
import decimal
-import hashlib
-import os
import re
import time
@@ -28,7 +26,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova.virt import images
+from nova.virt.powervm import blockdev
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
@@ -47,6 +45,13 @@ def get_powervm_operator():
CONF.powervm_mgr_passwd))
+def get_powervm_disk_adapter():
+ return blockdev.PowerVMLocalVolumeAdapter(
+ common.Connection(CONF.powervm_mgr,
+ CONF.powervm_mgr_user,
+ CONF.powervm_mgr_passwd))
+
+
class PowerVMOperator(object):
"""PowerVM main operator.
@@ -56,6 +61,7 @@ class PowerVMOperator(object):
def __init__(self):
self._operator = get_powervm_operator()
+ self._disk_adapter = get_powervm_disk_adapter()
self._host_stats = {}
self._update_host_stats()
@@ -219,29 +225,21 @@ class PowerVMOperator(object):
def _create_image(context, instance, image_id):
"""Fetch image from glance and copy it to the remote system."""
try:
- file_name = '.'.join([image_id, 'gz'])
- file_path = os.path.join(CONF.powervm_img_local_path,
- file_name)
- LOG.debug(_("Fetching image '%s' from glance") % image_id)
- images.fetch_to_raw(context, image_id, file_path,
- instance['user_id'],
- project_id=instance['project_id'])
- LOG.debug(_("Copying image '%s' to IVM") % file_path)
- remote_path = CONF.powervm_img_remote_path
- remote_file_name, size = self._operator.copy_image_file(
- file_path, remote_path)
- # Logical volume
- LOG.debug(_("Creating logical volume"))
+ root_volume = self._disk_adapter.create_volume_from_image(
+ context, instance, image_id)
+
+ self._disk_adapter.attach_volume_to_host(root_volume)
+
lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
- disk_name = self._operator.create_logical_volume(size)
- self._operator.attach_disk_to_vhost(disk_name, vhost)
- LOG.debug(_("Copying image to the device '%s'") % disk_name)
- self._operator.copy_file_to_device(remote_file_name, disk_name)
+ self._operator.attach_disk_to_vhost(
+ root_volume['device_name'], vhost)
except Exception, e:
LOG.exception(_("PowerVM image creation failed: %s") % str(e))
raise exception.PowerVMImageCreationFailed()
+ spawn_start = time.time()
+
try:
_create_lpar_instance(instance)
_create_image(context, instance, image_id)
@@ -274,6 +272,10 @@ class PowerVMOperator(object):
LOG.exception(_('Error while attempting to '
'clean up failed instance launch.'))
+ spawn_time = time.time() - spawn_start
+ LOG.info(_("Instance spawned in %s seconds") % spawn_time,
+ instance=instance)
+
def destroy(self, instance_name):
"""Destroy (shutdown and delete) the specified instance.
@@ -295,8 +297,10 @@ class PowerVMOperator(object):
self._operator.stop_lpar(instance_name)
if disk_name:
- LOG.debug(_("Removing the logical volume '%s'") % disk_name)
- self._operator.remove_logical_volume(disk_name)
+ # TODO(mrodden): we should also detach from the instance
+ # before we start deleting things...
+ self._disk_adapter.detach_volume_from_host(disk_name)
+ self._disk_adapter.delete_volume(disk_name)
LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
self._operator.remove_lpar(instance_name)
@@ -439,20 +443,6 @@ class BaseOperator(object):
return None
- def get_disk_name_by_vhost(self, vhost):
- """Returns the disk name attached to a vhost.
-
- :param vhost: a vhost name
- :returns: string -- disk name
- """
- cmd = self.command.lsmap('-vadapter %s -field backing -fmt :'
- % vhost)
- output = self.run_command(cmd)
- if output:
- return output[0]
-
- return None
-
def get_hostname(self):
"""Returns the managed system hostname.
@@ -461,148 +451,18 @@ class BaseOperator(object):
output = self.run_command(self.command.hostname())
return output[0]
- def remove_disk(self, disk_name):
- """Removes a disk.
-
- :param disk: a disk name
- """
- self.run_command(self.command.rmdev('-dev %s' % disk_name))
-
- def create_logical_volume(self, size):
- """Creates a logical volume with a minimum size.
+ def get_disk_name_by_vhost(self, vhost):
+ """Returns the disk name attached to a vhost.
- :param size: size of the logical volume in bytes
- :returns: string -- the name of the new logical volume.
- :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ :param vhost: a vhost name
+ :returns: string -- disk name
"""
- vgs = self.run_command(self.command.lsvg())
- cmd = self.command.lsvg('%s -field vgname freepps -fmt :'
- % ' '.join(vgs))
+ cmd = self.command.lsmap('-vadapter %s -field backing -fmt :' % vhost)
output = self.run_command(cmd)
- found_vg = None
-
- # If it's not a multiple of 1MB we get the next
- # multiple and use it as the megabyte_size.
- megabyte = 1024 * 1024
- if (size % megabyte) != 0:
- megabyte_size = int(size / megabyte) + 1
- else:
- megabyte_size = size / megabyte
-
- # Search for a volume group with enough free space for
- # the new logical volume.
- for vg in output:
- # Returned output example: 'rootvg:396 (25344 megabytes)'
- match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
- if match is None:
- continue
- vg_name, avail_size = match.groups()
- if megabyte_size <= int(avail_size):
- found_vg = vg_name
- break
-
- if not found_vg:
- LOG.error(_('Could not create logical volume. '
- 'No space left on any volume group.'))
- raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
-
- cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
- lv_name, = self.run_command(cmd)
- return lv_name
-
- def remove_logical_volume(self, lv_name):
- """Removes the lv and the connection between its associated vscsi.
-
- :param lv_name: a logical volume name
- """
- cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
- self.run_command(cmd)
-
- def copy_file_to_device(self, source_path, device):
- """Copy file to device.
-
- :param source_path: path to input source file
- :param device: output device name
- """
- cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
- self.run_command_as_root(cmd)
-
- def copy_image_file(self, source_path, remote_path):
- """Copy file to VIOS, decompress it, and return its new size and name.
+ if output:
+ return output[0]
- :param source_path: source file path
- :param remote_path remote file path
- """
- # Calculate source image checksum
- hasher = hashlib.md5()
- block_size = 0x10000
- img_file = file(source_path, 'r')
- buf = img_file.read(block_size)
- while len(buf) > 0:
- hasher.update(buf)
- buf = img_file.read(block_size)
- source_cksum = hasher.hexdigest()
-
- comp_path = remote_path + os.path.basename(source_path)
- uncomp_path = comp_path.rstrip(".gz")
- final_path = "%s.%s" % (uncomp_path, source_cksum)
-
- # Check whether the uncompressed image is already on IVM
- output = self.run_command("ls %s" % final_path, check_exit_code=False)
-
- # If the image does not exist already
- if not len(output):
- # Copy file to IVM
- common.ftp_put_command(self.connection_data, source_path,
- remote_path)
-
- # Verify image file checksums match
- cmd = ("/usr/bin/csum -h MD5 %s |"
- "/usr/bin/awk '{print $1}'" % comp_path)
- output = self.run_command_as_root(cmd)
- if not len(output):
- LOG.error(_("Unable to get checksum"))
- raise exception.PowerVMFileTransferFailed()
- if source_cksum != output[0]:
- LOG.error(_("Image checksums do not match"))
- raise exception.PowerVMFileTransferFailed()
-
- # Unzip the image
- cmd = "/usr/bin/gunzip %s" % comp_path
- output = self.run_command_as_root(cmd)
-
- # Remove existing image file
- cmd = "/usr/bin/rm -f %s.*" % uncomp_path
- output = self.run_command_as_root(cmd)
-
- # Rename unzipped image
- cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
- output = self.run_command_as_root(cmd)
-
- # Remove compressed image file
- cmd = "/usr/bin/rm -f %s" % comp_path
- output = self.run_command_as_root(cmd)
-
- # Calculate file size in multiples of 512 bytes
- output = self.run_command("ls -o %s|awk '{print $4}'"
- % final_path, check_exit_code=False)
- if len(output):
- size = int(output[0])
- else:
- LOG.error(_("Uncompressed image file not found"))
- raise exception.PowerVMFileTransferFailed()
- if (size % 512 != 0):
- size = (int(size / 512) + 1) * 512
-
- return final_path, size
-
- def run_cfg_dev(self, device_name):
- """Run cfgdev command for a specific device.
-
- :param device_name: device name the cfgdev command will run.
- """
- cmd = self.command.cfgdev('-dev %s' % device_name)
- self.run_command(cmd)
+ return None
def attach_disk_to_vhost(self, disk, vhost):
"""Attach disk name to a specific vhost.
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index a39355830..e30465741 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -55,12 +55,12 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def _build_tcp_udp_rule(self, rule, version):
- if rule.from_port == rule.to_port:
- return ['--dport', '%s' % (rule.from_port,)]
+ if rule['from_port'] == rule['to_port']:
+ return ['--dport', '%s' % (rule['from_port'],)]
else:
# No multiport needed for XS!
- return ['--dport', '%s:%s' % (rule.from_port,
- rule.to_port)]
+ return ['--dport', '%s:%s' % (rule['from_port'],
+ rule['to_port'])]
def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6.
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index e76bdbe1c..0be6fad12 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -80,51 +80,51 @@ class ResourcePool(object):
def add_to_aggregate(self, context, aggregate, host, slave_info=None):
"""Add a compute host to an aggregate."""
- if not self._is_hv_pool(context, aggregate.id):
+ if not self._is_hv_pool(context, aggregate['id']):
return
invalid = {pool_states.CHANGING: 'setup in progress',
pool_states.DISMISSED: 'aggregate deleted',
pool_states.ERROR: 'aggregate in error'}
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+ if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
in invalid.keys()):
raise exception.InvalidAggregateAction(
action='add host',
- aggregate_id=aggregate.id,
+ aggregate_id=aggregate['id'],
reason=invalid[self._get_metadata(context,
- aggregate.id)
+ aggregate['id'])
[pool_states.KEY]])
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+ if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
== pool_states.CREATED):
- self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ self._virtapi.aggregate_metadata_add(context, aggregate['id'],
{pool_states.KEY:
pool_states.CHANGING})
- if len(aggregate.hosts) == 1:
+ if len(aggregate['hosts']) == 1:
# this is the first host of the pool -> make it master
- self._init_pool(aggregate.id, aggregate.name)
+ self._init_pool(aggregate['id'], aggregate['name'])
# save metadata so that we can find the master again
metadata = {'master_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
- self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ self._virtapi.aggregate_metadata_add(context, aggregate['id'],
metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
master_compute = self._get_metadata(context,
- aggregate.id)['master_compute']
+ aggregate['id'])['master_compute']
if master_compute == CONF.host and master_compute != host:
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
- self._join_slave(aggregate.id, host,
+ self._join_slave(aggregate['id'], host,
slave_info.get('compute_uuid'),
slave_info.get('url'), slave_info.get('user'),
slave_info.get('passwd'))
metadata = {host: slave_info.get('xenhost_uuid'), }
- self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ self._virtapi.aggregate_metadata_add(context, aggregate['id'],
metadata)
elif master_compute and master_compute != host:
# send rpc cast to master, asking to add the following
@@ -137,55 +137,55 @@ class ResourcePool(object):
def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
"""Remove a compute host from an aggregate."""
slave_info = slave_info or dict()
- if not self._is_hv_pool(context, aggregate.id):
+ if not self._is_hv_pool(context, aggregate['id']):
return
invalid = {pool_states.CREATED: 'no hosts to remove',
pool_states.CHANGING: 'setup in progress',
pool_states.DISMISSED: 'aggregate deleted', }
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+ if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
in invalid.keys()):
raise exception.InvalidAggregateAction(
action='remove host',
- aggregate_id=aggregate.id,
+ aggregate_id=aggregate['id'],
reason=invalid[self._get_metadata(context,
- aggregate.id)[pool_states.KEY]])
+ aggregate['id'])[pool_states.KEY]])
master_compute = self._get_metadata(context,
- aggregate.id)['master_compute']
+ aggregate['id'])['master_compute']
if master_compute == CONF.host and master_compute != host:
# this is the master -> instruct it to eject a host from the pool
- host_uuid = self._get_metadata(context, aggregate.id)[host]
- self._eject_slave(aggregate.id,
+ host_uuid = self._get_metadata(context, aggregate['id'])[host]
+ self._eject_slave(aggregate['id'],
slave_info.get('compute_uuid'), host_uuid)
- self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+ self._virtapi.aggregate_metadata_delete(context, aggregate['id'],
host)
elif master_compute == host:
# Remove master from its own pool -> destroy pool only if the
# master is on its own, otherwise raise fault. Destroying a
# pool made only by master is fictional
- if len(aggregate.hosts) > 1:
+ if len(aggregate['hosts']) > 1:
# NOTE: this could be avoided by doing a master
# re-election, but this is simpler for now.
raise exception.InvalidAggregateAction(
- aggregate_id=aggregate.id,
+ aggregate_id=aggregate['id'],
action='remove_from_aggregate',
reason=_('Unable to eject %(host)s '
'from the pool; pool not empty')
% locals())
- self._clear_pool(aggregate.id)
+ self._clear_pool(aggregate['id'])
for key in ['master_compute', host]:
- self._virtapi.aggregate_metadata_delete(context, aggregate.id,
- key)
+ self._virtapi.aggregate_metadata_delete(context,
+ aggregate['id'], key)
elif master_compute and master_compute != host:
# A master exists -> forward pool-eject request to master
slave_info = self._create_slave_info()
self.compute_rpcapi.remove_aggregate_host(
- context, aggregate.id, host, master_compute, slave_info)
+ context, aggregate['id'], host, master_compute, slave_info)
else:
# this shouldn't have happened
- raise exception.AggregateError(aggregate_id=aggregate.id,
+ raise exception.AggregateError(aggregate_id=aggregate['id'],
action='remove_from_aggregate',
reason=_('Unable to eject %(host)s '
'from the pool; No master found')
diff --git a/tools/flakes.py b/tools/flakes.py
new file mode 100644
index 000000000..7ebe10cec
--- /dev/null
+++ b/tools/flakes.py
@@ -0,0 +1,22 @@
+import __builtin__
+import os
+import sys
+
+from pyflakes.scripts.pyflakes import main
+
+"""
+ wrapper for pyflakes to ignore gettext based warning:
+ "undefined name '_'"
+
+ From https://bugs.launchpad.net/pyflakes/+bug/844592
+"""
+
+names = os.environ.get('PYFLAKES_BUILTINS', '_')
+names = [x.strip() for x in names.split(',')]
+for x in names:
+ if not hasattr(__builtin__, x):
+ setattr(__builtin__, x, True)
+
+del names, os, __builtin__
+
+sys.exit(main())
diff --git a/tools/pip-requires b/tools/pip-requires
index e590f365e..6b3c83ec6 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -2,7 +2,7 @@ SQLAlchemy>=0.7.8,<=0.7.9
Cheetah==2.4.4
amqplib>=0.6.1
anyjson>=0.2.4
-boto==2.1.1
+boto
eventlet>=0.9.17
kombu>=1.0.4
lxml>=2.3,<=2.3.5
diff --git a/tox.ini b/tox.ini
index 16959d621..ebbaf2173 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,6 +35,10 @@ deps = -r{toxinidir}/tools/pip-requires
pylint==0.26.0
commands = bash tools/lintstack.sh
+[testenv:pyflakes]
+deps = pyflakes
+commands = python tools/flakes.py nova
+
[testenv:cover]
setenv = NOSE_WITH_COVERAGE=1