-rwxr-xr-x  bin/nova-all | 3
-rwxr-xr-x  bin/nova-api | 3
-rwxr-xr-x  bin/nova-api-ec2 | 3
-rwxr-xr-x  bin/nova-api-metadata | 3
-rwxr-xr-x  bin/nova-api-os-compute | 3
-rwxr-xr-x  bin/nova-cert | 3
-rwxr-xr-x  bin/nova-clear-rabbit-queues | 3
-rwxr-xr-x  bin/nova-compute | 12
-rwxr-xr-x  bin/nova-console | 3
-rwxr-xr-x  bin/nova-consoleauth | 3
-rwxr-xr-x  bin/nova-dhcpbridge | 3
-rwxr-xr-x  bin/nova-manage | 3
-rwxr-xr-x  bin/nova-network | 3
-rwxr-xr-x  bin/nova-novncproxy | 3
-rwxr-xr-x  bin/nova-objectstore | 3
-rwxr-xr-x  bin/nova-rpc-zmq-receiver | 3
-rwxr-xr-x  bin/nova-scheduler | 3
-rwxr-xr-x  bin/nova-xvpvncproxy | 3
-rw-r--r--  doc/source/devref/scheduler.rst | 10
-rw-r--r--  etc/nova/api-paste.ini | 2
-rw-r--r--  etc/nova/policy.json | 1
-rw-r--r--  nova/api/ec2/cloud.py | 2
-rw-r--r--  nova/api/ec2/ec2utils.py | 8
-rw-r--r--  nova/api/openstack/compute/contrib/floating_ips.py | 4
-rw-r--r--  nova/api/openstack/compute/servers.py | 9
-rw-r--r--  nova/common/deprecated.py | 55
-rw-r--r--  nova/compute/api.py | 22
-rw-r--r--  nova/compute/manager.py | 181
-rw-r--r--  nova/compute/resource_tracker.py | 2
-rw-r--r--  nova/compute/rpcapi.py | 42
-rw-r--r--  nova/compute/task_states.py | 12
-rw-r--r--  nova/config.py | 29
-rw-r--r--  nova/console/api.py | 4
-rw-r--r--  nova/db/sqlalchemy/api.py | 113
-rw-r--r--  nova/db/sqlalchemy/session.py | 288
-rw-r--r--  nova/exception.py | 4
-rw-r--r--  nova/flags.py | 7
-rw-r--r--  nova/locale/nova.pot | 3181
-rw-r--r--  nova/manager.py | 22
-rw-r--r--  nova/network/api.py | 2
-rw-r--r--  nova/network/manager.py | 9
-rw-r--r--  nova/network/quantumv2/api.py | 28
-rw-r--r--  nova/network/rpcapi.py | 22
-rw-r--r--  nova/openstack/common/log.py | 18
-rw-r--r--  nova/openstack/common/uuidutils.py | 35
-rw-r--r--  nova/scheduler/chance.py | 8
-rw-r--r--  nova/scheduler/driver.py | 5
-rw-r--r--  nova/scheduler/filter_scheduler.py | 7
-rw-r--r--  nova/scheduler/host_manager.py | 43
-rw-r--r--  nova/scheduler/manager.py | 25
-rw-r--r--  nova/scheduler/multi.py | 15
-rw-r--r--  nova/scheduler/rpcapi.py | 8
-rw-r--r--  nova/scheduler/simple.py | 97
-rw-r--r--  nova/service.py | 9
-rw-r--r--  nova/test.py | 3
-rw-r--r--  nova/tests/api/ec2/test_cinder_cloud.py | 3
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 31
-rw-r--r--  nova/tests/api/openstack/compute/test_server_actions.py | 12
-rw-r--r--  nova/tests/baremetal/test_proxy_bare_metal.py | 2
-rw-r--r--  nova/tests/compute/test_compute.py | 169
-rw-r--r--  nova/tests/compute/test_resource_tracker.py | 10
-rw-r--r--  nova/tests/compute/test_rpcapi.py | 21
-rw-r--r--  nova/tests/fake_flags.py | 2
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 2
-rw-r--r--  nova/tests/network/test_api.py | 64
-rw-r--r--  nova/tests/network/test_quantumv2.py | 51
-rw-r--r--  nova/tests/network/test_rpcapi.py | 50
-rw-r--r--  nova/tests/scheduler/fakes.py | 8
-rw-r--r--  nova/tests/scheduler/test_filter_scheduler.py | 2
-rw-r--r--  nova/tests/scheduler/test_host_manager.py | 3
-rw-r--r--  nova/tests/scheduler/test_multi_scheduler.py | 21
-rw-r--r--  nova/tests/scheduler/test_rpcapi.py | 6
-rw-r--r--  nova/tests/test_deprecated.py | 46
-rw-r--r--  nova/tests/test_flags.py | 7
-rw-r--r--  nova/tests/test_hypervapi.py | 2
-rw-r--r--  nova/tests/test_image_utils.py | 100
-rw-r--r--  nova/tests/test_libvirt.py | 163
-rw-r--r--  nova/tests/test_powervm.py | 2
-rw-r--r--  nova/tests/test_service.py | 98
-rw-r--r--  nova/tests/test_utils.py | 38
-rw-r--r--  nova/tests/test_virt_drivers.py | 38
-rw-r--r--  nova/tests/test_vmwareapi.py | 2
-rw-r--r--  nova/tests/test_xenapi.py | 140
-rw-r--r--  nova/utils.py | 50
-rw-r--r--  nova/virt/baremetal/driver.py | 11
-rw-r--r--  nova/virt/disk/api.py | 4
-rw-r--r--  nova/virt/driver.py | 21
-rw-r--r--  nova/virt/fake.py | 25
-rw-r--r--  nova/virt/hyperv/driver.py | 4
-rw-r--r--  nova/virt/images.py | 164
-rw-r--r--  nova/virt/libvirt/driver.py | 20
-rw-r--r--  nova/virt/libvirt/utils.py | 18
-rw-r--r--  nova/virt/powervm/driver.py | 5
-rw-r--r--  nova/virt/virtapi.py | 44
-rw-r--r--  nova/virt/vmwareapi/driver.py | 4
-rw-r--r--  nova/virt/xenapi/driver.py | 31
-rw-r--r--  nova/virt/xenapi/host.py | 17
-rw-r--r--  nova/virt/xenapi/pool.py | 2
-rw-r--r--  nova/virt/xenapi/vmops.py | 47
-rw-r--r--  openstack-common.conf | 2
-rw-r--r--  smoketests/run_tests.py | 6
-rw-r--r--  tools/pip-requires | 1
-rw-r--r--  tools/test-requires | 1
-rw-r--r--  tools/xenserver/destroy_cached_images.py | 3
104 files changed, 2713 insertions, 3255 deletions
diff --git a/bin/nova-all b/bin/nova-all
index 2bbc27c82..ce0a459b4 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -40,6 +40,7 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.objectstore import s3server
from nova.openstack.common import log as logging
@@ -51,7 +52,7 @@ from nova.vnc import xvp_proxy
LOG = logging.getLogger('nova.all')
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
launcher = service.ProcessLauncher()
diff --git a/bin/nova-api b/bin/nova-api
index f55eca719..4bcfa7f79 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -36,13 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
launcher = service.ProcessLauncher()
diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2
index 8e66ab874..f165b5ce9 100755
--- a/bin/nova-api-ec2
+++ b/bin/nova-api-ec2
@@ -32,13 +32,14 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = service.WSGIService('ec2')
diff --git a/bin/nova-api-metadata b/bin/nova-api-metadata
index d445a16a9..f50e5ce84 100755
--- a/bin/nova-api-metadata
+++ b/bin/nova-api-metadata
@@ -32,13 +32,14 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = service.WSGIService('metadata')
diff --git a/bin/nova-api-os-compute b/bin/nova-api-os-compute
index 529d58821..5cf5f6910 100755
--- a/bin/nova-api-os-compute
+++ b/bin/nova-api-os-compute
@@ -32,13 +32,14 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = service.WSGIService('osapi_compute')
diff --git a/bin/nova-cert b/bin/nova-cert
index ebdd0e0b9..317739329 100755
--- a/bin/nova-cert
+++ b/bin/nova-cert
@@ -32,13 +32,14 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index 0cbf5ab19..05531de9b 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -42,6 +42,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import exception
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -69,7 +70,7 @@ def delete_queues(queues):
x.queue_delete(q)
if __name__ == '__main__':
- args = flags.parse_args(sys.argv)
+ args = config.parse_args(sys.argv)
logging.setup("nova")
delete_queues(args[1:])
if FLAGS.delete_exchange:
diff --git a/bin/nova-compute b/bin/nova-compute
index 2a2a0013a..08c3862d2 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -20,7 +20,14 @@
"""Starter script for Nova Compute."""
import eventlet
-eventlet.monkey_patch()
+import os
+
+if os.name == 'nt':
+ # eventlet monkey patching causes subprocess.Popen to fail on Windows
+ # when using pipes due to missing non blocking I/O support
+ eventlet.monkey_patch(os=False)
+else:
+ eventlet.monkey_patch()
import os
import sys
@@ -34,13 +41,14 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup('nova')
utils.monkey_patch()
diff --git a/bin/nova-console b/bin/nova-console
index f664040b7..92b99edfb 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -33,12 +33,13 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
server = service.Service.create(binary='nova-console',
diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth
index 58ecd37b3..14ef701a7 100755
--- a/bin/nova-consoleauth
+++ b/bin/nova-consoleauth
@@ -32,13 +32,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova.consoleauth import manager
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
if __name__ == "__main__":
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
server = service.Service.create(binary='nova-consoleauth',
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 0fc7751fc..48639ce87 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -37,6 +37,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
+from nova import config
from nova import flags
from nova.network import linux_net
from nova.network import rpcapi as network_rpcapi
@@ -94,7 +95,7 @@ def init_leases(network_id):
def main():
"""Parse environment and arguments and call the approproate action."""
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
- argv = flags.parse_args(sys.argv, default_config_files=[flagfile])
+ argv = config.parse_args(sys.argv, default_config_files=[flagfile])
logging.setup("nova")
if int(os.environ.get('TESTING', '0')):
diff --git a/bin/nova-manage b/bin/nova-manage
index 93b079488..1cb2dabb7 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -73,6 +73,7 @@ gettext.install('nova', unicode=1)
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import context
from nova import db
from nova.db import migration
@@ -1220,7 +1221,7 @@ def main():
"""Parse options and call the appropriate class/method."""
try:
- argv = flags.parse_args(sys.argv)
+ argv = config.parse_args(sys.argv)
logging.setup("nova")
except cfg.ConfigFilesNotFoundError:
cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None
diff --git a/bin/nova-network b/bin/nova-network
index 36343bb73..d23d7882c 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -34,13 +34,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 483c72e81..d3d9702af 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -28,6 +28,7 @@ import sys
import websockify
+from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import flags
@@ -133,7 +134,7 @@ if __name__ == '__main__':
parser.error("SSL only and %s not found" % FLAGS.cert)
# Setup flags
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
# Check to see if novnc html/js/css files are present
if not os.path.exists(FLAGS.web):
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 2149b1610..9b9e2b7a7 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -34,6 +34,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.objectstore import s3server
from nova.openstack.common import log as logging
@@ -42,7 +43,7 @@ from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = s3server.get_wsgi_server()
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
index 5636718a2..d63ea108e 100755
--- a/bin/nova-rpc-zmq-receiver
+++ b/bin/nova-rpc-zmq-receiver
@@ -32,6 +32,7 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
@@ -45,7 +46,7 @@ FLAGS.register_opts(impl_zmq.zmq_opts)
def main():
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 64dadb9e4..fc345808a 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -36,13 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index 5929a33b5..e884b3f52 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -31,6 +31,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -40,7 +41,7 @@ from nova.vnc import xvp_proxy
FLAGS = flags.FLAGS
if __name__ == "__main__":
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
wsgi_server = xvp_proxy.get_wsgi_server()
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index 066781514..6f0b8edf5 100644
--- a/doc/source/devref/scheduler.rst
+++ b/doc/source/devref/scheduler.rst
@@ -48,16 +48,6 @@ The :mod:`nova.scheduler.chance` Driver
:show-inheritance:
-The :mod:`nova.scheduler.simple` Driver
----------------------------------------
-
-.. automodule:: nova.scheduler.simple
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
Tests
-----
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 0d57e02a9..3970974c0 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -124,4 +124,4 @@ auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
-signing_dirname = /tmp/keystone-signing-nova
+signing_dir = /tmp/keystone-signing-nova
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index f77f733c6..bd015802a 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -7,6 +7,7 @@
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
+ "compute:create:forced_host": "is_admin:True",
"compute:get_all": "",
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 20e68030f..3446b5a8f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -457,7 +457,7 @@ class CloudController(object):
r['groups'] = []
r['ipRanges'] = []
if rule.group_id:
- source_group = db.security_group_get(context, rule.group_id)
+ source_group = rule.grantee_group
r['groups'] += [{'groupName': source_group.name,
'userId': source_group.project_id}]
if rule.protocol:
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 4d0a926df..580cfdac7 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -25,7 +25,7 @@ from nova import flags
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-from nova import utils
+from nova.openstack.common import uuidutils
FLAGS = flags.FLAGS
@@ -130,7 +130,7 @@ def id_to_ec2_id(instance_id, template='i-%08x'):
def id_to_ec2_inst_id(instance_id):
"""Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_instance_uuid(ctxt, instance_id)
return id_to_ec2_id(int_id)
@@ -150,7 +150,7 @@ def get_instance_uuid_from_int_id(context, int_id):
def id_to_ec2_snap_id(snapshot_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
- if utils.is_uuid_like(snapshot_id):
+ if uuidutils.is_uuid_like(snapshot_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id)
return id_to_ec2_id(int_id, 'snap-%08x')
@@ -160,7 +160,7 @@ def id_to_ec2_snap_id(snapshot_id):
def id_to_ec2_vol_id(volume_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
- if utils.is_uuid_like(volume_id):
+ if uuidutils.is_uuid_like(volume_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_volume_uuid(ctxt, volume_id)
return id_to_ec2_id(int_id, 'vol-%08x')
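The uuidutils helper that replaces utils.is_uuid_like throughout this commit
is tiny; a standalone sketch, assuming it matches the openstack-common
implementation of this era:

    import uuid

    def is_uuid_like(val):
        # True only for strings that round-trip through uuid.UUID in
        # canonical form, e.g. 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'.
        try:
            return str(uuid.UUID(val)) == val
        except (TypeError, ValueError, AttributeError):
            return False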
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index 56a6a8fad..4f6dbffdb 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -27,7 +27,7 @@ from nova.compute import utils as compute_utils
from nova import exception
from nova import network
from nova.openstack.common import log as logging
-from nova import utils
+from nova.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
@@ -307,7 +307,7 @@ class FloatingIPActionController(wsgi.Controller):
# disassociate if associated
if (instance and
floating_ip.get('fixed_ip_id') and
- (utils.is_uuid_like(id) and
+ (uuidutils.is_uuid_like(id) and
[instance['uuid'] == id] or
[instance['id'] == id])[0]):
disassociate_floating_ip(self, context, instance, address)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index ba88d72e7..c4293255d 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -36,6 +36,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import utils
@@ -621,7 +622,7 @@ class Controller(wsgi.Controller):
# port parameter is only for quantum v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
- if not utils.is_uuid_like(port_id):
+ if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
@@ -629,9 +630,9 @@ class Controller(wsgi.Controller):
else:
network_uuid = network['uuid']
- if not port_id and not utils.is_uuid_like(network_uuid):
+ if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
- if not utils.is_uuid_like(br_uuid):
+ if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
@@ -1099,7 +1100,7 @@ class Controller(wsgi.Controller):
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
- if not utils.is_uuid_like(image_uuid):
+ if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/common/deprecated.py b/nova/common/deprecated.py
deleted file mode 100644
index 6b1c587e8..000000000
--- a/nova/common/deprecated.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 IBM
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import warnings
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-deprecate_opts = [
- cfg.BoolOpt('fatal_deprecations',
- default=False,
- help='make deprecations fatal')
- ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(deprecate_opts)
-
-
-def _showwarning(message, category, filename, lineno, file=None, line=None):
- """
- Redirect warnings into logging.
- """
- LOG.warn(str(message))
-
-
-# Install our warnings handler
-warnings.showwarning = _showwarning
-
-
-def warn(msg=""):
- """
- Warn of a deprecated config option that an operator has specified.
- This should be added in the code where we've made a change in how
- we use some operator changeable parameter to indicate that it will
- go away in a future version of OpenStack.
- """
- warnings.warn(_("Deprecated Config: %s") % msg)
- if FLAGS.fatal_deprecations:
- raise exception.DeprecatedConfig(msg=msg)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9e5971862..ba4f2b4ff 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -48,6 +48,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
@@ -499,7 +500,8 @@ class API(base.Base):
LOG.debug(_("Going to run %s instances...") % num_instances)
filter_properties = dict(scheduler_hints=scheduler_hints)
- if context.is_admin and forced_host:
+ if forced_host:
+ check_policy(context, 'create:forced_host', {})
filter_properties['force_hosts'] = [forced_host]
for i in xrange(num_instances):
@@ -837,11 +839,11 @@ class API(base.Base):
# no daemon to reclaim, so delete it immediately.
if instance['host']:
instance = self.update(context, instance,
- task_state=task_states.POWERING_OFF,
+ task_state=task_states.SOFT_DELETING,
expected_task_state=None,
deleted_at=timeutils.utcnow())
- self.compute_rpcapi.power_off_instance(context, instance)
+ self.compute_rpcapi.soft_delete_instance(context, instance)
else:
LOG.warning(_('No host for instance, deleting immediately'),
instance=instance)
@@ -1001,10 +1003,10 @@ class API(base.Base):
"""Restore a previously deleted (but not reclaimed) instance."""
if instance['host']:
instance = self.update(context, instance,
- task_state=task_states.POWERING_ON,
+ task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
- self.compute_rpcapi.power_on_instance(context, instance)
+ self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
@@ -1030,7 +1032,7 @@ class API(base.Base):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance = self.update(context, instance,
- task_state=task_states.STOPPING,
+ task_state=task_states.POWERING_OFF,
expected_task_state=None,
progress=0)
@@ -1044,7 +1046,7 @@ class API(base.Base):
LOG.debug(_("Going to try to start instance"), instance=instance)
instance = self.update(context, instance,
- task_state=task_states.STARTING,
+ task_state=task_states.POWERING_ON,
expected_task_state=None)
# TODO(yamahata): injected_files isn't supported right now.
@@ -1067,7 +1069,7 @@ class API(base.Base):
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
instance = self.db.instance_get_by_uuid(context, instance_id)
else:
instance = self.db.instance_get(context, instance_id)
@@ -1557,7 +1559,7 @@ class API(base.Base):
expected_task_state=None)
self.compute_rpcapi.revert_resize(context,
- instance=instance, migration_id=migration_ref['id'],
+ instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
self.db.migration_update(elevated, migration_ref['id'],
@@ -2173,7 +2175,7 @@ class AggregateAPI(base.Base):
self.db.aggregate_host_add(context, aggregate_id, host)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
- aggregate_id=aggregate_id, host_param=host, host=host)
+ aggregate=aggregate, host_param=host, host=host)
return self.get_aggregate(context, aggregate_id)
def remove_host_from_aggregate(self, context, aggregate_id, host):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dac9b58a0..de848abdd 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -73,6 +73,7 @@ from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
+from nova.virt import virtapi
from nova import volume
@@ -209,10 +210,27 @@ def _get_image_meta(context, image_ref):
return image_service.show(context, image_id)
+class ComputeVirtAPI(virtapi.VirtAPI):
+ def __init__(self, compute):
+ super(ComputeVirtAPI, self).__init__()
+ self._compute = compute
+
+ def instance_update(self, context, instance_uuid, updates):
+ return self._compute.db.instance_update_and_get_original(context,
+ instance_uuid,
+ updates)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return self._compute.db.instance_get_by_uuid(context, instance_uuid)
+
+ def instance_get_all_by_host(self, context, host):
+ return self._compute.db.instance_get_all_by_host(context, host)
+
+
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.10'
+ RPC_API_VERSION = '2.14'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -225,10 +243,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Compute driver option required, but not specified"))
sys.exit(1)
+ self.virtapi = ComputeVirtAPI(self)
+
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
self.driver = utils.check_isinstance(
- importutils.import_object_ns('nova.virt', compute_driver),
+ importutils.import_object_ns('nova.virt', compute_driver,
+ self.virtapi),
driver.ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
@@ -342,6 +363,13 @@ class ComputeManager(manager.SchedulerDependentManager):
self._report_driver_status(context)
self.publish_service_capabilities(context)
+ def pre_start_hook(self):
+ """After the service is initialized, but before we fully bring
+ the service up by listening on RPC queues, make sure to update
+ our available resources.
+ """
+ self.update_available_resource(nova.context.get_admin_context())
+
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state'), instance=instance)
@@ -972,48 +1000,34 @@ class ComputeManager(manager.SchedulerDependentManager):
do_terminate_instance(instance, bdms)
+ # NOTE(johannes): This is probably better named power_off_instance
+ # so it matches the driver method, but because of other issues, we
+ # can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def stop_instance(self, context, instance):
- """Stopping an instance on this host.
-
- Alias for power_off_instance for compatibility"""
- self.power_off_instance(context, instance=instance,
- final_state=vm_states.STOPPED)
-
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @reverts_task_state
- @wrap_instance_fault
- def start_instance(self, context, instance):
- """Starting an instance on this host.
-
- Alias for power_on_instance for compatibility"""
- self.power_on_instance(context, instance=instance)
-
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @reverts_task_state
- @wrap_instance_fault
- def power_off_instance(self, context, instance,
- final_state=vm_states.SOFT_DELETED):
- """Power off an instance on this host."""
+ """Stopping an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
- vm_state=final_state,
+ vm_state=vm_states.STOPPED,
expected_task_state=(task_states.POWERING_OFF,
task_states.STOPPING),
task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
+ # NOTE(johannes): This is probably better named power_on_instance
+ # so it matches the driver method, but because of other issues, we
+ # can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def power_on_instance(self, context, instance):
- """Power on an instance on this host."""
+ def start_instance(self, context, instance):
+ """Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
@@ -1029,6 +1043,71 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
+ def soft_delete_instance(self, context, instance):
+ """Soft delete an instance on this host."""
+ self._notify_about_instance_usage(context, instance,
+ "soft_delete.start")
+ try:
+ self.driver.soft_delete(instance)
+ except NotImplementedError:
+ # Fallback to just powering off the instance if the hypervisor
+ # doesn't implement the soft_delete method
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
+ self._notify_about_instance_usage(context, instance, "soft_delete.end")
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def restore_instance(self, context, instance):
+ """Restore a soft-deleted instance on this host."""
+ self._notify_about_instance_usage(context, instance, "restore.start")
+ try:
+ self.driver.restore(instance)
+ except NotImplementedError:
+ # Fallback to just powering on the instance if the hypervisor
+ # doesn't implement the restore method
+ self.driver.power_on(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ expected_task_state=task_states.RESTORING,
+ task_state=None)
+ self._notify_about_instance_usage(context, instance, "restore.end")
+
+ # NOTE(johannes): In the folsom release, power_off_instance was poorly
+ # named. It was the main entry point to soft delete an instance. That
+ # has been changed to soft_delete_instance now, but power_off_instance
+ # will need to stick around for compatibility in grizzly.
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def power_off_instance(self, context, instance):
+ """Power off an instance on this host."""
+ self.soft_delete_instance(context, instance)
+
+ # NOTE(johannes): In the folsom release, power_on_instance was poorly
+ # named. It was the main entry point to restore a soft deleted instance.
+ # That has been changed to restore_instance now, but power_on_instance
+ # will need to stick around for compatibility in grizzly.
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def power_on_instance(self, context, instance):
+ """Power on an instance on this host."""
+ self.restore_instance(context, instance)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None):
"""Destroy and re-make this instance.
@@ -1453,16 +1532,17 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def revert_resize(self, context, instance, migration_id,
- reservations=None):
+ def revert_resize(self, context, instance, migration=None,
+ migration_id=None, reservations=None):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
- migration_ref = self.db.migration_get(context.elevated(),
- migration_id)
+ if not migration:
+ migration = self.db.migration_get(context.elevated(),
+ migration_id)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
@@ -1476,7 +1556,7 @@ class ComputeManager(manager.SchedulerDependentManager):
teardown=True)
self.network_api.migrate_instance_start(context, instance,
- migration_ref)
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -1488,14 +1568,14 @@ class ComputeManager(manager.SchedulerDependentManager):
self._terminate_volume_connections(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
- migration_ref['id'], migration_ref['source_compute'],
+ migration, migration['source_compute'],
reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def finish_revert_resize(self, context, migration_id, instance,
- reservations=None):
+ def finish_revert_resize(self, context, instance, reservations=None,
+ migration=None, migration_id=None):
"""Finishes the second half of reverting a resize.
Power back on the source instance and revert the resized attributes
@@ -1503,7 +1583,9 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
elevated = context.elevated()
- migration_ref = self.db.migration_get(elevated, migration_id)
+
+ if not migration:
+ migration = self.db.migration_get(elevated, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
@@ -1514,11 +1596,11 @@ class ComputeManager(manager.SchedulerDependentManager):
instance = self._instance_update(context,
instance['uuid'],
- host=migration_ref['source_compute'])
+ host=migration['source_compute'])
self.network_api.setup_networks_on_host(context, instance,
- migration_ref['source_compute'])
+ migration['source_compute'])
- old_instance_type = migration_ref['old_instance_type_id']
+ old_instance_type = migration['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
@@ -1549,13 +1631,13 @@ class ComputeManager(manager.SchedulerDependentManager):
RESIZE_REVERTING)
self.network_api.migrate_instance_finish(context, instance,
- migration_ref)
+ migration)
self._instance_update(context, instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None)
- self.db.migration_update(elevated, migration_id,
+ self.db.migration_update(elevated, migration['id'],
{'status': 'reverted'})
self._notify_about_instance_usage(
@@ -2071,7 +2153,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def reserve_block_device_name(self, context, instance, device, volume_id):
+ def reserve_block_device_name(self, context, instance, device,
+ volume_id=None):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_reserve():
@@ -2080,7 +2163,7 @@ class ComputeManager(manager.SchedulerDependentManager):
device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
- 'volume_id': volume_id,
+ 'volume_id': volume_id or 'reserved',
'device_name': result}
self.db.block_device_mapping_create(context, values)
return result
@@ -2587,7 +2670,10 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_rebooting_instances(self, context):
if FLAGS.reboot_timeout > 0:
- self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
+ instances = self.db.instance_get_all_hung_in_rebooting(
+ context, FLAGS.reboot_timeout)
+ self.driver.poll_rebooting_instances(FLAGS.reboot_timeout,
+ instances)
@manager.periodic_task
def _poll_rescued_instances(self, context):
@@ -3028,9 +3114,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._set_instance_error_state(context, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def add_aggregate_host(self, context, aggregate_id, host, slave_info=None):
+ def add_aggregate_host(self, context, host, slave_info=None,
+ aggregate=None, aggregate_id=None):
"""Notify hypervisor of change (for hypervisor pools)."""
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ if not aggregate:
+ aggregate = self.db.aggregate_get(context, aggregate_id)
+
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
@@ -3038,7 +3127,7 @@ class ComputeManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(context,
self.db.aggregate_host_delete,
- aggregate.id, host)
+ aggregate['id'], host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_aggregate_host(self, context, aggregate_id,
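nova/virt/virtapi.py is added by this commit (44 lines per the diffstat) but
its body is not shown here. Judging from the three methods ComputeVirtAPI
overrides above, a plausible sketch of the interface it defines, offered as
an assumption rather than the actual file contents:

    class VirtAPI(object):
        """Database access the drivers get via the manager instead of
        importing nova.db directly."""

        def instance_update(self, context, instance_uuid, updates):
            raise NotImplementedError()

        def instance_get_by_uuid(self, context, instance_uuid):
            raise NotImplementedError()

        def instance_get_all_by_host(self, context, host):
            raise NotImplementedError()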
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 7acaa3dc1..5e3d745bb 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -474,7 +474,7 @@ class ResourceTracker(object):
self.tracked_instances[uuid] = 1
sign = 1
- if instance['vm_state'] == vm_states.DELETED:
+ if is_deleted_instance:
self.tracked_instances.pop(uuid)
sign = -1
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 8b4a4a529..5bf17adcd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -138,6 +138,10 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.8 - Remove migration_id, add migration to finish_resize
2.9 - Add publish_service_capabilities()
2.10 - Adds filter_properties and request_spec to prep_resize()
+ 2.11 - Adds soft_delete_instance() and restore_instance()
+ 2.12 - Remove migration_id, add migration to revert_resize
+ 2.13 - Remove migration_id, add migration to finish_revert_resize
+ 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
'''
#
@@ -155,7 +159,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=FLAGS.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
- def add_aggregate_host(self, ctxt, aggregate_id, host_param, host,
+ def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
@@ -166,11 +170,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
:param host: This is the host to send the message to.
'''
+ aggregate_p = jsonutils.to_primitive(aggregate)
self.cast(ctxt, self.make_msg('add_aggregate_host',
- aggregate_id=aggregate_id, host=host_param,
+ aggregate=aggregate_p, host=host_param,
slave_info=slave_info),
topic=_compute_topic(self.topic, ctxt, host, None),
- version='2.2')
+ version='2.14')
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
instance_p = jsonutils.to_primitive(instance)
@@ -236,13 +241,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, host, None),
version='2.8')
- def finish_revert_resize(self, ctxt, instance, migration_id, host,
+ def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
self.cast(ctxt, self.make_msg('finish_revert_resize',
- instance=instance_p, migration_id=migration_id,
+ instance=instance_p, migration=migration_p,
reservations=reservations),
- topic=_compute_topic(self.topic, ctxt, host, None))
+ topic=_compute_topic(self.topic, ctxt, host, None),
+ version='2.13')
def get_console_output(self, ctxt, instance, tail_length):
instance_p = jsonutils.to_primitive(instance)
@@ -440,13 +447,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def revert_resize(self, ctxt, instance, migration_id, host,
+ def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
self.cast(ctxt, self.make_msg('revert_resize',
- instance=instance_p, migration_id=migration_id,
+ instance=instance_p, migration=migration_p,
reservations=reservations),
- topic=_compute_topic(self.topic, ctxt, host, instance))
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.12')
def rollback_live_migration_at_destination(self, ctxt, instance, host):
instance_p = jsonutils.to_primitive(instance)
@@ -518,8 +527,9 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def terminate_instance(self, ctxt, instance, bdms):
instance_p = jsonutils.to_primitive(instance)
+ bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('terminate_instance',
- instance=instance_p, bdms=bdms),
+ instance=instance_p, bdms=bdms_p),
topic=_compute_topic(self.topic, ctxt, None, instance),
version='2.4')
@@ -538,6 +548,18 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
+ def soft_delete_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('soft_delete_instance',
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance))
+
+ def restore_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('restore_instance',
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance))
+
class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the security group rpc API.
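Several of the hunks above run arguments through jsonutils.to_primitive
before casting; RPC message bodies must be JSON-serializable, so model
objects are flattened to plain types first. A hypothetical stand-in that
illustrates the idea (the real jsonutils also handles datetimes, iterables
and nested objects):

    import json

    def to_primitive(value):
        # Stand-in only: flatten an object to a plain dict so it can
        # be serialized into an RPC message body.
        if hasattr(value, '__dict__'):
            return dict(value.__dict__)
        return value

    class Migration(object):
        def __init__(self):
            self.id = 42
            self.source_compute = 'host-a'

    print(json.dumps(to_primitive(Migration())))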
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index d4df75e60..c2966d554 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -68,16 +68,18 @@ SUSPENDING = 'suspending'
# possible task states during resume()
RESUMING = 'resuming'
+# NOTE(johannes): STOPPING and STARTING need to stick around for the
+# grizzly release for compatibility, but can be removed afterwards.
# possible task states during stop()
STOPPING = 'stopping'
# possible task states during start()
STARTING = 'starting'
-# possible task states during soft_delete()
+# possible task states during power_off()
POWERING_OFF = 'powering-off'
-# possible task states during restore()
+# possible task states during power_on()
POWERING_ON = 'powering-on'
# possible task states during rescue()
@@ -96,3 +98,9 @@ MIGRATING = "migrating"
# possible task states during delete()
DELETING = 'deleting'
+
+# possible task states during soft_delete()
+SOFT_DELETING = 'soft-deleting'
+
+# possible task states during restore()
+RESTORING = 'restoring'
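Read together with the compute/api.py and compute/manager.py hunks above,
the in-flight task state for each API call works out as follows (a recap
derived from this diff, not code from the commit):

    TASK_STATE_FOR_CALL = {
        'stop':        'powering-off',   # previously STOPPING
        'start':       'powering-on',    # previously STARTING
        'soft_delete': 'soft-deleting',  # previously POWERING_OFF
        'restore':     'restoring',      # previously POWERING_ON
    }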
diff --git a/nova/config.py b/nova/config.py
new file mode 100644
index 000000000..608a3ee53
--- /dev/null
+++ b/nova/config.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common import cfg
+
+CONF = cfg.CONF
+
+
+def parse_args(argv, default_config_files=None):
+ CONF.disable_interspersed_args()
+ return argv[:1] + CONF(argv[1:],
+ project='nova',
+ default_config_files=default_config_files)
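Usage follows the bin/* scripts above; a minimal entry point, assuming a
nova tree on sys.path:

    import sys

    from nova import config
    from nova.openstack.common import log as logging

    if __name__ == '__main__':
        # parse_args() returns [argv[0]] plus whatever CONF() leaves
        # over after consuming the recognized options.
        argv = config.parse_args(sys.argv)
        logging.setup('nova')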
diff --git a/nova/console/api.py b/nova/console/api.py
index 8becf35cf..5a9294ce7 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -22,7 +22,7 @@ from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova import flags
from nova.openstack.common import rpc
-from nova import utils
+from nova.openstack.common import uuidutils
FLAGS = flags.FLAGS
@@ -63,7 +63,7 @@ class API(base.Base):
return rpcapi.get_console_topic(context, instance_host)
def _get_instance(self, context, instance_uuid):
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
instance = self.db.instance_get_by_uuid(context, instance_uuid)
else:
instance = self.db.instance_get(context, instance_uuid)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index f5f7f3b15..1e3d83859 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -25,6 +25,16 @@ import datetime
import functools
import warnings
+from sqlalchemy import and_
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql import func
+
from nova import block_device
from nova.common.sqlalchemyutils import paginate_query
from nova.compute import vm_states
@@ -35,16 +45,9 @@ from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import utils
-from sqlalchemy import and_
-from sqlalchemy.exc import IntegrityError
-from sqlalchemy import or_
-from sqlalchemy.orm import joinedload
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import asc
-from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import literal_column
-from sqlalchemy.sql import func
+
FLAGS = flags.FLAGS
@@ -770,9 +773,9 @@ def floating_ip_create(context, values, session=None):
# check uniqueness for not deleted addresses
if not floating_ip_ref.deleted:
try:
- floating_ip = floating_ip_get_by_address(context,
- floating_ip_ref.address,
- session)
+ floating_ip = _floating_ip_get_by_address(context,
+ floating_ip_ref.address,
+ session)
except exception.FloatingIpNotFoundForAddress:
pass
else:
@@ -799,9 +802,9 @@ def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- floating_address,
- session=session)
+ floating_ip_ref = _floating_ip_get_by_address(context,
+ floating_address,
+ session=session)
fixed_ip_ref = fixed_ip_get_by_address(context,
fixed_address,
session=session)
@@ -812,25 +815,18 @@ def floating_ip_fixed_ip_associate(context, floating_address,
@require_context
def floating_ip_deallocate(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref['project_id'] = None
- floating_ip_ref['host'] = None
- floating_ip_ref['auto_assigned'] = False
- floating_ip_ref.save(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ update({'project_id': None,
+ 'host': None,
+ 'auto_assigned': False})
@require_context
def floating_ip_destroy(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref.delete(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ delete()
@require_context
@@ -860,13 +856,9 @@ def floating_ip_disassociate(context, address):
@require_context
def floating_ip_set_auto_assigned(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref.auto_assigned = True
- floating_ip_ref.save(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
@@ -903,7 +895,12 @@ def floating_ip_get_all_by_project(context, project_id):
@require_context
-def floating_ip_get_by_address(context, address, session=None):
+def floating_ip_get_by_address(context, address):
+ return _floating_ip_get_by_address(context, address)
+
+
+@require_context
+def _floating_ip_get_by_address(context, address, session=None):
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
first()
@@ -920,16 +917,14 @@ def floating_ip_get_by_address(context, address, session=None):
@require_context
-def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
- if not session:
- session = get_session()
-
- fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
- fixed_ip_id = fixed_ip['id']
-
- return model_query(context, models.FloatingIp, session=session).\
- filter_by(fixed_ip_id=fixed_ip_id).\
- all()
+def floating_ip_get_by_fixed_address(context, fixed_address):
+ subq = model_query(context, models.FixedIp.id).\
+ filter_by(address=fixed_address).\
+ limit(1).\
+ subquery()
+ return model_query(context, models.FloatingIp).\
+ filter_by(fixed_ip_id=subq.as_scalar()).\
+ all()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@@ -948,7 +943,9 @@ def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ floating_ip_ref = _floating_ip_get_by_address(context,
+ address,
+ session)
for (key, value) in values.iteritems():
floating_ip_ref[key] = value
floating_ip_ref.save(session=session)
@@ -1035,7 +1032,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved -- should be a boolean value (True or False); the exact value
will be used to filter on the fixed ip address
"""
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
@@ -1067,7 +1064,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
- if instance_uuid and not utils.is_uuid_like(instance_uuid):
+ if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
@@ -1211,7 +1208,7 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
@@ -1463,7 +1460,7 @@ def instance_data_get_for_project(context, project_id, session=None):
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
else:
@@ -1783,7 +1780,7 @@ def instance_test_and_set(context, instance_uuid, attr, ok_states,
query = model_query(context, models.Instance, session=session,
project_only=True)
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
query = query.filter_by(uuid=instance_uuid)
else:
raise exception.InvalidUUID(instance_uuid)
@@ -1835,7 +1832,7 @@ def instance_update_and_get_original(context, instance_uuid, values):
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
session = get_session()
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
@@ -2926,7 +2923,7 @@ def volume_allocate_iscsi_target(context, volume_id, host):
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
session = get_session()
@@ -3423,7 +3420,7 @@ def _security_group_get_query(context, session=None, read_deleted=None,
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
- query = query.options(joinedload_all('rules'))
+ query = query.options(joinedload_all('rules.grantee_group'))
return query
@@ -3480,7 +3477,7 @@ def security_group_get_by_name(context, project_id, group_name,
filter_by(name=group_name)
if columns_to_join is None:
- columns_to_join = ['instances', 'rules']
+ columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
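The floating IP hunks above trade fetch-modify-save loops for single
query-level UPDATE/DELETE statements. A standalone SQLAlchemy sketch of the
same pattern, using a hypothetical table rather than nova's models:

    from sqlalchemy import Boolean, Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class FloatingIp(Base):
        __tablename__ = 'floating_ips'
        id = Column(Integer, primary_key=True)
        address = Column(String(39))
        project_id = Column(String(255))
        host = Column(String(255))
        auto_assigned = Column(Boolean, default=False)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(FloatingIp(address='10.0.0.5', project_id='p1', host='h1'))
    session.commit()

    # One UPDATE statement, no object round trip -- the shape
    # floating_ip_deallocate takes after this change.
    session.query(FloatingIp).\
        filter_by(address='10.0.0.5').\
        update({'project_id': None, 'host': None, 'auto_assigned': False})
    session.commit()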
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 6c349e0c2..184d279ae 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -16,7 +16,149 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Session Handling for SQLAlchemy backend."""
+"""Session Handling for SQLAlchemy backend.
+
+Recommended ways to use sessions within this framework:
+
+* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
+ model_query() will implicitly use a session when called without one
+ supplied. This is the ideal situation because it will allow queries
+ to be automatically retried if the database connection is interrupted.
+
+ Note: Automatic retry will be enabled in a future patch.
+
+ It is generally fine to issue several queries in a row like this. Even though
+ they may be run in separate transactions and/or separate sessions, each one
+ will see the data from the prior calls. If needed, undo- or rollback-like
+ functionality should be handled at a logical level. For an example, look at
+ the code around quotas and reservation_rollback().
+
+ Examples:
+
+    def get_foo(context, foo):
+        return model_query(context, models.Foo).\
+                   filter_by(foo=foo).\
+                   first()
+
+    def update_foo(context, id, newfoo):
+        model_query(context, models.Foo).\
+            filter_by(id=id).\
+            update({'foo': newfoo})
+
+    def create_foo(context, values):
+        foo_ref = models.Foo()
+        foo_ref.update(values)
+        foo_ref.save()
+        return foo_ref
+
+
+* Within the scope of a single method, keep all the reads and writes within
+ the context managed by a single session. In this way, the session's
+ __exit__ handler will take care of calling flush() and commit() for you.
+ If using this approach, you should not explicitly call flush() or commit().
+ Any error within the context of the session will cause the session to emit
+ a ROLLBACK. If the connection is dropped before this is possible, the
+ database will implicitly roll back the transaction.
+
+ Note: statements in the session scope will not be automatically retried.
+
+ If you create models within the session, they need to be added to the
+ session, but you do not need to call model.save():
+
+    def create_many_foo(context, foos):
+        session = get_session()
+        with session.begin():
+            for foo in foos:
+                foo_ref = models.Foo()
+                foo_ref.update(foo)
+                session.add(foo_ref)
+
+    def update_bar(context, foo_id, newbar):
+        session = get_session()
+        with session.begin():
+            foo_ref = model_query(context, models.Foo, session=session).\
+                          filter_by(id=foo_id).\
+                          first()
+            model_query(context, models.Bar, session=session).\
+                filter_by(id=foo_ref['bar_id']).\
+                update({'bar': newbar})
+
+ Note: update_bar is a trivially simple example of using "with session.begin";
+ create_many_foo is a better example of when a transaction is genuinely
+ needed. Even so, it is always best to use as few queries as possible. The
+ two queries in update_bar can be better expressed as a single query, which
+ avoids the need for an explicit transaction. It can be expressed like so:
+
+    def update_bar(context, foo_id, newbar):
+        subq = model_query(context, models.Foo.bar_id).\
+                   filter_by(id=foo_id).\
+                   limit(1).\
+                   subquery()
+        model_query(context, models.Bar).\
+            filter_by(id=subq.as_scalar()).\
+            update({'bar': newbar})
+
+ For reference, this emits approximately the following SQL statement:
+
+      UPDATE bar SET bar = ${newbar}
+          WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
+
+* Passing an active session between methods. Sessions should only be passed
+ to private methods. The private method must use a subtransaction; otherwise
+ SQLAlchemy will throw an error when you call session.begin() on an existing
+ transaction. Public methods should not accept a session parameter, and
+ should not take part in a session opened in the caller's scope.
+
+ Note that this incurs more overhead in SQLAlchemy than the approaches above,
+ because of the nested transactions, and it is not possible to implicitly
+ retry failed database operations when using this approach.
+
+ This also makes code somewhat more difficult to read and debug, because a
+ single database transaction spans more than one method. Error handling
+ becomes less clear in this situation. When this is needed for code clarity,
+ it should be clearly documented.
+
+    def myfunc(foo):
+        session = get_session()
+        with session.begin():
+            # do some database things
+            bar = _private_func(foo, session)
+        return bar
+
+    def _private_func(foo, session=None):
+        if not session:
+            session = get_session()
+        with session.begin(subtransactions=True):
+            # do some other database things
+        return bar
+
+
+There are some things which it is best to avoid:
+
+* Don't keep a transaction open any longer than necessary.
+
+ This means that your "with session.begin()" block should be as short
+ as possible, while still containing all the related calls for that
+ transaction.
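+
+ For example (a hypothetical anti-pattern, not code from this tree;
+ image_service here stands in for any slow external client), do not hold
+ the transaction open across a slow call:
+
+    def slow_update_foo(context, foo_id, image_href):
+        session = get_session()
+        with session.begin():
+            # BAD: the external call below can take seconds, and the
+            # transaction (and any row locks it holds) stays open the
+            # whole time. Fetch the data first, then begin.
+            image = image_service.show(context, image_href)
+            model_query(context, models.Foo, session=session).\
+                filter_by(id=foo_id).\
+                update({'image_name': image['name']})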
+
+* Avoid "with_lockmode('UPDATE')" when possible.
+
+ In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
+ any rows, it will take a gap-lock. This is a form of write-lock on the
+ "gap" where no rows exist, and prevents any other writes to that space.
+ This can effectively prevent any INSERT into a table by locking the gap
+ at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
+ has an overly broad WHERE clause, or doesn't properly use an index.
+
+ One idea proposed at ODS Fall '12 was to first issue a normal SELECT to
+ count the rows matching the query and, only when exactly one row comes
+ back, issue the SELECT FOR UPDATE; a sketch of this follows.
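+
+ A rough sketch of that idea, using only this module's helpers (the names
+ update_unique_foo and exception.NotFound are illustrative stand-ins, not
+ code that exists in this tree):
+
+    def update_unique_foo(context, foo_id, newbar):
+        session = get_session()
+        with session.begin():
+            # Plain SELECT first: if nothing matches, bail out without
+            # ever taking a gap-lock.
+            count = model_query(context, models.Foo, session=session).\
+                        filter_by(id=foo_id).\
+                        count()
+            if count != 1:
+                raise exception.NotFound()
+            # Exactly one row matches, so FOR UPDATE locks just that row.
+            foo_ref = model_query(context, models.Foo, session=session).\
+                          filter_by(id=foo_id).\
+                          with_lockmode('update').\
+                          first()
+            foo_ref.update({'bar': newbar})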
+
+ The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
+ However, this cannot be done until the "deleted" columns are removed and
+ proper UNIQUE constraints are added to the tables.
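+
+ Purely as an illustration of the statement in question (the foo table and
+ its columns here are hypothetical, and the syntax is MySQL-specific):
+
+    def create_or_update_foo(context, id, newbar):
+        session = get_session()
+        with session.begin():
+            session.execute("INSERT INTO foo (id, bar) "
+                            "VALUES (:id, :bar) "
+                            "ON DUPLICATE KEY UPDATE bar = :bar",
+                            {'id': id, 'bar': newbar})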
+
+"""
import re
import time
@@ -48,11 +190,25 @@ def get_session(autocommit=True, expire_on_commit=False):
_MAKER = get_maker(engine, autocommit, expire_on_commit)
session = _MAKER()
+ session = wrap_session(session)
+ return session
+
+
+def wrap_session(session):
+ """Return a session whose exceptions are wrapped."""
session.query = nova.exception.wrap_db_error(session.query)
session.flush = nova.exception.wrap_db_error(session.flush)
return session
+def get_engine():
+ """Return a SQLAlchemy engine."""
+ global _ENGINE
+ if _ENGINE is None:
+ _ENGINE = create_engine(FLAGS.sql_connection)
+ return _ENGINE
+
+
def synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode"""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -106,72 +262,70 @@ def is_db_connection_error(args):
return False
-def get_engine():
- """Return a SQLAlchemy engine."""
- global _ENGINE
- if _ENGINE is None:
- connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
-
- engine_args = {
- "pool_recycle": FLAGS.sql_idle_timeout,
- "echo": False,
- 'convert_unicode': True,
- }
-
- # Map our SQL debug level to SQLAlchemy's options
- if FLAGS.sql_connection_debug >= 100:
- engine_args['echo'] = 'debug'
- elif FLAGS.sql_connection_debug >= 50:
- engine_args['echo'] = True
-
- if "sqlite" in connection_dict.drivername:
- engine_args["poolclass"] = NullPool
-
- if FLAGS.sql_connection == "sqlite://":
- engine_args["poolclass"] = StaticPool
- engine_args["connect_args"] = {'check_same_thread': False}
-
- _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
-
- sqlalchemy.event.listen(_ENGINE, 'checkin', greenthread_yield)
-
- if 'mysql' in connection_dict.drivername:
- sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener)
- elif 'sqlite' in connection_dict.drivername:
- if not FLAGS.sqlite_synchronous:
- sqlalchemy.event.listen(_ENGINE, 'connect',
- synchronous_switch_listener)
- sqlalchemy.event.listen(_ENGINE, 'connect', add_regexp_listener)
-
- if (FLAGS.sql_connection_trace and
- _ENGINE.dialect.dbapi.__name__ == 'MySQLdb'):
- import MySQLdb.cursors
- _do_query = debug_mysql_do_query()
- setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
-
- try:
- _ENGINE.connect()
- except OperationalError, e:
- if not is_db_connection_error(e.args[0]):
- raise
-
- remaining = FLAGS.sql_max_retries
- if remaining == -1:
- remaining = 'infinite'
- while True:
- msg = _('SQL connection failed. %s attempts left.')
- LOG.warn(msg % remaining)
- if remaining != 'infinite':
- remaining -= 1
- time.sleep(FLAGS.sql_retry_interval)
- try:
- _ENGINE.connect()
- break
- except OperationalError, e:
- if (remaining != 'infinite' and remaining == 0) or \
- not is_db_connection_error(e.args[0]):
- raise
- return _ENGINE
+def create_engine(sql_connection):
+ """Return a new SQLAlchemy engine."""
+ connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
+
+ engine_args = {
+ "pool_recycle": FLAGS.sql_idle_timeout,
+ "echo": False,
+ 'convert_unicode': True,
+ }
+
+ # Map our SQL debug level to SQLAlchemy's options
+ if FLAGS.sql_connection_debug >= 100:
+ engine_args['echo'] = 'debug'
+ elif FLAGS.sql_connection_debug >= 50:
+ engine_args['echo'] = True
+
+ if "sqlite" in connection_dict.drivername:
+ engine_args["poolclass"] = NullPool
+
+ if FLAGS.sql_connection == "sqlite://":
+ engine_args["poolclass"] = StaticPool
+ engine_args["connect_args"] = {'check_same_thread': False}
+
+ engine = sqlalchemy.create_engine(sql_connection, **engine_args)
+
+ sqlalchemy.event.listen(engine, 'checkin', greenthread_yield)
+
+ if 'mysql' in connection_dict.drivername:
+ sqlalchemy.event.listen(engine, 'checkout', ping_listener)
+ elif 'sqlite' in connection_dict.drivername:
+ if not FLAGS.sqlite_synchronous:
+ sqlalchemy.event.listen(engine, 'connect',
+ synchronous_switch_listener)
+ sqlalchemy.event.listen(engine, 'connect', add_regexp_listener)
+
+ if (FLAGS.sql_connection_trace and
+ engine.dialect.dbapi.__name__ == 'MySQLdb'):
+ import MySQLdb.cursors
+ _do_query = debug_mysql_do_query()
+ setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+
+ try:
+ engine.connect()
+ except OperationalError, e:
+ if not is_db_connection_error(e.args[0]):
+ raise
+
+ remaining = FLAGS.sql_max_retries
+ if remaining == -1:
+ remaining = 'infinite'
+ while True:
+ msg = _('SQL connection failed. %s attempts left.')
+ LOG.warn(msg % remaining)
+ if remaining != 'infinite':
+ remaining -= 1
+ time.sleep(FLAGS.sql_retry_interval)
+ try:
+ engine.connect()
+ break
+ except OperationalError, e:
+ if (remaining != 'infinite' and remaining == 0) or \
+ not is_db_connection_error(e.args[0]):
+ raise
+ return engine
def get_maker(engine, autocommit=True, expire_on_commit=False):
diff --git a/nova/exception.py b/nova/exception.py
index ab9265f5d..c3e1fe39e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -175,10 +175,6 @@ class DBError(NovaException):
super(DBError, self).__init__(str(inner_exception))
-class DeprecatedConfig(NovaException):
- message = _("Fatal call to deprecated config %(msg)s")
-
-
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text")
diff --git a/nova/flags.py b/nova/flags.py
index 7d09915a5..a27674472 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -36,13 +36,6 @@ from nova.openstack.common import cfg
FLAGS = cfg.CONF
-def parse_args(argv, default_config_files=None):
- FLAGS.disable_interspersed_args()
- return argv[:1] + FLAGS(argv[1:],
- project='nova',
- default_config_files=default_config_files)
-
-
class UnrecognizedFlag(Exception):
pass
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 7bb8bcaae..c1646f64d 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2012-10-29 00:01+0000\n"
+"POT-Creation-Date: 2012-11-05 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -97,249 +97,244 @@ msgid "Unknown"
msgstr ""
#: nova/exception.py:179
-#, python-format
-msgid "Fatal call to deprecated config %(msg)s"
-msgstr ""
-
-#: nova/exception.py:183
msgid "Failed to decrypt text"
msgstr ""
-#: nova/exception.py:187
+#: nova/exception.py:183
msgid "Virtual Interface creation failed"
msgstr ""
-#: nova/exception.py:191
+#: nova/exception.py:187
msgid "5 attempts to create virtual interfacewith unique mac address failed"
msgstr ""
-#: nova/exception.py:196
+#: nova/exception.py:192
#, python-format
msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
msgstr ""
-#: nova/exception.py:201
+#: nova/exception.py:197
msgid "Not authorized."
msgstr ""
-#: nova/exception.py:206
+#: nova/exception.py:202
msgid "User does not have admin privileges"
msgstr ""
-#: nova/exception.py:210
+#: nova/exception.py:206
#, python-format
msgid "Policy doesn't allow %(action)s to be performed."
msgstr ""
-#: nova/exception.py:214
+#: nova/exception.py:210
#, python-format
msgid "Image %(image_id)s is not active."
msgstr ""
-#: nova/exception.py:218
+#: nova/exception.py:214
#, python-format
msgid "Not authorized for image %(image_id)s."
msgstr ""
-#: nova/exception.py:222
+#: nova/exception.py:218
msgid "Unacceptable parameters."
msgstr ""
-#: nova/exception.py:227
+#: nova/exception.py:223
msgid "Invalid snapshot"
msgstr ""
-#: nova/exception.py:231
+#: nova/exception.py:227
#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:235
+#: nova/exception.py:231
#, python-format
msgid "Volume %(volume_id)s is still attached, detach volume first."
msgstr ""
-#: nova/exception.py:239 nova/api/ec2/cloud.py:389 nova/api/ec2/cloud.py:414
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2194
+#: nova/exception.py:235 nova/api/ec2/cloud.py:389 nova/api/ec2/cloud.py:414
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2234
msgid "Keypair data is invalid"
msgstr ""
-#: nova/exception.py:243
+#: nova/exception.py:239
msgid "Failed to load data into json format"
msgstr ""
-#: nova/exception.py:247
+#: nova/exception.py:243
msgid "The request is invalid."
msgstr ""
-#: nova/exception.py:251
+#: nova/exception.py:247
msgid "Invalid input received"
msgstr ""
-#: nova/exception.py:255
+#: nova/exception.py:251
msgid "Invalid volume type"
msgstr ""
-#: nova/exception.py:259
+#: nova/exception.py:255
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:263 nova/api/openstack/compute/servers.py:1283
+#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1283
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
-#: nova/exception.py:267
+#: nova/exception.py:263
msgid "Invalid metadata size"
msgstr ""
-#: nova/exception.py:271
+#: nova/exception.py:267
#, python-format
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:275 nova/api/ec2/cloud.py:571
+#: nova/exception.py:271 nova/api/ec2/cloud.py:571
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
-#: nova/exception.py:279
+#: nova/exception.py:275
#, python-format
msgid "Invalid content type %(content_type)s."
msgstr ""
-#: nova/exception.py:283
+#: nova/exception.py:279
#, python-format
msgid "Invalid cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:287
+#: nova/exception.py:283
msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
-#: nova/exception.py:294
+#: nova/exception.py:290
#, python-format
msgid "%(err)s"
msgstr ""
-#: nova/exception.py:298
+#: nova/exception.py:294
#, python-format
msgid ""
"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:"
" %(reason)s."
msgstr ""
-#: nova/exception.py:303
+#: nova/exception.py:299
#, python-format
msgid "Group not valid. Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:307
+#: nova/exception.py:303
msgid "Sort key supplied was not valid."
msgstr ""
-#: nova/exception.py:311
+#: nova/exception.py:307
#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while"
" the instance is in this state."
msgstr ""
-#: nova/exception.py:316
+#: nova/exception.py:312
#, python-format
msgid "Instance %(instance_id)s is not running."
msgstr ""
-#: nova/exception.py:320
+#: nova/exception.py:316
#, python-format
msgid "Instance %(instance_id)s is not in rescue mode"
msgstr ""
-#: nova/exception.py:324
+#: nova/exception.py:320
#, python-format
msgid "Instance %(instance_id)s is not ready"
msgstr ""
-#: nova/exception.py:328
+#: nova/exception.py:324
msgid "Failed to suspend instance"
msgstr ""
-#: nova/exception.py:332
+#: nova/exception.py:328
msgid "Failed to resume server"
msgstr ""
-#: nova/exception.py:336
+#: nova/exception.py:332
msgid "Failed to reboot instance"
msgstr ""
-#: nova/exception.py:340
+#: nova/exception.py:336
msgid "Failed to terminate instance"
msgstr ""
-#: nova/exception.py:344
+#: nova/exception.py:340
msgid "Service is unavailable at this time."
msgstr ""
-#: nova/exception.py:348
+#: nova/exception.py:344
msgid "Insufficient compute resources."
msgstr ""
-#: nova/exception.py:352
+#: nova/exception.py:348
msgid "Compute service is unavailable at this time."
msgstr ""
-#: nova/exception.py:356
+#: nova/exception.py:352
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:361
+#: nova/exception.py:357
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:365
+#: nova/exception.py:361
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:370
+#: nova/exception.py:366
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:375
+#: nova/exception.py:371
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:379
+#: nova/exception.py:375
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:383
+#: nova/exception.py:379
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:387
+#: nova/exception.py:383
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:391
+#: nova/exception.py:387
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:395
+#: nova/exception.py:391
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:401
+#: nova/exception.py:397
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -347,132 +342,132 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:408
+#: nova/exception.py:404
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:412
+#: nova/exception.py:408
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:416
+#: nova/exception.py:412
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:420
+#: nova/exception.py:416
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:424
+#: nova/exception.py:420
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:428
+#: nova/exception.py:424
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:433
+#: nova/exception.py:429
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:438
+#: nova/exception.py:434
#, python-format
msgid "Could not find driver for compute_driver %(name)s"
msgstr ""
-#: nova/exception.py:442
+#: nova/exception.py:438
#, python-format
msgid "Volume %(volume_id)s persistence file could not be found."
msgstr ""
-#: nova/exception.py:446
+#: nova/exception.py:442
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:450
+#: nova/exception.py:446
#, python-format
msgid "Unable to locate account %(account_name)s on Solidfire device"
msgstr ""
-#: nova/exception.py:455
+#: nova/exception.py:451
#, python-format
msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:460
+#: nova/exception.py:456
#, python-format
msgid "Volume type %(volume_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:464
+#: nova/exception.py:460
#, python-format
msgid "Volume type with name %(volume_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:469
+#: nova/exception.py:465
#, python-format
msgid ""
"Volume Type %(volume_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:474
+#: nova/exception.py:470
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:478
+#: nova/exception.py:474
#, python-format
msgid "deleting volume %(volume_name)s that has snapshot"
msgstr ""
-#: nova/exception.py:482
+#: nova/exception.py:478
#, python-format
msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
msgstr ""
-#: nova/exception.py:487
+#: nova/exception.py:483
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:491
+#: nova/exception.py:487
#, python-format
msgid "Failed to create iscsi target for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:495
+#: nova/exception.py:491
#, python-format
msgid "Failed to remove iscsi target for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:499
+#: nova/exception.py:495
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:503
+#: nova/exception.py:499
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:507
+#: nova/exception.py:503
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:511
+#: nova/exception.py:507
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:515
+#: nova/exception.py:511
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -480,734 +475,734 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:522
+#: nova/exception.py:518
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:526
+#: nova/exception.py:522
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:530
+#: nova/exception.py:526
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:534
+#: nova/exception.py:530
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:538
+#: nova/exception.py:534
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:542
+#: nova/exception.py:538
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:546
+#: nova/exception.py:542
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:550
+#: nova/exception.py:546
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:554
+#: nova/exception.py:550
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:558
+#: nova/exception.py:554
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:562
+#: nova/exception.py:558
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:567
+#: nova/exception.py:563
#, python-format
msgid "Host is not set to the network (%(network_id)s)."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:567
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:575
+#: nova/exception.py:571
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:579
+#: nova/exception.py:575
#, python-format
msgid "Port %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:583
+#: nova/exception.py:579
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:587
+#: nova/exception.py:583
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:591
+#: nova/exception.py:587
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:595
+#: nova/exception.py:591
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:600
+#: nova/exception.py:596
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:604
+#: nova/exception.py:600
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:609
+#: nova/exception.py:605
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:614
+#: nova/exception.py:610
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:619
+#: nova/exception.py:615
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:623
+#: nova/exception.py:619
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:627
+#: nova/exception.py:623
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:636
+#: nova/exception.py:632
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:636
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:644
+#: nova/exception.py:640
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:648
+#: nova/exception.py:644
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:648
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:652
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:660
+#: nova/exception.py:656
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:665
+#: nova/exception.py:661
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:670
+#: nova/exception.py:666
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:674
+#: nova/exception.py:670
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:678
+#: nova/exception.py:674
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:682
+#: nova/exception.py:678
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:686
+#: nova/exception.py:682
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:690
+#: nova/exception.py:686
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:694
+#: nova/exception.py:690
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:698
+#: nova/exception.py:694
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:702
+#: nova/exception.py:698
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:706
+#: nova/exception.py:702
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:706
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:714
+#: nova/exception.py:710
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:718
+#: nova/exception.py:714
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:723
+#: nova/exception.py:719
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:727
+#: nova/exception.py:723
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:731
+#: nova/exception.py:727
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:735
+#: nova/exception.py:731
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:739
+#: nova/exception.py:735
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:743
+#: nova/exception.py:739
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:747
+#: nova/exception.py:743
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:751
+#: nova/exception.py:747
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:755
+#: nova/exception.py:751
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:760
+#: nova/exception.py:756
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:764
+#: nova/exception.py:760
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:769
+#: nova/exception.py:765
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:774
+#: nova/exception.py:770
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:778
+#: nova/exception.py:774
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:783
+#: nova/exception.py:779
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:787
+#: nova/exception.py:783
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:793
+#: nova/exception.py:789
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:797
+#: nova/exception.py:793
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:801
+#: nova/exception.py:797
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:802
#, python-format
msgid "Invalid console type %(console_type)s "
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:806
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:810
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:815
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:823
+#: nova/exception.py:819
#, python-format
msgid "Flavor access not found for %(flavor_id) / %(project_id) combination."
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:824
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:832
+#: nova/exception.py:828
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:833
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:841
+#: nova/exception.py:837
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:846
+#: nova/exception.py:842
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:851
+#: nova/exception.py:847
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:856
+#: nova/exception.py:852
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:860
+#: nova/exception.py:856
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:864
+#: nova/exception.py:860
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:869
+#: nova/exception.py:865
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:873
+#: nova/exception.py:869
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:877
+#: nova/exception.py:873
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:877
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:885
+#: nova/exception.py:881
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:889
+#: nova/exception.py:885
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:893
+#: nova/exception.py:889
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:897
+#: nova/exception.py:893
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:897
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:901
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:910
+#: nova/exception.py:906
#, python-format
msgid "Volume Type %(name)s already exists."
msgstr ""
-#: nova/exception.py:914
+#: nova/exception.py:910
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:918
+#: nova/exception.py:914
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:922
+#: nova/exception.py:918
msgid "Migration error"
msgstr ""
-#: nova/exception.py:926
+#: nova/exception.py:922
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:932
+#: nova/exception.py:928
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:936
+#: nova/exception.py:932
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:940
+#: nova/exception.py:936
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:944
+#: nova/exception.py:940
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:948
+#: nova/exception.py:944
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:952
+#: nova/exception.py:948
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:952
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:960
+#: nova/exception.py:956
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:964
+#: nova/exception.py:960
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:968
+#: nova/exception.py:964
#, python-format
msgid "Host %(host)s is not up or doesn't exist."
msgstr ""
-#: nova/exception.py:972
+#: nova/exception.py:968
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:979
+#: nova/exception.py:975
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:984
+#: nova/exception.py:980
msgid "Maximum volume size exceeded"
msgstr ""
-#: nova/exception.py:988
+#: nova/exception.py:984
#, python-format
msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
msgstr ""
-#: nova/exception.py:992
+#: nova/exception.py:988
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:996
+#: nova/exception.py:992
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:1000
+#: nova/exception.py:996
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1004
+#: nova/exception.py:1000
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1008
+#: nova/exception.py:1004
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1012
+#: nova/exception.py:1008
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1016
+#: nova/exception.py:1012
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1020
+#: nova/exception.py:1016
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1025
+#: nova/exception.py:1021
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1029
+#: nova/exception.py:1025
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1033
+#: nova/exception.py:1029
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1037
+#: nova/exception.py:1033
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1042
+#: nova/exception.py:1038
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1046
+#: nova/exception.py:1042
#, python-format
msgid "Detected more than one volume with name %(vol_name)s"
msgstr ""
-#: nova/exception.py:1050
+#: nova/exception.py:1046
#, python-format
msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
msgstr ""
-#: nova/exception.py:1055
+#: nova/exception.py:1051
#, python-format
msgid "Bad or unexpected response from the storage volume backend API: %(data)s"
msgstr ""
-#: nova/exception.py:1060
+#: nova/exception.py:1056
msgid "Unknown NFS exception"
msgstr ""
-#: nova/exception.py:1064
+#: nova/exception.py:1060
msgid "No mounted NFS shares found"
msgstr ""
-#: nova/exception.py:1068
+#: nova/exception.py:1064
#, python-format
msgid "There is no share which can host %(volume_size)sG"
msgstr ""
-#: nova/exception.py:1072
+#: nova/exception.py:1068
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1076
+#: nova/exception.py:1072
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1082
+#: nova/exception.py:1078
msgid "Bad response from SolidFire API"
msgstr ""
-#: nova/exception.py:1086
+#: nova/exception.py:1082
#, python-format
msgid "Error in SolidFire API response: data=%(data)s"
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:1086
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1094
+#: nova/exception.py:1090
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1098
+#: nova/exception.py:1094
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1102
+#: nova/exception.py:1098
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1106
+#: nova/exception.py:1102
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1110
+#: nova/exception.py:1106
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1114
+#: nova/exception.py:1110
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1118
+#: nova/exception.py:1114
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1122
+#: nova/exception.py:1118
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1127
+#: nova/exception.py:1123
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1132
+#: nova/exception.py:1128
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1138
+#: nova/exception.py:1134
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1142
+#: nova/exception.py:1138
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1147
+#: nova/exception.py:1143
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1151
+#: nova/exception.py:1147
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
@@ -1227,7 +1222,7 @@ msgstr ""
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/manager.py:231
+#: nova/manager.py:255
msgid "Notifying Schedulers of capabilities ..."
msgstr ""
@@ -1243,190 +1238,195 @@ msgstr ""
msgid "Rule checked when requested rule is not found"
msgstr ""
-#: nova/quota.py:697
+#: nova/quota.py:726
#, python-format
msgid "Created reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:716
+#: nova/quota.py:745
#, python-format
msgid "Failed to commit reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:734
+#: nova/quota.py:763
#, python-format
msgid "Failed to roll back reservations %(reservations)s"
msgstr ""
-#: nova/service.py:179
+#: nova/service.py:170
msgid "Full set of FLAGS:"
msgstr ""
-#: nova/service.py:186
+#: nova/service.py:177
#, python-format
msgid "%(flag)s : FLAG SET "
msgstr ""
-#: nova/service.py:196 nova/service.py:294
+#: nova/service.py:187 nova/service.py:285
#, python-format
msgid "Caught %s, exiting"
msgstr ""
-#: nova/service.py:240
+#: nova/service.py:231
msgid "Parent process has died unexpectedly, exiting"
msgstr ""
-#: nova/service.py:276
+#: nova/service.py:267
msgid "Forking too fast, sleeping"
msgstr ""
-#: nova/service.py:299
+#: nova/service.py:290
msgid "Unhandled exception"
msgstr ""
-#: nova/service.py:306
+#: nova/service.py:297
#, python-format
msgid "Started child %d"
msgstr ""
-#: nova/service.py:316
+#: nova/service.py:307
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: nova/service.py:330
+#: nova/service.py:321
#, python-format
msgid "Child %(pid)d killed by signal %(sig)d"
msgstr ""
-#: nova/service.py:333
+#: nova/service.py:324
#, python-format
msgid "Child %(pid)d exited with status %(code)d"
msgstr ""
-#: nova/service.py:336
+#: nova/service.py:327
#, python-format
msgid "pid %d not in child list"
msgstr ""
-#: nova/service.py:356
+#: nova/service.py:347
#, python-format
msgid "Caught %s, stopping children"
msgstr ""
-#: nova/service.py:367
+#: nova/service.py:358
#, python-format
msgid "Waiting on %d children to exit"
msgstr ""
-#: nova/service.py:396
+#: nova/service.py:387
#, python-format
msgid "Starting %(topic)s node (version %(vcs_string)s)"
msgstr ""
-#: nova/service.py:413
+#: nova/service.py:403
#, python-format
msgid "Creating Consumer connection for Service %s"
msgstr ""
-#: nova/service.py:503
+#: nova/service.py:495
msgid "Service killed that has no database entry"
msgstr ""
-#: nova/service.py:540
+#: nova/service.py:532
msgid "The service database object disappeared, Recreating it."
msgstr ""
-#: nova/service.py:555
+#: nova/service.py:547
msgid "Recovered model server connection!"
msgstr ""
-#: nova/service.py:561
+#: nova/service.py:553
msgid "model server went away"
msgstr ""
-#: nova/service.py:649
+#: nova/service.py:644
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:160
+#: nova/utils.py:170
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:171
+#: nova/utils.py:181
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:187 nova/utils.py:265 nova/virt/powervm/common.py:82
+#: nova/utils.py:197 nova/utils.py:275 nova/virt/powervm/common.py:82
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:200
+#: nova/utils.py:210
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:240
+#: nova/utils.py:250
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:242
+#: nova/utils.py:252
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:246
+#: nova/utils.py:256
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:281
+#: nova/utils.py:291
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:440
+#: nova/utils.py:450
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:443
+#: nova/utils.py:453
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:478
+#: nova/utils.py:488
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:539
+#: nova/utils.py:549
msgid "in looping call"
msgstr ""
-#: nova/utils.py:700
+#: nova/utils.py:609
+#, python-format
+msgid "Unknown byte multiplier: %s"
+msgstr ""
+
+#: nova/utils.py:738
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:729
+#: nova/utils.py:767
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:869
+#: nova/utils.py:907
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:947
+#: nova/utils.py:985
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1065 nova/virt/configdrive.py:151
+#: nova/utils.py:1103 nova/virt/configdrive.py:151
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
@@ -1470,125 +1470,129 @@ msgstr ""
msgid "%(key)s with value %(value)s failed validator %(name)s"
msgstr ""
-#: nova/api/ec2/__init__.py:79
+#: nova/api/ec2/__init__.py:82
#, python-format
msgid "%(code)s: %(message)s"
msgstr ""
-#: nova/api/ec2/__init__.py:102
+#: nova/api/ec2/__init__.py:105
#, python-format
msgid "FaultWrapper: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:177
+#: nova/api/ec2/__init__.py:180
msgid "Too many failed authentications."
msgstr ""
-#: nova/api/ec2/__init__.py:187
+#: nova/api/ec2/__init__.py:190
#, python-format
msgid ""
"Access key %(access_key)s has had %(failures)d failed authentications and"
" will be locked out for %(lock_mins)d minutes."
msgstr ""
-#: nova/api/ec2/__init__.py:204
+#: nova/api/ec2/__init__.py:207
msgid "Signature not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:208
+#: nova/api/ec2/__init__.py:211
msgid "Access key not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:243 nova/api/ec2/__init__.py:258
+#: nova/api/ec2/__init__.py:246 nova/api/ec2/__init__.py:261
msgid "Failure communicating with keystone"
msgstr ""
-#: nova/api/ec2/__init__.py:328
+#: nova/api/ec2/__init__.py:320
+msgid "Timestamp failed validation."
+msgstr ""
+
+#: nova/api/ec2/__init__.py:340
#, python-format
msgid "action: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:330
+#: nova/api/ec2/__init__.py:342
#, python-format
msgid "arg: %(key)s\t\tval: %(value)s"
msgstr ""
-#: nova/api/ec2/__init__.py:405
+#: nova/api/ec2/__init__.py:417
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr ""
-#: nova/api/ec2/__init__.py:477
+#: nova/api/ec2/__init__.py:489
#, python-format
msgid "InstanceNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:483
+#: nova/api/ec2/__init__.py:495
#, python-format
msgid "VolumeNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:489
+#: nova/api/ec2/__init__.py:501
#, python-format
msgid "SnapshotNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:495
+#: nova/api/ec2/__init__.py:507
#, python-format
msgid "NotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:498
+#: nova/api/ec2/__init__.py:510
#, python-format
msgid "EC2APIError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:506
+#: nova/api/ec2/__init__.py:518
#, python-format
msgid "KeyPairExists raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:510
+#: nova/api/ec2/__init__.py:522
#, python-format
msgid "InvalidParameterValue raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:514
+#: nova/api/ec2/__init__.py:526
#, python-format
msgid "InvalidPortRange raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:518
+#: nova/api/ec2/__init__.py:530
#, python-format
msgid "NotAuthorized raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:522
+#: nova/api/ec2/__init__.py:534
#, python-format
msgid "InvalidRequest raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:526
+#: nova/api/ec2/__init__.py:538
#, python-format
msgid "QuotaError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:530
+#: nova/api/ec2/__init__.py:542
#, python-format
msgid "Invalid id: bogus (expecting \"i-...\"): %s"
msgstr ""
-#: nova/api/ec2/__init__.py:539
+#: nova/api/ec2/__init__.py:551
#, python-format
msgid "Unexpected error raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:540
+#: nova/api/ec2/__init__.py:552
#, python-format
msgid "Environment: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:542 nova/api/metadata/handler.py:79
+#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:79
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
@@ -1661,8 +1665,7 @@ msgstr ""
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:770 nova/api/openstack/compute/contrib/volumes.py:250
-#: nova/api/openstack/volume/volumes.py:310
+#: nova/api/ec2/cloud.py:770 nova/api/openstack/compute/contrib/volumes.py:243
#, python-format
msgid "Create volume of %s GB"
msgstr ""
@@ -1680,7 +1683,7 @@ msgstr ""
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:832 nova/api/openstack/compute/contrib/volumes.py:429
+#: nova/api/ec2/cloud.py:832 nova/api/openstack/compute/contrib/volumes.py:422
#, python-format
msgid "Detach volume %s"
msgstr ""
@@ -1836,6 +1839,14 @@ msgstr ""
msgid "Invalid CIDR"
msgstr ""
+#: nova/api/ec2/ec2utils.py:189
+msgid "Request must include either Timestamp or Expires, but cannot contain both"
+msgstr ""
+
+#: nova/api/ec2/ec2utils.py:209
+msgid "Timestamp is invalid."
+msgstr ""
+
#: nova/api/metadata/handler.py:77 nova/api/metadata/handler.py:84
#, python-format
msgid "Failed to get metadata for ip: %s"
@@ -1939,62 +1950,62 @@ msgstr ""
msgid "Instance snapshots are not permitted at this time."
msgstr ""
-#: nova/api/openstack/extensions.py:201
+#: nova/api/openstack/extensions.py:198
#, python-format
msgid "Loaded extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:240
+#: nova/api/openstack/extensions.py:237
#, python-format
msgid "Ext name: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:241
+#: nova/api/openstack/extensions.py:238
#, python-format
msgid "Ext alias: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:242
+#: nova/api/openstack/extensions.py:239
#, python-format
msgid "Ext description: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:244
+#: nova/api/openstack/extensions.py:241
#, python-format
msgid "Ext namespace: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:245
+#: nova/api/openstack/extensions.py:242
#, python-format
msgid "Ext updated: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:247
+#: nova/api/openstack/extensions.py:244
#, python-format
msgid "Exception loading extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:261
+#: nova/api/openstack/extensions.py:258
#, python-format
msgid "Loading extension %s"
msgstr ""
-#: nova/api/openstack/extensions.py:270
+#: nova/api/openstack/extensions.py:267
#, python-format
msgid "Calling extension factory %s"
msgstr ""
-#: nova/api/openstack/extensions.py:282
+#: nova/api/openstack/extensions.py:279
#, python-format
msgid "Failed to load extension %(ext_factory)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:363
+#: nova/api/openstack/extensions.py:360
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:387
+#: nova/api/openstack/extensions.py:384
#, python-format
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
@@ -2088,7 +2099,6 @@ msgid "subclasses must implement construct()!"
msgstr ""
#: nova/api/openstack/compute/extensions.py:30
-#: nova/api/openstack/volume/extensions.py:29
msgid "Initializing extension manager."
msgstr ""
@@ -2120,16 +2130,12 @@ msgstr ""
#: nova/api/openstack/compute/image_metadata.py:84
#: nova/api/openstack/compute/server_metadata.py:80
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:79
-#: nova/api/openstack/compute/contrib/volumetypes.py:174
-#: nova/api/openstack/volume/contrib/types_extra_specs.py:101
msgid "Request body and URI mismatch"
msgstr ""
#: nova/api/openstack/compute/image_metadata.py:87
#: nova/api/openstack/compute/server_metadata.py:84
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:82
-#: nova/api/openstack/compute/contrib/volumetypes.py:177
-#: nova/api/openstack/volume/contrib/types_extra_specs.py:104
msgid "Request body contains too many items"
msgstr ""
@@ -2338,8 +2344,6 @@ msgid "Missing imageRef attribute"
msgstr ""
#: nova/api/openstack/compute/servers.py:1103
-#: nova/api/openstack/volume/volumes.py:263
-#: nova/api/openstack/volume/volumes.py:267
msgid "Invalid imageRef provided."
msgstr ""
@@ -2372,7 +2376,6 @@ msgstr ""
#: nova/api/openstack/compute/contrib/aggregates.py:142
#: nova/api/openstack/compute/contrib/keypairs.py:78
#: nova/api/openstack/compute/contrib/networks.py:75
-#: nova/api/openstack/volume/contrib/volume_actions.py:86
msgid "Invalid request body"
msgstr ""
@@ -2571,7 +2574,6 @@ msgid "No request body"
msgstr ""
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:49
-#: nova/api/openstack/compute/contrib/volumetypes.py:147
msgid "No Request Body"
msgstr ""
@@ -2630,41 +2632,41 @@ msgstr ""
msgid "Error. Unable to associate floating ip"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:119
+#: nova/api/openstack/compute/contrib/hosts.py:124
#, python-format
msgid "Host '%s' could not be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:148
+#: nova/api/openstack/compute/contrib/hosts.py:153
#, python-format
msgid "Invalid status: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:152
+#: nova/api/openstack/compute/contrib/hosts.py:157
#, python-format
msgid "Invalid mode: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:156
+#: nova/api/openstack/compute/contrib/hosts.py:161
#, python-format
msgid "Invalid update setting: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:174
+#: nova/api/openstack/compute/contrib/hosts.py:179
#, python-format
msgid "Putting host %(host)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:185
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Setting host %(host)s to %(state)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:232
+#: nova/api/openstack/compute/contrib/hosts.py:237
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:240
+#: nova/api/openstack/compute/contrib/hosts.py:245
msgid "Host not found"
msgstr ""
@@ -2792,43 +2794,38 @@ msgstr ""
msgid "stop instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:76
-#: nova/api/openstack/volume/volumes.py:105
+#: nova/api/openstack/compute/contrib/volumes.py:75
#, python-format
msgid "vol=%s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:188
-#: nova/api/openstack/volume/volumes.py:223
+#: nova/api/openstack/compute/contrib/volumes.py:187
#, python-format
msgid "Delete volume with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:359
-#: nova/api/openstack/compute/contrib/volumes.py:439
+#: nova/api/openstack/compute/contrib/volumes.py:352
+#: nova/api/openstack/compute/contrib/volumes.py:432
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:390
+#: nova/api/openstack/compute/contrib/volumes.py:383
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:553
-#: nova/api/openstack/volume/snapshots.py:112
+#: nova/api/openstack/compute/contrib/volumes.py:546
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:596
-#: nova/api/openstack/volume/snapshots.py:161
+#: nova/api/openstack/compute/contrib/volumes.py:589
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:600
-#: nova/api/openstack/volume/snapshots.py:165
+#: nova/api/openstack/compute/contrib/volumes.py:593
#, python-format
msgid "Invalid value '%s' for force. "
msgstr ""
@@ -2837,32 +2834,6 @@ msgstr ""
msgid "Instance has had its instance_type removed from the DB"
msgstr ""
-#: nova/api/openstack/volume/volumes.py:277
-msgid "Invalid request body. 'volume' not found"
-msgstr ""
-
-#: nova/api/openstack/volume/volumes.py:307
-msgid "Invalid request body. 'size' not found"
-msgstr ""
-
-#: nova/api/openstack/volume/volumes.py:317
-msgid "Snapshot and image cannot be specified together."
-msgstr ""
-
-#: nova/api/openstack/volume/volumes.py:361
-#, python-format
-msgid "Removing options '%(bad_options)s' from query"
-msgstr ""
-
-#: nova/api/openstack/volume/contrib/admin_actions.py:72
-#, python-format
-msgid "Updating status of %(resource)s '%(id)s' to '%(status)s'"
-msgstr ""
-
-#: nova/api/openstack/volume/contrib/volume_actions.py:90
-msgid "No image_name was specified in request."
-msgstr ""
-
#: nova/cloudpipe/pipelib.py:44
msgid "Instance type for vpn instances"
msgstr ""
@@ -2884,11 +2855,6 @@ msgstr ""
msgid "Launching VPN for %s"
msgstr ""
-#: nova/common/deprecated.py:53
-#, python-format
-msgid "Deprecated Config: %s"
-msgstr ""
-
#: nova/common/sqlalchemyutils.py:68
msgid "Id not in sort_keys; is sort_keys unique?"
msgstr ""
@@ -2897,201 +2863,200 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/compute/api.py:220
+#: nova/compute/api.py:221
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:227
+#: nova/compute/api.py:228
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:236
+#: nova/compute/api.py:237
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:256
+#: nova/compute/api.py:257
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:266
+#: nova/compute/api.py:267
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:270
+#: nova/compute/api.py:271
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:274
+#: nova/compute/api.py:275
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:498
+#: nova/compute/api.py:499
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:569
+#: nova/compute/api.py:570
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:596
+#: nova/compute/api.py:597
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:828
+#: nova/compute/api.py:829
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:845
+#: nova/compute/api.py:846
msgid "No host for instance, deleting immediately"
msgstr ""
-#: nova/compute/api.py:929
+#: nova/compute/api.py:946
msgid "host for instance is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:950
+#: nova/compute/api.py:990
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:990
+#: nova/compute/api.py:1030
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1004
+#: nova/compute/api.py:1044
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1068 nova/volume/api.py:264
-#: nova/volume/volume_types.py:64
+#: nova/compute/api.py:1108
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1203
+#: nova/compute/api.py:1243
#, python-format
msgid "Image type not recognized %s"
msgstr ""
-#: nova/compute/api.py:1312
+#: nova/compute/api.py:1352
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1634
+#: nova/compute/api.py:1674
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1643
+#: nova/compute/api.py:1683
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1685
+#: nova/compute/api.py:1725
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
msgstr ""
-#: nova/compute/api.py:1857
+#: nova/compute/api.py:1897
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1865
+#: nova/compute/api.py:1905
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:1933
+#: nova/compute/api.py:1973
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2018
+#: nova/compute/api.py:2058
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2167
+#: nova/compute/api.py:2207
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2171
+#: nova/compute/api.py:2211
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2272
+#: nova/compute/api.py:2312
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2275
+#: nova/compute/api.py:2315
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2283
+#: nova/compute/api.py:2323
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2289
+#: nova/compute/api.py:2329
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2309
+#: nova/compute/api.py:2349
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2312
+#: nova/compute/api.py:2352
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2319
+#: nova/compute/api.py:2359
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2384
+#: nova/compute/api.py:2424
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2392
+#: nova/compute/api.py:2432
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2395
+#: nova/compute/api.py:2435
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2652
+#: nova/compute/api.py:2692
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2661
+#: nova/compute/api.py:2701
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2664
+#: nova/compute/api.py:2704
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2675
+#: nova/compute/api.py:2715
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3104,7 +3069,7 @@ msgstr ""
msgid "create arguments must be positive integers"
msgstr ""
-#: nova/compute/instance_types.py:94 nova/volume/volume_types.py:41
+#: nova/compute/instance_types.py:94
#, python-format
msgid "DB error: %s"
msgstr ""
@@ -3114,504 +3079,509 @@ msgstr ""
msgid "Instance type %s not found for deletion"
msgstr ""
-#: nova/compute/manager.py:167
+#: nova/compute/manager.py:163
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:230
+#: nova/compute/manager.py:243
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/compute/manager.py:233
+#: nova/compute/manager.py:248
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/compute/manager.py:239
+#: nova/compute/manager.py:255
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
-#: nova/compute/manager.py:274
+#: nova/compute/manager.py:290
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:302
+#: nova/compute/manager.py:318
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:316
+#: nova/compute/manager.py:332
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:330
+#: nova/compute/manager.py:346
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:340
+#: nova/compute/manager.py:356
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:352
+#: nova/compute/manager.py:375
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:425
+#: nova/compute/manager.py:448
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:518
+#: nova/compute/manager.py:541
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:550
+#: nova/compute/manager.py:564
#, python-format
-msgid "Build error: %s"
+msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:567
+#: nova/compute/manager.py:597 nova/compute/manager.py:1740
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:584
+#: nova/compute/manager.py:614
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:589
+#: nova/compute/manager.py:619
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:595
+#: nova/compute/manager.py:625
#, python-format
-msgid "Re-scheduling instance: attempt %d"
+msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:620
+#: nova/compute/manager.py:648
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:651
+#: nova/compute/manager.py:679
msgid "Instance has already been created"
msgstr ""
-#: nova/compute/manager.py:694
+#: nova/compute/manager.py:722
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:700
+#: nova/compute/manager.py:728
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:710
+#: nova/compute/manager.py:738
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:731
+#: nova/compute/manager.py:759
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:735
+#: nova/compute/manager.py:763
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:748
+#: nova/compute/manager.py:776
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:766
+#: nova/compute/manager.py:794
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:790
+#: nova/compute/manager.py:818
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:862
+#: nova/compute/manager.py:890
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:893
+#: nova/compute/manager.py:921
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:896
+#: nova/compute/manager.py:924
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:903
+#: nova/compute/manager.py:931
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:927
+#: nova/compute/manager.py:955
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:967 nova/compute/manager.py:1808
-#: nova/compute/manager.py:2993
+#: nova/compute/manager.py:995 nova/compute/manager.py:1909
+#: nova/compute/manager.py:3106
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1049
+#: nova/compute/manager.py:1128
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1128
+#: nova/compute/manager.py:1207
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1152
+#: nova/compute/manager.py:1231
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1161
+#: nova/compute/manager.py:1240
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1198
+#: nova/compute/manager.py:1277
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1204
+#: nova/compute/manager.py:1283
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1274
+#: nova/compute/manager.py:1336
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1280
+#: nova/compute/manager.py:1343
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1285
+#: nova/compute/manager.py:1348
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1316
+#: nova/compute/manager.py:1379
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1323
+#: nova/compute/manager.py:1386
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1333
+#: nova/compute/manager.py:1396
msgid "set_admin_password is not implemented by this driver."
msgstr ""
-#: nova/compute/manager.py:1349
+#: nova/compute/manager.py:1412
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1357
+#: nova/compute/manager.py:1420
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1372
+#: nova/compute/manager.py:1435
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1376
+#: nova/compute/manager.py:1439
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1389
+#: nova/compute/manager.py:1452
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1416
+#: nova/compute/manager.py:1479
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1437
+#: nova/compute/manager.py:1500
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1619
+#: nova/compute/manager.py:1677
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1637
+#: nova/compute/manager.py:1696
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:1805
+#: nova/compute/manager.py:1906
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:1861
+#: nova/compute/manager.py:1962
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:1878
+#: nova/compute/manager.py:1979
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:1916
+#: nova/compute/manager.py:2017
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:1946
+#: nova/compute/manager.py:2047
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:1962
+#: nova/compute/manager.py:2063
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:1967
+#: nova/compute/manager.py:2068
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:1970
+#: nova/compute/manager.py:2071
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:1987
+#: nova/compute/manager.py:2088
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2012
+#: nova/compute/manager.py:2113
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2040
+#: nova/compute/manager.py:2141
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2083
+#: nova/compute/manager.py:2185
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2092
+#: nova/compute/manager.py:2194
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2107
+#: nova/compute/manager.py:2209
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2136
+#: nova/compute/manager.py:2238
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2140
+#: nova/compute/manager.py:2242
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2153
+#: nova/compute/manager.py:2255
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2246
+#: nova/compute/manager.py:2299
+#, python-format
+msgid "Host %(host)s not found"
+msgstr ""
+
+#: nova/compute/manager.py:2359
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2306
+#: nova/compute/manager.py:2419
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2332
+#: nova/compute/manager.py:2445
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2365
+#: nova/compute/manager.py:2478
msgid "No floating_ip found"
msgstr ""
-#: nova/compute/manager.py:2373
+#: nova/compute/manager.py:2486
msgid "No floating_ip found."
msgstr ""
-#: nova/compute/manager.py:2375
+#: nova/compute/manager.py:2488
#, python-format
msgid ""
"Live migration: Unexpected error: cannot inherit floating ip.\n"
"%(e)s"
msgstr ""
-#: nova/compute/manager.py:2401
+#: nova/compute/manager.py:2514
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2403
+#: nova/compute/manager.py:2516
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2417
+#: nova/compute/manager.py:2530
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2548
+#: nova/compute/manager.py:2661
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2574
+#: nova/compute/manager.py:2687
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2579
+#: nova/compute/manager.py:2692
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2588
+#: nova/compute/manager.py:2701
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2595
+#: nova/compute/manager.py:2708
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2599
+#: nova/compute/manager.py:2712
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2606
+#: nova/compute/manager.py:2719
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2614
+#: nova/compute/manager.py:2727
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2631
+#: nova/compute/manager.py:2744
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2649
+#: nova/compute/manager.py:2762
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2672
+#: nova/compute/manager.py:2785
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:2737
+#: nova/compute/manager.py:2850
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:2763
+#: nova/compute/manager.py:2876
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2769 nova/compute/manager.py:2807
+#: nova/compute/manager.py:2882 nova/compute/manager.py:2920
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:2794
+#: nova/compute/manager.py:2907
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:2831
+#: nova/compute/manager.py:2944
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2843 nova/compute/manager.py:2854
-#: nova/compute/manager.py:2868
+#: nova/compute/manager.py:2956 nova/compute/manager.py:2967
+#: nova/compute/manager.py:2981
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:2848
+#: nova/compute/manager.py:2961
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2861
+#: nova/compute/manager.py:2974
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2877
+#: nova/compute/manager.py:2990
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:2885
+#: nova/compute/manager.py:2998
msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:2898
+#: nova/compute/manager.py:3011
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:2947
+#: nova/compute/manager.py:3060
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:2954
+#: nova/compute/manager.py:3067
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:2961
+#: nova/compute/manager.py:3074
#, python-format
msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
msgstr ""
@@ -3843,45 +3813,45 @@ msgstr ""
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2790
+#: nova/db/sqlalchemy/api.py:2796
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4691
+#: nova/db/sqlalchemy/api.py:4697
msgid "Backend exists"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4711 nova/db/sqlalchemy/api.py:4737
+#: nova/db/sqlalchemy/api.py:4717 nova/db/sqlalchemy/api.py:4743
#, python-format
msgid "No backend config with id %(sm_backend_id)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4749
+#: nova/db/sqlalchemy/api.py:4755
#, python-format
msgid "No backend config with sr uuid %(sr_uuid)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4783
+#: nova/db/sqlalchemy/api.py:4789
msgid "Flavor exists"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4798
+#: nova/db/sqlalchemy/api.py:4804
#, python-format
msgid "%(sm_flavor_id) flavor not found"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4817
+#: nova/db/sqlalchemy/api.py:4823
#, python-format
msgid "No sm_flavor called %(sm_flavor_id)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4834
+#: nova/db/sqlalchemy/api.py:4840
#, python-format
msgid "No sm_flavor called %(sm_flavor_label)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4872
+#: nova/db/sqlalchemy/api.py:4878
#, python-format
msgid "No sm_volume with id %(volume_id)s"
msgstr ""
@@ -3894,7 +3864,7 @@ msgstr ""
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/session.py:162
+#: nova/db/sqlalchemy/session.py:174
#, python-format
msgid "SQL connection failed. %s attempts left."
msgstr ""
@@ -4029,21 +3999,21 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/network/api.py:53
+#: nova/network/api.py:54
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
-#: nova/network/api.py:80
+#: nova/network/api.py:81
#, python-format
msgid "args: %s"
msgstr ""
-#: nova/network/api.py:81
+#: nova/network/api.py:82
#, python-format
msgid "kwargs: %s"
msgstr ""
-#: nova/network/api.py:169
+#: nova/network/api.py:170
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
@@ -4093,114 +4063,114 @@ msgstr ""
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:894
+#: nova/network/linux_net.py:895
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:896
+#: nova/network/linux_net.py:897
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1126
+#: nova/network/linux_net.py:1127
#, python-format
msgid "Starting VLAN inteface %s"
msgstr ""
-#: nova/network/linux_net.py:1162
+#: nova/network/linux_net.py:1163
#, python-format
msgid "Starting Bridge interface for %s"
msgstr ""
-#: nova/network/linux_net.py:1205
+#: nova/network/linux_net.py:1206
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1306
+#: nova/network/linux_net.py:1307
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1314
+#: nova/network/linux_net.py:1315
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1333
+#: nova/network/linux_net.py:1334
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1335
+#: nova/network/linux_net.py:1336
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:277
+#: nova/network/manager.py:284
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:286 nova/network/manager.py:545
+#: nova/network/manager.py:293 nova/network/manager.py:552
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:301
+#: nova/network/manager.py:308
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:365
+#: nova/network/manager.py:372
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:383
+#: nova/network/manager.py:390
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:387
+#: nova/network/manager.py:394
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:408
+#: nova/network/manager.py:415
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:469
+#: nova/network/manager.py:476
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:650
+#: nova/network/manager.py:673
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:657
+#: nova/network/manager.py:680
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:678
+#: nova/network/manager.py:706
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:686
+#: nova/network/manager.py:714
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:733
+#: nova/network/manager.py:761
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4208,39 +4178,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:779
+#: nova/network/manager.py:807
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:789
+#: nova/network/manager.py:817
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:903
+#: nova/network/manager.py:931
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:907
+#: nova/network/manager.py:935
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1021
+#: nova/network/manager.py:1049
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1026
+#: nova/network/manager.py:1054
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1056
+#: nova/network/manager.py:1084
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1279
+#: nova/network/manager.py:1307
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4248,89 +4218,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1358
+#: nova/network/manager.py:1388
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1379
+#: nova/network/manager.py:1409
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1383
+#: nova/network/manager.py:1413
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1391
+#: nova/network/manager.py:1421
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1396
+#: nova/network/manager.py:1426
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1400
+#: nova/network/manager.py:1430
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1403
+#: nova/network/manager.py:1433
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1422
+#: nova/network/manager.py:1452
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1446
+#: nova/network/manager.py:1476
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1466
+#: nova/network/manager.py:1496
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1547
+#: nova/network/manager.py:1577
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1550
+#: nova/network/manager.py:1580
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1561
+#: nova/network/manager.py:1591
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1620
+#: nova/network/manager.py:1650
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1640
+#: nova/network/manager.py:1670
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2097
+#: nova/network/manager.py:2136
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2104
+#: nova/network/manager.py:2143
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
@@ -4363,42 +4333,42 @@ msgstr ""
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:97
+#: nova/network/quantumv2/api.py:98
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:100
+#: nova/network/quantumv2/api.py:101
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:153
+#: nova/network/quantumv2/api.py:154
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:164
+#: nova/network/quantumv2/api.py:166
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:173
+#: nova/network/quantumv2/api.py:175
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:182
+#: nova/network/quantumv2/api.py:185
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:197
+#: nova/network/quantumv2/api.py:200
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:438
+#: nova/network/quantumv2/api.py:452
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
@@ -4423,11 +4393,21 @@ msgstr ""
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:298
+#: nova/openstack/common/log.py:177
+#, python-format
+msgid "Deprecated Config: %s"
+msgstr ""
+
+#: nova/openstack/common/log.py:309
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
+#: nova/openstack/common/log.py:467
+#, python-format
+msgid "Fatal call to deprecated config: %(msg)s"
+msgstr ""
+
#: nova/openstack/common/policy.py:394
#, python-format
msgid "Failed to understand rule %(rule)s"
@@ -4784,7 +4764,7 @@ msgstr ""
msgid "No key defining hosts for topic '%s', see ringfile"
msgstr ""
-#: nova/scheduler/chance.py:49 nova/scheduler/simple.py:96
+#: nova/scheduler/chance.py:49
msgid "Is the appropriate service running?"
msgstr ""
@@ -4796,120 +4776,101 @@ msgstr ""
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:181
+#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:184
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:101
-#, python-format
-msgid "Casted '%(method)s' to volume '%(host)s'"
-msgstr ""
-
-#: nova/scheduler/driver.py:124
+#: nova/scheduler/driver.py:110
#, python-format
msgid "Casted '%(method)s' to compute '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:141
+#: nova/scheduler/driver.py:125
#, python-format
msgid "Casted '%(method)s' to %(topic)s '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:189
+#: nova/scheduler/driver.py:173
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:197
+#: nova/scheduler/driver.py:181
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:202
-msgid "Driver must implement schedule_create_volune"
-msgstr ""
-
-#: nova/scheduler/driver.py:334
+#: nova/scheduler/driver.py:313
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:49
-#, python-format
-msgid "No host selection for %s defined."
-msgstr ""
-
-#: nova/scheduler/filter_scheduler.py:65
+#: nova/scheduler/filter_scheduler.py:57
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:186
+#: nova/scheduler/filter_scheduler.py:192
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:213
+#: nova/scheduler/filter_scheduler.py:219
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:224
+#: nova/scheduler/filter_scheduler.py:230
msgid "Scheduler only understands Compute nodes (for now)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:276
+#: nova/scheduler/filter_scheduler.py:282
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:286
+#: nova/scheduler/filter_scheduler.py:292
#, python-format
msgid "Weighted %(weighted_host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:237
+#: nova/scheduler/host_manager.py:246
#, python-format
msgid "Host filter fails for ignored host %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:244
+#: nova/scheduler/host_manager.py:253
#, python-format
msgid "Host filter fails for non-forced host %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:250
+#: nova/scheduler/host_manager.py:259
#, python-format
msgid "Host filter function %(func)s failed for %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:256
+#: nova/scheduler/host_manager.py:265
#, python-format
msgid "Host filter passes for %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:318
+#: nova/scheduler/host_manager.py:328
#, python-format
msgid "Received %(service_name)s service update from %(host)s."
msgstr ""
-#: nova/scheduler/host_manager.py:341
+#: nova/scheduler/host_manager.py:351
msgid "host_manager only implemented for 'compute'"
msgstr ""
-#: nova/scheduler/host_manager.py:351
+#: nova/scheduler/host_manager.py:359
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:79
-#, python-format
-msgid "Failed to schedule create_volume: %(ex)s"
-msgstr ""
-
-#: nova/scheduler/manager.py:165
+#: nova/scheduler/manager.py:168
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
@@ -4924,23 +4885,6 @@ msgstr ""
msgid "Could not decode scheduler options: '%(e)s'"
msgstr ""
-#: nova/scheduler/simple.py:52
-msgid ""
-"SimpleScheduler now only covers volume scheduling and is deprecated in "
-"Folsom. Non-volume functionality in SimpleScheduler has been replaced by "
-"FilterScheduler"
-msgstr ""
-
-#: nova/scheduler/simple.py:62
-msgid ""
-"nova-volume functionality is deprecated in Folsom and will be removed in "
-"Grizzly. Volumes are now handled by Cinder"
-msgstr ""
-
-#: nova/scheduler/simple.py:89
-msgid "Not enough allocatable volume gigabytes remaining"
-msgstr ""
-
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:49
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:56
#: nova/scheduler/filters/compute_capabilities_filter.py:48
@@ -5050,18 +4994,15 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:177 nova/volume/api.py:90 nova/volume/api.py:318
-#: nova/volume/cinder.py:159
+#: nova/tests/fake_volume.py:180 nova/volume/cinder.py:159
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:180 nova/volume/api.py:321
-#: nova/volume/cinder.py:162
+#: nova/tests/fake_volume.py:184 nova/volume/cinder.py:162
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:185 nova/volume/api.py:328
-#: nova/volume/cinder.py:168
+#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:168
msgid "already detached"
msgstr ""
@@ -5124,48 +5065,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_storwize_svc.py:177
-#, python-format
-msgid "unrecognized argument %s"
-msgstr ""
-
-#: nova/tests/test_storwize_svc.py:897
-#, python-format
-msgid "Run CLI command: %s"
-msgstr ""
-
-#: nova/tests/test_storwize_svc.py:900
-#, python-format
-msgid ""
-"CLI output:\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/tests/test_storwize_svc.py:905
-#, python-format
-msgid ""
-"CLI Exception output:\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/tests/test_volume_types.py:58
-#, python-format
-msgid "Given data: %s"
-msgstr ""
-
-#: nova/tests/test_volume_types.py:59
-#, python-format
-msgid "Result data: %s"
-msgstr ""
-
-#: nova/tests/test_xenapi.py:691
+#: nova/tests/test_xenapi.py:722
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:702
+#: nova/tests/test_xenapi.py:733
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5216,69 +5121,69 @@ msgstr ""
msgid "_get_all: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:126
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:128
#, python-format
msgid "test_snapshot_create: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:135
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:137
#, python-format
msgid "test_snapshot_create: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:157
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:183
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:159
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:185
#, python-format
msgid "test_snapshot_create_force: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:166
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:168
#, python-format
msgid "test_snapshot_create_force: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:219
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:221
#, python-format
msgid "test_snapshot_show: resp=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:245
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:247
#, python-format
msgid "test_snapshot_detail: resp_dict=%s"
msgstr ""
-#: nova/tests/compute/test_compute.py:630
-#: nova/tests/compute/test_compute.py:648
-#: nova/tests/compute/test_compute.py:684
-#: nova/tests/compute/test_compute.py:709
-#: nova/tests/compute/test_compute.py:2372
+#: nova/tests/compute/test_compute.py:633
+#: nova/tests/compute/test_compute.py:651
+#: nova/tests/compute/test_compute.py:687
+#: nova/tests/compute/test_compute.py:712
+#: nova/tests/compute/test_compute.py:2387
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:636
-#: nova/tests/compute/test_compute.py:671
-#: nova/tests/compute/test_compute.py:697
-#: nova/tests/compute/test_compute.py:727
+#: nova/tests/compute/test_compute.py:639
+#: nova/tests/compute/test_compute.py:674
+#: nova/tests/compute/test_compute.py:700
+#: nova/tests/compute/test_compute.py:730
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1104
+#: nova/tests/compute/test_compute.py:1107
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2383
+#: nova/tests/compute/test_compute.py:2398
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:473
+#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:471
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:192 nova/virt/hyperv/vmops.py:408
+#: nova/tests/hyperv/hypervutils.py:192 nova/virt/hyperv/vmops.py:406
#, python-format
msgid "Failed to destroy vm %s"
msgstr ""
@@ -5288,12 +5193,12 @@ msgstr ""
msgid "Failed to get info for disk %s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:138
+#: nova/tests/integrated/test_api_samples.py:140
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:142
+#: nova/tests/integrated/test_api_samples.py:144
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5301,25 +5206,25 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:150
+#: nova/tests/integrated/test_api_samples.py:152
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:153
+#: nova/tests/integrated/test_api_samples.py:155
#, python-format
msgid ""
"Length mismatch: %(result)s\n"
"%(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:164
+#: nova/tests/integrated/test_api_samples.py:166
#, python-format
msgid "Result: %(res_obj)s not in %(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:181
-#: nova/tests/integrated/test_api_samples.py:194
+#: nova/tests/integrated/test_api_samples.py:183
+#: nova/tests/integrated/test_api_samples.py:196
#, python-format
msgid ""
"Values do not match:\n"
@@ -5408,16 +5313,20 @@ msgstr ""
msgid "Adding provider rule: %s"
msgstr ""
-#: nova/virt/images.py:101
+#: nova/virt/images.py:114
+msgid "Snapshot list encountered but no header found!"
+msgstr ""
+
+#: nova/virt/images.py:213
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:107
+#: nova/virt/images.py:219
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:118
+#: nova/virt/images.py:230
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
@@ -5520,116 +5429,116 @@ msgstr ""
msgid "domain does not exist"
msgstr ""
-#: nova/virt/baremetal/driver.py:117
+#: nova/virt/baremetal/driver.py:116
#, python-format
msgid "Error encountered when destroying instance '%(name)s': %(ex)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:131
+#: nova/virt/baremetal/driver.py:130
#, python-format
msgid "instance %(instance_name)s: deleting instance files %(target)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:158
+#: nova/virt/baremetal/driver.py:157
#, python-format
msgid "instance %s: rebooted"
msgstr ""
-#: nova/virt/baremetal/driver.py:162
+#: nova/virt/baremetal/driver.py:161
msgid "_wait_for_reboot failed"
msgstr ""
-#: nova/virt/baremetal/driver.py:191
+#: nova/virt/baremetal/driver.py:190
#, python-format
msgid "instance %s: rescued"
msgstr ""
-#: nova/virt/baremetal/driver.py:195
+#: nova/virt/baremetal/driver.py:194
msgid "_wait_for_rescue failed"
msgstr ""
-#: nova/virt/baremetal/driver.py:212
+#: nova/virt/baremetal/driver.py:211
msgid "<============= spawn of baremetal =============>"
msgstr ""
-#: nova/virt/baremetal/driver.py:225
+#: nova/virt/baremetal/driver.py:224
#, python-format
msgid "instance %s: is building"
msgstr ""
-#: nova/virt/baremetal/driver.py:231
+#: nova/virt/baremetal/driver.py:230
msgid "Key is injected but instance is not running yet"
msgstr ""
-#: nova/virt/baremetal/driver.py:240
+#: nova/virt/baremetal/driver.py:239
#, python-format
msgid "instance %s: booted"
msgstr ""
-#: nova/virt/baremetal/driver.py:247
+#: nova/virt/baremetal/driver.py:246
#, python-format
msgid "~~~~~~ current state = %s ~~~~~~"
msgstr ""
-#: nova/virt/baremetal/driver.py:249
+#: nova/virt/baremetal/driver.py:248
#, python-format
msgid "instance %s spawned successfully"
msgstr ""
-#: nova/virt/baremetal/driver.py:252
+#: nova/virt/baremetal/driver.py:251
#, python-format
msgid "instance %s:not booted"
msgstr ""
-#: nova/virt/baremetal/driver.py:255
+#: nova/virt/baremetal/driver.py:254
msgid "Baremetal assignment is overcommitted."
msgstr ""
-#: nova/virt/baremetal/driver.py:339
+#: nova/virt/baremetal/driver.py:338
#, python-format
msgid "instance %s: Creating image"
msgstr ""
-#: nova/virt/baremetal/driver.py:457
+#: nova/virt/baremetal/driver.py:456
#, python-format
msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:467
+#: nova/virt/baremetal/driver.py:466
#, python-format
msgid ""
"instance %(inst_name)s: ignoring error injecting data into image "
"%(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/baremetal/driver.py:513
+#: nova/virt/baremetal/driver.py:512
#, python-format
msgid "instance %s: starting toXML method"
msgstr ""
-#: nova/virt/baremetal/driver.py:516
+#: nova/virt/baremetal/driver.py:515
#, python-format
msgid "instance %s: finished toXML method"
msgstr ""
-#: nova/virt/baremetal/driver.py:560 nova/virt/hyperv/vmops.py:486
-#: nova/virt/libvirt/driver.py:1987
+#: nova/virt/baremetal/driver.py:559 nova/virt/hyperv/hostops.py:43
+#: nova/virt/libvirt/driver.py:1986
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. This error can be safely ignored for now."
msgstr ""
-#: nova/virt/baremetal/driver.py:683
+#: nova/virt/baremetal/driver.py:682
#, python-format
msgid "#### RLK: cpu_arch = %s "
msgstr ""
-#: nova/virt/baremetal/driver.py:700
+#: nova/virt/baremetal/driver.py:699
msgid "Updating!"
msgstr ""
-#: nova/virt/baremetal/driver.py:727 nova/virt/libvirt/driver.py:3033
-#: nova/virt/xenapi/host.py:148
+#: nova/virt/baremetal/driver.py:726 nova/virt/hyperv/hostops.py:141
+#: nova/virt/libvirt/driver.py:3028 nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
@@ -5681,45 +5590,45 @@ msgstr ""
msgid "Node is unknown error state."
msgstr ""
-#: nova/virt/disk/api.py:198
+#: nova/virt/disk/api.py:196
msgid "no capable image handler configured"
msgstr ""
-#: nova/virt/disk/api.py:245
+#: nova/virt/disk/api.py:243
#, python-format
msgid "no disk image handler for: %s"
msgstr ""
-#: nova/virt/disk/api.py:257
+#: nova/virt/disk/api.py:255
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:323
+#: nova/virt/disk/api.py:321
#, python-format
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': "
"%(errors)s"
msgstr ""
-#: nova/virt/disk/api.py:340
+#: nova/virt/disk/api.py:338
#, python-format
msgid "Failed to unmount container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:373
+#: nova/virt/disk/api.py:371
msgid "injected file path not valid"
msgstr ""
-#: nova/virt/disk/api.py:518
+#: nova/virt/disk/api.py:516
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:552
+#: nova/virt/disk/api.py:550
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:568
+#: nova/virt/disk/api.py:566
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
@@ -5782,34 +5691,47 @@ msgstr ""
msgid "nbd device %s did not show up"
msgstr ""
-#: nova/virt/hyperv/driver.py:189 nova/virt/hyperv/driver.py:192
+#: nova/virt/hyperv/driver.py:190 nova/virt/hyperv/driver.py:193
msgid "plug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:195
+#: nova/virt/hyperv/driver.py:196
msgid "ensure_filtering_rules_for_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:200
+#: nova/virt/hyperv/driver.py:201
msgid "unfilter_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:204
+#: nova/virt/hyperv/driver.py:205
msgid "confirm_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:209
+#: nova/virt/hyperv/driver.py:210
msgid "finish_revert_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:215
+#: nova/virt/hyperv/driver.py:216
msgid "finish_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:218
+#: nova/virt/hyperv/driver.py:219
msgid "get_console_output called"
msgstr ""
+#: nova/virt/hyperv/hostops.py:112
+#, python-format
+msgid "Windows version: %s "
+msgstr ""
+
+#: nova/virt/hyperv/hostops.py:124
+msgid "get_available_resource called"
+msgstr ""
+
+#: nova/virt/hyperv/hostops.py:161
+msgid "get_host_stats called"
+msgstr ""
+
#: nova/virt/hyperv/livemigrationops.py:52
msgid ""
"Live migration is not supported \" \"by this version "
@@ -5944,197 +5866,188 @@ msgstr ""
msgid "Removing folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:69
+#: nova/virt/hyperv/vmops.py:67
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:93
+#: nova/virt/hyperv/vmops.py:91
#, python-format
msgid "hyperv vm state: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:99
+#: nova/virt/hyperv/vmops.py:97
#, python-format
msgid ""
"Got Info for vm %(instance_name)s: state=%(state)s, mem=%(memusage)s, "
"num_cpu=%(numprocs)s, uptime=%(uptime)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:135
+#: nova/virt/hyperv/vmops.py:133
#, python-format
msgid "cache image failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:154
+#: nova/virt/hyperv/vmops.py:152
#, python-format
msgid "Starting VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:156
+#: nova/virt/hyperv/vmops.py:154
#, python-format
msgid "Started VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:158
+#: nova/virt/hyperv/vmops.py:156
#, python-format
msgid "spawn vm failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:177
+#: nova/virt/hyperv/vmops.py:175
#, python-format
msgid "Failed to create VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:180
+#: nova/virt/hyperv/vmops.py:178
#, python-format
msgid "Created VM %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:197
+#: nova/virt/hyperv/vmops.py:195
#, python-format
msgid "Set memory for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:210
+#: nova/virt/hyperv/vmops.py:208
#, python-format
msgid "Set vcpus for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:214
+#: nova/virt/hyperv/vmops.py:212
#, python-format
msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
msgstr ""
-#: nova/virt/hyperv/vmops.py:223
+#: nova/virt/hyperv/vmops.py:221
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmops.py:231
+#: nova/virt/hyperv/vmops.py:229
#, python-format
msgid "Failed to add scsi controller to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:236
+#: nova/virt/hyperv/vmops.py:234
#, python-format
msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:263
+#: nova/virt/hyperv/vmops.py:261
#, python-format
msgid "Failed to add diskdrive to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:266
+#: nova/virt/hyperv/vmops.py:264
#, python-format
msgid "New disk drive path is %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:285
+#: nova/virt/hyperv/vmops.py:283
#, python-format
msgid "Failed to add vhd file to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:287
+#: nova/virt/hyperv/vmops.py:285
#, python-format
msgid "Created disk for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:291
+#: nova/virt/hyperv/vmops.py:289
#, python-format
msgid "Creating nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:296
+#: nova/virt/hyperv/vmops.py:294
msgid "Cannot find vSwitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:316
+#: nova/virt/hyperv/vmops.py:314
msgid "Failed creating a port on the external vswitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:317
+#: nova/virt/hyperv/vmops.py:315
#, python-format
msgid "Failed creating port for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:320
+#: nova/virt/hyperv/vmops.py:318
#, python-format
msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:332
+#: nova/virt/hyperv/vmops.py:330
#, python-format
msgid "Failed to add nic to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:334
+#: nova/virt/hyperv/vmops.py:332
#, python-format
msgid "Created nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:341 nova/virt/hyperv/vmops.py:344
+#: nova/virt/hyperv/vmops.py:339 nova/virt/hyperv/vmops.py:342
#, python-format
msgid "Attempting to bind NIC to %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:349
+#: nova/virt/hyperv/vmops.py:347
msgid "No vSwitch specified, attaching to default"
msgstr ""
-#: nova/virt/hyperv/vmops.py:374
+#: nova/virt/hyperv/vmops.py:372
#, python-format
msgid "Got request to destroy vm %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:418
+#: nova/virt/hyperv/vmops.py:416
#, python-format
msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:424
+#: nova/virt/hyperv/vmops.py:422
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:429
+#: nova/virt/hyperv/vmops.py:427
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:435
+#: nova/virt/hyperv/vmops.py:433
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:440
+#: nova/virt/hyperv/vmops.py:438
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:445
+#: nova/virt/hyperv/vmops.py:443
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:450
+#: nova/virt/hyperv/vmops.py:448
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:470
+#: nova/virt/hyperv/vmops.py:468
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:555
-#, python-format
-msgid "Windows version: %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:567
-msgid "get_available_resource called"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:604
+#: nova/virt/hyperv/vmops.py:497
#, python-format
msgid "use_cow_image:%s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:624
+#: nova/virt/hyperv/vmops.py:517
#, python-format
msgid "Failed to create Difference Disk from %(base)s to %(target)s"
msgstr ""
@@ -6181,7 +6094,7 @@ msgstr ""
msgid "Unable to attach boot volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:109 nova/virt/xenapi/volumeops.py:115
+#: nova/virt/hyperv/volumeops.py:109 nova/virt/xenapi/volumeops.py:114
#, python-format
msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
@@ -6191,7 +6104,7 @@ msgstr ""
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:137 nova/virt/xenapi/volumeops.py:191
+#: nova/virt/hyperv/volumeops.py:137 nova/virt/xenapi/volumeops.py:190
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
@@ -6226,7 +6139,7 @@ msgstr ""
msgid "Failed to remove volume from VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:604
+#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:603
msgid "Could not determine iscsi initiator name"
msgstr ""
@@ -6344,219 +6257,219 @@ msgstr ""
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1463
+#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1462
#: nova/virt/xenapi/vm_utils.py:476
#, python-format
msgid "block_device_list %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:333
+#: nova/virt/libvirt/driver.py:332
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:339
+#: nova/virt/libvirt/driver.py:338
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:360
+#: nova/virt/libvirt/driver.py:359
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:382 nova/virt/libvirt/driver.py:385
+#: nova/virt/libvirt/driver.py:381 nova/virt/libvirt/driver.py:384
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:467
+#: nova/virt/libvirt/driver.py:466
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:481
+#: nova/virt/libvirt/driver.py:480
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:486
+#: nova/virt/libvirt/driver.py:485
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:508
+#: nova/virt/libvirt/driver.py:507
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:523
+#: nova/virt/libvirt/driver.py:522
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:536
+#: nova/virt/libvirt/driver.py:535
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:553
+#: nova/virt/libvirt/driver.py:552
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:567
+#: nova/virt/libvirt/driver.py:566
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:729
+#: nova/virt/libvirt/driver.py:728
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:739
+#: nova/virt/libvirt/driver.py:738
msgid "attaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:752
+#: nova/virt/libvirt/driver.py:751
msgid "detaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:884
+#: nova/virt/libvirt/driver.py:883
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:888
+#: nova/virt/libvirt/driver.py:887
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:920
+#: nova/virt/libvirt/driver.py:919
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:955
+#: nova/virt/libvirt/driver.py:954
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1085
+#: nova/virt/libvirt/driver.py:1084
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1092 nova/virt/powervm/operator.py:253
+#: nova/virt/libvirt/driver.py:1091 nova/virt/powervm/operator.py:253
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1108
+#: nova/virt/libvirt/driver.py:1107
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1154
+#: nova/virt/libvirt/driver.py:1153
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1198
+#: nova/virt/libvirt/driver.py:1197
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1202
+#: nova/virt/libvirt/driver.py:1201
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1206 nova/virt/libvirt/driver.py:1210
+#: nova/virt/libvirt/driver.py:1205 nova/virt/libvirt/driver.py:1209
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1276
+#: nova/virt/libvirt/driver.py:1275
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1402
+#: nova/virt/libvirt/driver.py:1401
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1412
+#: nova/virt/libvirt/driver.py:1411
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1426
+#: nova/virt/libvirt/driver.py:1425
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1436
+#: nova/virt/libvirt/driver.py:1435
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1510
+#: nova/virt/libvirt/driver.py:1509
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1516
+#: nova/virt/libvirt/driver.py:1515
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1520
+#: nova/virt/libvirt/driver.py:1519
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1524
+#: nova/virt/libvirt/driver.py:1523
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1540
+#: nova/virt/libvirt/driver.py:1539
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1832
+#: nova/virt/libvirt/driver.py:1831
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1836
+#: nova/virt/libvirt/driver.py:1835
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1853
+#: nova/virt/libvirt/driver.py:1852
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2105
+#: nova/virt/libvirt/driver.py:2104
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2292
+#: nova/virt/libvirt/driver.py:2291
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2300
+#: nova/virt/libvirt/driver.py:2299
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2340
+#: nova/virt/libvirt/driver.py:2334
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2360
+#: nova/virt/libvirt/driver.py:2354
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2372
+#: nova/virt/libvirt/driver.py:2366
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6566,51 +6479,51 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2389
+#: nova/virt/libvirt/driver.py:2383
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2437
+#: nova/virt/libvirt/driver.py:2431
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2507
+#: nova/virt/libvirt/driver.py:2501
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2551
+#: nova/virt/libvirt/driver.py:2545
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2678
+#: nova/virt/libvirt/driver.py:2672
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2727
+#: nova/virt/libvirt/driver.py:2721
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2789
+#: nova/virt/libvirt/driver.py:2783
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2848
+#: nova/virt/libvirt/driver.py:2842
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2855
+#: nova/virt/libvirt/driver.py:2849
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2906
+#: nova/virt/libvirt/driver.py:2900
msgid "Starting finish_revert_migration"
msgstr ""
@@ -6800,31 +6713,31 @@ msgstr ""
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:410
+#: nova/virt/libvirt/utils.py:406
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/utils.py:499
+#: nova/virt/libvirt/utils.py:495
#, python-format
msgid "Reading image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:503
+#: nova/virt/libvirt/utils.py:499
#, python-format
msgid "Read: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:509
+#: nova/virt/libvirt/utils.py:505
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/utils.py:533
+#: nova/virt/libvirt/utils.py:529
#, python-format
msgid "Writing image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:534
+#: nova/virt/libvirt/utils.py:530
#, python-format
msgid "Wrote: %s"
msgstr ""
@@ -6861,7 +6774,7 @@ msgstr ""
msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume_nfs.py:81 nova/volume/nfs.py:276
+#: nova/virt/libvirt/volume_nfs.py:81
#, python-format
msgid "%s is already mounted"
msgstr ""
@@ -7426,19 +7339,19 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:80 nova/virt/xenapi/vmops.py:1432
+#: nova/virt/xenapi/agent.py:80 nova/virt/xenapi/vmops.py:1471
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1436
+#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1475
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1441
+#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1480
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -7455,74 +7368,74 @@ msgstr ""
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:121
+#: nova/virt/xenapi/agent.py:127
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:135
+#: nova/virt/xenapi/agent.py:141
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:144
+#: nova/virt/xenapi/agent.py:149
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:151
+#: nova/virt/xenapi/agent.py:157
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:166
+#: nova/virt/xenapi/agent.py:171
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:176
+#: nova/virt/xenapi/agent.py:182
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:195
+#: nova/virt/xenapi/agent.py:202
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:203
+#: nova/virt/xenapi/agent.py:209
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:215
+#: nova/virt/xenapi/agent.py:222
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:223
+#: nova/virt/xenapi/agent.py:229
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:228
+#: nova/virt/xenapi/agent.py:235
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:248
+#: nova/virt/xenapi/agent.py:255
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not bemanipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:256
+#: nova/virt/xenapi/agent.py:263
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:260
+#: nova/virt/xenapi/agent.py:267
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:312
+#: nova/virt/xenapi/agent.py:319
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
@@ -7538,82 +7451,82 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:355
+#: nova/virt/xenapi/driver.py:363
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:565
+#: nova/virt/xenapi/driver.py:574
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:617
+#: nova/virt/xenapi/driver.py:626
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:655
+#: nova/virt/xenapi/driver.py:664
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:739 nova/virt/xenapi/driver.py:753
+#: nova/virt/xenapi/driver.py:748 nova/virt/xenapi/driver.py:762
#, python-format
msgid "Got exception: %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:669 nova/virt/xenapi/fake.py:771
-#: nova/virt/xenapi/fake.py:790 nova/virt/xenapi/fake.py:858
+#: nova/virt/xenapi/fake.py:670 nova/virt/xenapi/fake.py:772
+#: nova/virt/xenapi/fake.py:791 nova/virt/xenapi/fake.py:859
msgid "Raising NotImplemented"
msgstr ""
-#: nova/virt/xenapi/fake.py:671
+#: nova/virt/xenapi/fake.py:672
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:705
+#: nova/virt/xenapi/fake.py:706
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr ""
-#: nova/virt/xenapi/fake.py:710
+#: nova/virt/xenapi/fake.py:711
#, python-format
msgid "Calling getter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:713
+#: nova/virt/xenapi/fake.py:714
#, python-format
msgid "Calling setter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:773
+#: nova/virt/xenapi/fake.py:774
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
"with the wrong number of arguments"
msgstr ""
-#: nova/virt/xenapi/host.py:70
+#: nova/virt/xenapi/host.py:71
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
msgstr ""
-#: nova/virt/xenapi/host.py:156
+#: nova/virt/xenapi/host.py:157
#, python-format
msgid "Unable to get SR for this host: %s"
msgstr ""
-#: nova/virt/xenapi/host.py:190
+#: nova/virt/xenapi/host.py:191
#, python-format
msgid "Failed to extract instance support from %s"
msgstr ""
-#: nova/virt/xenapi/host.py:207
+#: nova/virt/xenapi/host.py:208
msgid "Unable to get updated status"
msgstr ""
-#: nova/virt/xenapi/host.py:210
+#: nova/virt/xenapi/host.py:211
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr ""
@@ -7783,498 +7696,497 @@ msgstr ""
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:875
+#: nova/virt/xenapi/vm_utils.py:876
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:933
+#: nova/virt/xenapi/vm_utils.py:934
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:967
+#: nova/virt/xenapi/vm_utils.py:968
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:979
+#: nova/virt/xenapi/vm_utils.py:980
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:992
+#: nova/virt/xenapi/vm_utils.py:993
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1028
+#: nova/virt/xenapi/vm_utils.py:1029
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1039
+#: nova/virt/xenapi/vm_utils.py:1040
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1103
+#: nova/virt/xenapi/vm_utils.py:1104
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1119
+#: nova/virt/xenapi/vm_utils.py:1120
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1123
+#: nova/virt/xenapi/vm_utils.py:1124
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1145
+#: nova/virt/xenapi/vm_utils.py:1146
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1158
+#: nova/virt/xenapi/vm_utils.py:1159
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1167
+#: nova/virt/xenapi/vm_utils.py:1168
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1186
+#: nova/virt/xenapi/vm_utils.py:1187
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1200
+#: nova/virt/xenapi/vm_utils.py:1201
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1209
+#: nova/virt/xenapi/vm_utils.py:1210
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1247
+#: nova/virt/xenapi/vm_utils.py:1248
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1268
+#: nova/virt/xenapi/vm_utils.py:1269
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1286
+#: nova/virt/xenapi/vm_utils.py:1287
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1317
+#: nova/virt/xenapi/vm_utils.py:1318
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1413
+#: nova/virt/xenapi/vm_utils.py:1414
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1440
+#: nova/virt/xenapi/vm_utils.py:1441
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1468
+#: nova/virt/xenapi/vm_utils.py:1469
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1486
+#: nova/virt/xenapi/vm_utils.py:1487
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1499
+#: nova/virt/xenapi/vm_utils.py:1500
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1507
+#: nova/virt/xenapi/vm_utils.py:1508
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1509
+#: nova/virt/xenapi/vm_utils.py:1510
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1512
+#: nova/virt/xenapi/vm_utils.py:1513
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1515
+#: nova/virt/xenapi/vm_utils.py:1516
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1519
+#: nova/virt/xenapi/vm_utils.py:1520
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1521
+#: nova/virt/xenapi/vm_utils.py:1522
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1524
+#: nova/virt/xenapi/vm_utils.py:1525
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1527
+#: nova/virt/xenapi/vm_utils.py:1528
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1530
+#: nova/virt/xenapi/vm_utils.py:1531
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1552
+#: nova/virt/xenapi/vm_utils.py:1553
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1568
+#: nova/virt/xenapi/vm_utils.py:1569
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1622
+#: nova/virt/xenapi/vm_utils.py:1623
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1682
+#: nova/virt/xenapi/vm_utils.py:1683
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1769
+#: nova/virt/xenapi/vm_utils.py:1770
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1779
+#: nova/virt/xenapi/vm_utils.py:1780
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1814
+#: nova/virt/xenapi/vm_utils.py:1815
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1834
+#: nova/virt/xenapi/vm_utils.py:1835
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1847
+#: nova/virt/xenapi/vm_utils.py:1848
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1850
+#: nova/virt/xenapi/vm_utils.py:1851
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1852
+#: nova/virt/xenapi/vm_utils.py:1853
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1855
+#: nova/virt/xenapi/vm_utils.py:1856
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1860
+#: nova/virt/xenapi/vm_utils.py:1861
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1868
+#: nova/virt/xenapi/vm_utils.py:1869
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1881
+#: nova/virt/xenapi/vm_utils.py:1882
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1888
+#: nova/virt/xenapi/vm_utils.py:1889
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1890
+#: nova/virt/xenapi/vm_utils.py:1891
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1903
+#: nova/virt/xenapi/vm_utils.py:1904
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1909
+#: nova/virt/xenapi/vm_utils.py:1910
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1934
+#: nova/virt/xenapi/vm_utils.py:1935
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1947
+#: nova/virt/xenapi/vm_utils.py:1948
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2001
+#: nova/virt/xenapi/vm_utils.py:2002
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2033
+#: nova/virt/xenapi/vm_utils.py:2034
#, python-format
msgid ""
"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
"reduction in size"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2082
+#: nova/virt/xenapi/vm_utils.py:2083
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2091
+#: nova/virt/xenapi/vm_utils.py:2092
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2203
+#: nova/virt/xenapi/vm_utils.py:2204
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:638
+#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:664
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:224
+#: nova/virt/xenapi/vmops.py:229
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:298
+#: nova/virt/xenapi/vmops.py:297
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:358
+#: nova/virt/xenapi/vmops.py:369
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:381
+#: nova/virt/xenapi/vmops.py:400
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:445
+#: nova/virt/xenapi/vmops.py:473
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:462
+#: nova/virt/xenapi/vmops.py:490
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:488
+#: nova/virt/xenapi/vmops.py:516
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:495
+#: nova/virt/xenapi/vmops.py:523
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:498
+#: nova/virt/xenapi/vmops.py:526
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:505
+#: nova/virt/xenapi/vmops.py:533
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:519
+#: nova/virt/xenapi/vmops.py:548
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:549
+#: nova/virt/xenapi/vmops.py:575
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:557
+#: nova/virt/xenapi/vmops.py:583
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:607
+#: nova/virt/xenapi/vmops.py:633
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:611
+#: nova/virt/xenapi/vmops.py:637
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:619
+#: nova/virt/xenapi/vmops.py:645
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:655
+#: nova/virt/xenapi/vmops.py:682
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:779
+#: nova/virt/xenapi/vmops.py:806
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:784
+#: nova/virt/xenapi/vmops.py:811
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:828
+#: nova/virt/xenapi/vmops.py:855
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:911
+#: nova/virt/xenapi/vmops.py:940
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:938
-#, python-format
-msgid "Destroying VDIs for Instance %(instance_uuid)s"
+#: nova/virt/xenapi/vmops.py:966
+msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:966
+#: nova/virt/xenapi/vmops.py:993
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:973
+#: nova/virt/xenapi/vmops.py:1000
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:980
+#: nova/virt/xenapi/vmops.py:1007
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1006
+#: nova/virt/xenapi/vmops.py:1033
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1032
+#: nova/virt/xenapi/vmops.py:1059
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1083
+#: nova/virt/xenapi/vmops.py:1110
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1118
-msgid "VM is not present, skipping power off..."
+#: nova/virt/xenapi/vmops.py:1144
+msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1154
+#: nova/virt/xenapi/vmops.py:1192
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1158
+#: nova/virt/xenapi/vmops.py:1196
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1257
+#: nova/virt/xenapi/vmops.py:1295
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1340
+#: nova/virt/xenapi/vmops.py:1378
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1359
+#: nova/virt/xenapi/vmops.py:1397
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1368
+#: nova/virt/xenapi/vmops.py:1406
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1371
+#: nova/virt/xenapi/vmops.py:1409
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1395
+#: nova/virt/xenapi/vmops.py:1434
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1491
+#: nova/virt/xenapi/vmops.py:1530
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1523
+#: nova/virt/xenapi/vmops.py:1562
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1571
+#: nova/virt/xenapi/vmops.py:1610
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1607
+#: nova/virt/xenapi/vmops.py:1646
msgid "Migrate Send failed"
msgstr ""
@@ -8306,7 +8218,7 @@ msgid "introducing sr within volume_utils"
msgstr ""
#: nova/virt/xenapi/volume_utils.py:92 nova/virt/xenapi/volume_utils.py:159
-#: nova/virt/xenapi/volumeops.py:151
+#: nova/virt/xenapi/volumeops.py:150
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
@@ -8319,7 +8231,7 @@ msgstr ""
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:105 nova/virt/xenapi/volumeops.py:155
+#: nova/virt/xenapi/volume_utils.py:105 nova/virt/xenapi/volumeops.py:154
msgid "Unable to introduce Storage Repository"
msgstr ""
@@ -8440,47 +8352,47 @@ msgstr ""
msgid "Could not forget SR"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:173
+#: nova/virt/xenapi/volumeops.py:172
#, python-format
msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:183
+#: nova/virt/xenapi/volumeops.py:182
#, python-format
msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:194
+#: nova/virt/xenapi/volumeops.py:193
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:204
+#: nova/virt/xenapi/volumeops.py:202
#, python-format
msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:212
+#: nova/virt/xenapi/volumeops.py:210
#, python-format
msgid "Unable to locate volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:221
+#: nova/virt/xenapi/volumeops.py:219
#, python-format
msgid "Unable to detach volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:226
+#: nova/virt/xenapi/volumeops.py:224
#, python-format
msgid "Unable to destroy vbd %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:233
+#: nova/virt/xenapi/volumeops.py:231
#, python-format
msgid "Error purging SR %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:235
+#: nova/virt/xenapi/volumeops.py:233
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
@@ -8520,64 +8432,12 @@ msgstr ""
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/api.py:109
-#, python-format
-msgid "Volume size '%s' must be an integer and greater than 0"
-msgstr ""
-
-#: nova/volume/api.py:128
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to create %(size)sG volume "
-"(%(consumed)dG of %(quota)dG already consumed)"
-msgstr ""
-
-#: nova/volume/api.py:134
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to create volume (%(consumed)d volumes "
-"already consumed)"
-msgstr ""
-
-#: nova/volume/api.py:145
-msgid "Size of specified image is larger than volume size."
-msgstr ""
-
-#: nova/volume/api.py:215
-msgid "Failed to update quota for deleting volume."
-msgstr ""
-
-#: nova/volume/api.py:223
-msgid "Volume status must be available or error"
-msgstr ""
-
-#: nova/volume/api.py:228
-#, python-format
-msgid "Volume still has %d dependent snapshots"
-msgstr ""
-
-#: nova/volume/api.py:395
-msgid "must be available"
-msgstr ""
-
-#: nova/volume/api.py:428
-msgid "Volume Snapshot status must be available or error"
-msgstr ""
-
-#: nova/volume/api.py:479
-msgid "Volume status must be available/in-use."
-msgstr ""
-
-#: nova/volume/api.py:482
-msgid "Volume status is in-use."
-msgstr ""
-
#: nova/volume/cinder.py:68
#, python-format
msgid "Cinderclient connection created using URL: %s"
msgstr ""
-#: nova/volume/driver.py:103 nova/volume/netapp_nfs.py:255
+#: nova/volume/driver.py:103
#, python-format
msgid "Recovering from a failed execute. Try number %s"
msgstr ""
@@ -8703,1152 +8563,3 @@ msgstr ""
msgid "valid iqn needed for show_target"
msgstr ""
-#: nova/volume/manager.py:102
-#, python-format
-msgid "Re-exporting %s volumes"
-msgstr ""
-
-#: nova/volume/manager.py:107
-#, python-format
-msgid "volume %s: skipping export"
-msgstr ""
-
-#: nova/volume/manager.py:109
-msgid "Resuming any in progress delete operations"
-msgstr ""
-
-#: nova/volume/manager.py:112
-#, python-format
-msgid "Resuming delete on volume: %s"
-msgstr ""
-
-#: nova/volume/manager.py:121
-#, python-format
-msgid "volume %s: creating"
-msgstr ""
-
-#: nova/volume/manager.py:136
-#, python-format
-msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
-msgstr ""
-
-#: nova/volume/manager.py:159
-#, python-format
-msgid "volume %s: creating export"
-msgstr ""
-
-#: nova/volume/manager.py:172
-#, python-format
-msgid "volume %s: created successfully"
-msgstr ""
-
-#: nova/volume/manager.py:190
-msgid "Volume is not local to this node"
-msgstr ""
-
-#: nova/volume/manager.py:195
-#, python-format
-msgid "volume %s: removing export"
-msgstr ""
-
-#: nova/volume/manager.py:197
-#, python-format
-msgid "volume %s: deleting"
-msgstr ""
-
-#: nova/volume/manager.py:200
-#, python-format
-msgid "volume %s: volume is busy"
-msgstr ""
-
-#: nova/volume/manager.py:217
-msgid "Failed to update usages deleting volume"
-msgstr ""
-
-#: nova/volume/manager.py:220
-#, python-format
-msgid "volume %s: deleted successfully"
-msgstr ""
-
-#: nova/volume/manager.py:233
-#, python-format
-msgid "snapshot %s: creating"
-msgstr ""
-
-#: nova/volume/manager.py:237
-#, python-format
-msgid "snapshot %(snap_name)s: creating"
-msgstr ""
-
-#: nova/volume/manager.py:252
-#, python-format
-msgid "snapshot %s: created successfully"
-msgstr ""
-
-#: nova/volume/manager.py:261
-#, python-format
-msgid "snapshot %s: deleting"
-msgstr ""
-
-#: nova/volume/manager.py:264
-#, python-format
-msgid "snapshot %s: snapshot is busy"
-msgstr ""
-
-#: nova/volume/manager.py:276
-#, python-format
-msgid "snapshot %s: deleted successfully"
-msgstr ""
-
-#: nova/volume/manager.py:323
-#, python-format
-msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
-msgstr ""
-
-#: nova/volume/manager.py:342
-#, python-format
-msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
-msgstr ""
-
-#: nova/volume/manager.py:426
-msgid "Checking volume capabilities"
-msgstr ""
-
-#: nova/volume/manager.py:430
-#, python-format
-msgid "New capabilities found: %s"
-msgstr ""
-
-#: nova/volume/manager.py:441
-msgid "Clear capabilities"
-msgstr ""
-
-#: nova/volume/manager.py:445
-#, python-format
-msgid "Notification {%s} received"
-msgstr ""
-
-#: nova/volume/netapp.py:108
-#, python-format
-msgid "API %(name)s failed: %(reason)s"
-msgstr ""
-
-#: nova/volume/netapp.py:119 nova/volume/netapp.py:1033
-#, python-format
-msgid "Using WSDL: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:129
-#, python-format
-msgid "Using DFM server: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:134
-#, python-format
-msgid "Using storage service: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:139
-#, python-format
-msgid "Using storage service prefix: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:145
-#, python-format
-msgid "Using vfiler: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:154 nova/volume/netapp.py:1048
-#: nova/volume/netapp_nfs.py:110 nova/volume/storwize_svc.py:326
-#, python-format
-msgid "%s is not set"
-msgstr ""
-
-#: nova/volume/netapp.py:157
-msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set"
-msgstr ""
-
-#: nova/volume/netapp.py:184
-msgid "Connected to DFM server"
-msgstr ""
-
-#: nova/volume/netapp.py:262
-#, python-format
-msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
-msgstr ""
-
-#: nova/volume/netapp.py:297
-#, python-format
-msgid "Job failed: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:326
-msgid ""
-"Attempt to use volume_type without specifying "
-"netapp_storage_service_prefix flag."
-msgstr ""
-
-#: nova/volume/netapp.py:330
-msgid ""
-"You must set the netapp_storage_service flag in order to create volumes "
-"with no volume_type."
-msgstr ""
-
-#: nova/volume/netapp.py:399
-msgid "Failed to provision dataset member"
-msgstr ""
-
-#: nova/volume/netapp.py:414
-msgid "No LUN was created by the provision job"
-msgstr ""
-
-#: nova/volume/netapp.py:452
-msgid "Failed to remove and delete dataset member"
-msgstr ""
-
-#: nova/volume/netapp.py:493
-#, python-format
-msgid "No entry in LUN table for volume %s"
-msgstr ""
-
-#: nova/volume/netapp.py:513
-#, python-format
-msgid "Failed to get LUN details for LUN ID %s"
-msgstr ""
-
-#: nova/volume/netapp.py:530
-#, python-format
-msgid "Failed to get host details for host ID %s"
-msgstr ""
-
-#: nova/volume/netapp.py:767 nova/volume/netapp.py:814
-#, python-format
-msgid "No LUN ID for volume %s"
-msgstr ""
-
-#: nova/volume/netapp.py:776
-#, python-format
-msgid "Failed to get target portal for filer: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:781
-#, python-format
-msgid "Failed to get target IQN for filer: %s"
-msgstr ""
-
-#: nova/volume/netapp.py:968 nova/volume/netapp_nfs.py:74
-#, python-format
-msgid ""
-"Cannot create volume of size %(vol_size)s from snapshot of size "
-"%(snap_size)s"
-msgstr ""
-
-#: nova/volume/netapp.py:980
-#, python-format
-msgid ""
-"Cannot create volume of type %(new_type)s from snapshot of type "
-"%(old_type)s"
-msgstr ""
-
-#: nova/volume/netapp.py:1014
-#, python-format
-msgid "No metadata property %(prop)s defined for the LUN %(name)s"
-msgstr ""
-
-#: nova/volume/netapp.py:1078
-msgid "Success getting LUN list from server"
-msgstr ""
-
-#: nova/volume/netapp.py:1100
-#, python-format
-msgid "Created LUN with name %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1109 nova/volume/netapp.py:1217
-#, python-format
-msgid "Destroyed LUN %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1146
-#, python-format
-msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s"
-msgstr ""
-
-#: nova/volume/netapp.py:1151
-#, python-format
-msgid ""
-"Succesfully fetched target details for LUN %(handle)s and initiator "
-"%(initiator_name)s"
-msgstr ""
-
-#: nova/volume/netapp.py:1156
-#, python-format
-msgid "Failed to get LUN target details for the LUN %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1160
-#, python-format
-msgid "Failed to get target portal for the LUN %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1164
-#, python-format
-msgid "Failed to get target IQN for the LUN %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1197
-#, python-format
-msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s"
-msgstr ""
-
-#: nova/volume/netapp.py:1253
-msgid "Object is not a NetApp LUN."
-msgstr ""
-
-#: nova/volume/netapp.py:1263
-#, python-format
-msgid "Cloned LUN with new name %s"
-msgstr ""
-
-#: nova/volume/netapp.py:1280
-#, python-format
-msgid "Could not find handle for LUN named %s"
-msgstr ""
-
-#: nova/volume/nfs.py:60
-msgid "There's no NFS config file configured "
-msgstr ""
-
-#: nova/volume/nfs.py:62
-msgid "NFS config file doesn't exist"
-msgstr ""
-
-#: nova/volume/nfs.py:85
-#, python-format
-msgid "casted to %s"
-msgstr ""
-
-#: nova/volume/nfs.py:95
-#, python-format
-msgid "Volume %s does not have provider_location specified, skipping"
-msgstr ""
-
-#: nova/volume/nfs.py:106
-#, python-format
-msgid "Trying to delete non-existing volume %(volume)s at path %(mounted_path)s"
-msgstr ""
-
-#: nova/volume/san.py:116 nova/volume/san.py:156
-msgid "Specify san_password or san_private_key"
-msgstr ""
-
-#: nova/volume/san.py:160
-msgid "san_ip must be set"
-msgstr ""
-
-#: nova/volume/san.py:230
-#, python-format
-msgid "Cannot parse list-view output: %s"
-msgstr ""
-
-#: nova/volume/san.py:324
-#, python-format
-msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
-msgstr ""
-
-#: nova/volume/san.py:457
-#, python-format
-msgid "CLIQ command returned %s"
-msgstr ""
-
-#: nova/volume/san.py:463
-#, python-format
-msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
-msgstr ""
-
-#: nova/volume/san.py:471
-#, python-format
-msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
-msgstr ""
-
-#: nova/volume/san.py:501
-#, python-format
-msgid ""
-"Unexpected number of virtual ips for cluster %(cluster_name)s. "
-"Result=%(_xml)s"
-msgstr ""
-
-#: nova/volume/san.py:554
-#, python-format
-msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
-msgstr ""
-
-#: nova/volume/san.py:610
-msgid "local_path not supported"
-msgstr ""
-
-#: nova/volume/solidfire.py:123
-#, python-format
-msgid "Payload for SolidFire API call: %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:140
-#, python-format
-msgid "Call to json.loads() raised an exception: %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:145
-#, python-format
-msgid "Results of SolidFire API call: %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:159
-#, python-format
-msgid "Found solidfire account: %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:173
-#, python-format
-msgid "solidfire account: %s does not exist, create it..."
-msgstr ""
-
-#: nova/volume/solidfire.py:279
-#, python-format
-msgid "More than one valid preset was detected, using %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:306
-msgid "Enter SolidFire delete_volume..."
-msgstr ""
-
-#: nova/volume/solidfire.py:334
-#, python-format
-msgid "Deleting volumeID: %s"
-msgstr ""
-
-#: nova/volume/solidfire.py:342
-msgid "Leaving SolidFire delete_volume"
-msgstr ""
-
-#: nova/volume/solidfire.py:345
-msgid "Executing SolidFire ensure_export..."
-msgstr ""
-
-#: nova/volume/solidfire.py:349
-msgid "Executing SolidFire create_export..."
-msgstr ""
-
-#: nova/volume/solidfire.py:354
-msgid "Enter SolidFire create_snapshot..."
-msgstr ""
-
-#: nova/volume/storwize_svc.py:123
-#, python-format
-msgid ""
-"_get_hdr_dic: attribute headers and values do not match.\n"
-" Headers: %(header)s\n"
-" Values: %(row)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:141
-msgid "enter: check_for_setup_error"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:147 nova/volume/storwize_svc.py:163
-#: nova/volume/storwize_svc.py:171 nova/volume/storwize_svc.py:218
-#: nova/volume/storwize_svc.py:227
-#, python-format
-msgid ""
-"check_for_setup_error: failed with unexpected CLI output.\n"
-" Command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:155
-#, python-format
-msgid "pool %s doesn't exist"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:182 nova/volume/storwize_svc.py:238
-#, python-format
-msgid ""
-"check_for_setup_error: failed with unexpected CLI output.\n"
-" Command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:200
-#, python-format
-msgid "Did not find expected column name in svcinfo lsnode: %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:203 nova/volume/storwize_svc.py:253
-#, python-format
-msgid ""
-"check_for_setup_error: Unexpected CLI output.\n"
-" Details: %(msg)s\n"
-"Command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:250
-#, python-format
-msgid "Did not find expected column name in lsportip: %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:272
-#, python-format
-msgid ""
-"check_for_setup_error: fail to storage configuration: unknown storage "
-"node %(node_id)s from CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:294
-#, python-format
-msgid ""
-"check_for_setup_error: fail to storage configuration: storage node %s has"
-" no IP addresses configured"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:302
-#, python-format
-msgid ""
-"could not obtain IP address and iSCSI name from the storage. Please "
-"verify that the storage is configured for iSCSI.\n"
-" Storage nodes: %(nodes)s\n"
-" portips: %(portips)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:310
-msgid "leave: check_for_setup_error"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:331
-msgid ""
-"Password or SSH private key is required for authentication: set either "
-"san_password or san_private_key option"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:339
-msgid ""
-"Illegal value specified for storwize_svc_vol_rsize: set to either a "
-"number or a percentage"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:346
-msgid ""
-"Illegal value specified for storwize_svc_vol_warning: set to either a "
-"number or a percentage"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:354
-msgid ""
-"Illegal value specified for storwize_svc_vol_grainsize: set to either "
-"'32', '64', '128', or '256'"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:363
-#, python-format
-msgid ""
-"Illegal value %s specified for storwize_svc_flashcopy_timeout: valid "
-"values are between 0 and 600"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:373
-msgid "If compression is set to True, rsize must also be set (not equal to -1)"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:378
-msgid "enter: do_setup"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:380
-msgid "leave: do_setup"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:392
-#, python-format
-msgid "enter: create_volume: volume %s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:429 nova/volume/storwize_svc.py:439
-#, python-format
-msgid ""
-"create volume %(name)s - did not find success message in CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:444
-#, python-format
-msgid "leave: create_volume: volume %(name)s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:453
-#, python-format
-msgid "enter: delete_volume: volume %(name)s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:468
-#, python-format
-msgid ""
-"delete volume %(name)s - non empty output from CLI.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:475
-#, python-format
-msgid "warning: tried to delete volume %(name)s but it does not exist."
-msgstr ""
-
-#: nova/volume/storwize_svc.py:478
-#, python-format
-msgid "leave: delete_volume: volume %(name)s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:488
-#, python-format
-msgid "ensure_export: volume %s not found on storage"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:512
-#, python-format
-msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:527
-msgid "_create_new_host failed to return the host name."
-msgstr ""
-
-#: nova/volume/storwize_svc.py:545
-#, python-format
-msgid ""
-"initialize_connection: did not find preferred node %(node)s for volume "
-"%(vol)s in iSCSI configuration"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:553
-#, python-format
-msgid ""
-"initialize_connection: did not find a preferred node for volume %s in "
-"iSCSI configuration"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:567
-#, python-format
-msgid ""
-"leave: initialize_connection:\n"
-" volume: %(vol)s\n"
-" connector %(conn)s\n"
-" properties: %(prop)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:585
-#, python-format
-msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:595
-#, python-format
-msgid "_get_host_from_iscsiname failed to return the host name for iscsi name %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:606
-#, python-format
-msgid ""
-"delete mapping of volume %(vol)s to host %(host)s - non empty output from"
-" CLI.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:615
-#, python-format
-msgid "terminate_connection: no mapping of volume %(vol)s to host %(host)s found"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:623
-#, python-format
-msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:634
-#, python-format
-msgid ""
-"_run_flashcopy: fail to cleanup failed FlashCopy mapping %(fc_map_id)% "
-"from %(source)s to %(target)s.\n"
-"stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:648
-#, python-format
-msgid ""
-"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
-" %(target)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:656 nova/volume/storwize_svc.py:669
-#, python-format
-msgid ""
-"create FC mapping from %(source)s to %(target)s - did not find success "
-"message in CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:680 nova/volume/storwize_svc.py:689
-#, python-format
-msgid ""
-"create FC mapping from %(source)s to %(target)s - did not find mapping id"
-" in CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:700
-#, python-format
-msgid ""
-"_run_flashcopy: fail to prepare FlashCopy from %(source)s to %(target)s.\n"
-"stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:725
-#, python-format
-msgid ""
-"unexecpted mapping status %(status)s for mapping %(id)s. Attributes: "
-"%(attr)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:737
-#, python-format
-msgid ""
-"mapping %(id)s prepare failed to complete within the alloted %(to)s "
-"seconds timeout. Terminating"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:741
-#, python-format
-msgid ""
-"_run_flashcopy: fail to start FlashCopy from %(source)s to %(target)s "
-"with exception %(ex)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:749
-#, python-format
-msgid "_run_flashcopy: %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:755
-#, python-format
-msgid ""
-"_run_flashcopy: fail to start FlashCopy from %(source)s to %(target)s.\n"
-"stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:764
-#, python-format
-msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:774
-#, python-format
-msgid "enter: create_volume_from_snapshot: snapshot %(tgt)s from volume %(src)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:780
-#, python-format
-msgid "create_volume_from_snapshot: source volume %s does not exist"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:787 nova/volume/storwize_svc.py:841
-#: nova/volume/storwize_svc.py:860
-#, python-format
-msgid ""
-"create_volume_from_snapshot: cannot get source volume %(src)s capacity "
-"from volume attributes %(attr)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:796
-#, python-format
-msgid ""
-"create_volume_from_snapshot: target volume %s already exists, cannot "
-"create"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:815
-#, python-format
-msgid "leave: create_volume_from_snapshot: %s created successfully"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:827
-#, python-format
-msgid "enter: create_snapshot: snapshot %(tgt)s from volume %(src)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:834
-#, python-format
-msgid "create_snapshot: source volume %s does not exist"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:867
-#, python-format
-msgid ""
-"create_snapshot: source %(src)s and target volume %(tgt)s have different "
-"capacities (source:%(ssize)s target:%(tsize)s)"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:885
-#, python-format
-msgid "leave: create_snapshot: %s created successfully"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:893
-#, python-format
-msgid "enter: delete_snapshot: snapshot %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:902
-#, python-format
-msgid "leave: delete_snapshot: snapshot %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:911
-#, python-format
-msgid "enter: _get_host_from_iscsiname: iSCSI initiator %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:921
-#, python-format
-msgid ""
-"_get_host_from_iscsiname: failed with unexpected CLI output.\n"
-" command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:941
-#, python-format
-msgid ""
-"_get_host_from_iscsiname: Unexpected response from CLI output. Command: "
-"%(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:957
-#, python-format
-msgid "leave: _get_host_from_iscsiname: iSCSI initiator %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:970
-#, python-format
-msgid "enter: _create_new_host: host %(name)s with iSCSI initiator %(init)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:979
-msgid ""
-"_create_new_host: cannot clean host name. Host name is not unicode or "
-"string"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:992
-#, python-format
-msgid ""
-"create host %(name)s with iSCSI initiator %(init)s - did not find success"
-" message in CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1000
-#, python-format
-msgid "leave: _create_new_host: host %(host)s with iSCSI initiator %(init)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1009
-#, python-format
-msgid "enter: _delete_host: host %s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1017
-#, python-format
-msgid "warning: tried to delete host %(name)s but it does not exist."
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1020
-#, python-format
-msgid "leave: _delete_host: host %s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1024
-#, python-format
-msgid "enter: _is_volume_defined: volume %s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1026
-#, python-format
-msgid "leave: _is_volume_defined: volume %(vol)s with %(str)s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1037
-#, python-format
-msgid "enter: _is_host_defined: host %s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1049
-#, python-format
-msgid ""
-"_is_host_defined: Unexpected response from CLI output.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1059
-#, python-format
-msgid ""
-"Data received for host %(host1)s instead of host %(host2)s.\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1069
-#, python-format
-msgid "leave: _is_host_defined: host %(host)s with %(str)s "
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1097
-#, python-format
-msgid "enter: _map_vol_to_host: volume %(vol)s to host %(host)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1127
-#, python-format
-msgid ""
-"_map_vol_to_host: mapping host %(host)s to volume %(vol)s with LUN "
-"%(lun)s - did not find success message in CLI output. stdout: %(out)s\n"
-" stderr: %(err)s\n"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1137
-#, python-format
-msgid "leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, host %(host)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1153
-#, python-format
-msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1160 nova/volume/storwize_svc.py:1172
-#, python-format
-msgid ""
-"_get_flashcopy_mapping_attributes: Unexpected response from CLI output. "
-"Command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1184
-#, python-format
-msgid ""
-"leave: _get_flashcopy_mapping_attributes: mapping %(id)s, attributes "
-"%(attr)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1198
-#, python-format
-msgid "enter: _get_volume_attributes: volume %s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1207
-#, python-format
-msgid ""
-"CLI Exception output:\n"
-" command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-
-#: nova/volume/storwize_svc.py:1228
-#, python-format
-msgid ""
-"leave: _get_volume_attributes:\n"
-" volume %(vol)s\n"
-" attributes: %(attr)s"
-msgstr ""
-
-#: nova/volume/volume_types.py:49 nova/volume/volume_types.py:108
-msgid "name cannot be None"
-msgstr ""
-
-#: nova/volume/volume_types.py:96
-msgid "id cannot be None"
-msgstr ""
-
-#: nova/volume/xensm.py:55
-#, python-format
-msgid "SR name = %s"
-msgstr ""
-
-#: nova/volume/xensm.py:56
-#, python-format
-msgid "Params: %s"
-msgstr ""
-
-#: nova/volume/xensm.py:60
-#, python-format
-msgid "Failed to create sr %s...continuing"
-msgstr ""
-
-#: nova/volume/xensm.py:62
-msgid "Create failed"
-msgstr ""
-
-#: nova/volume/xensm.py:65
-#, python-format
-msgid "SR UUID of new SR is: %s"
-msgstr ""
-
-#: nova/volume/xensm.py:72
-msgid "Failed to update db"
-msgstr ""
-
-#: nova/volume/xensm.py:82
-#, python-format
-msgid "Failed to introduce sr %s...continuing"
-msgstr ""
-
-#: nova/volume/xensm.py:93
-#, python-format
-msgid "Failed to reach backend %d"
-msgstr ""
-
-#: nova/volume/xensm.py:102
-#, python-format
-msgid "XenSMDriver requires xenapi connection, using %s"
-msgstr ""
-
-#: nova/volume/xensm.py:114
-msgid "Failed to initiate session"
-msgstr ""
-
-#: nova/volume/xensm.py:147
-#, python-format
-msgid "Volume will be created in backend - %d"
-msgstr ""
-
-#: nova/volume/xensm.py:159
-msgid "Failed to update volume in db"
-msgstr ""
-
-#: nova/volume/xensm.py:163
-msgid "Unable to create volume"
-msgstr ""
-
-#: nova/volume/xensm.py:170
-#, python-format
-msgid "Volume %s does not exist"
-msgstr ""
-
-#: nova/volume/xensm.py:180
-msgid "Failed to delete vdi"
-msgstr ""
-
-#: nova/volume/xensm.py:187
-msgid "Failed to delete volume in db"
-msgstr ""
-
-#: nova/volume/xensm.py:221
-msgid "Failed to find volume in db"
-msgstr ""
-
-#: nova/volume/xensm.py:233
-msgid "Failed to find backend in db"
-msgstr ""
-
-#: nova/volume/nexenta/__init__.py:27
-msgid "Nexenta SA returned the error"
-msgstr ""
-
-#: nova/volume/nexenta/jsonrpc.py:64
-#, python-format
-msgid "Sending JSON data: %s"
-msgstr ""
-
-#: nova/volume/nexenta/jsonrpc.py:69
-#, python-format
-msgid "Auto switching to HTTPS connection to %s"
-msgstr ""
-
-#: nova/volume/nexenta/jsonrpc.py:75
-msgid "No headers in server response"
-msgstr ""
-
-#: nova/volume/nexenta/jsonrpc.py:76
-msgid "Bad response from server"
-msgstr ""
-
-#: nova/volume/nexenta/jsonrpc.py:79
-#, python-format
-msgid "Got response: %s"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:96
-#, python-format
-msgid "Volume %s does not exist in Nexenta SA"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:180
-msgid ""
-"Call to local_path should not happen. Verify that use_local_volumes flag "
-"is turned off."
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:202
-#, python-format
-msgid "Ignored target creation error \"%s\" while ensuring export"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:210
-#, python-format
-msgid "Ignored target group creation error \"%s\" while ensuring export"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:219
-#, python-format
-msgid "Ignored target group member addition error \"%s\" while ensuring export"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:227
-#, python-format
-msgid "Ignored LU creation error \"%s\" while ensuring export"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:237
-#, python-format
-msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:273
-#, python-format
-msgid ""
-"Got error trying to destroy target group %(target_group)s, assuming it is"
-" already gone: %(exc)s"
-msgstr ""
-
-#: nova/volume/nexenta/volume.py:280
-#, python-format
-msgid ""
-"Got error trying to delete target %(target)s, assuming it is already "
-"gone: %(exc)s"
-msgstr ""
-
diff --git a/nova/manager.py b/nova/manager.py
index 2a7570c3b..0e447c7c1 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -183,10 +183,30 @@ class Manager(base.Base):
locals())
def init_host(self):
- """Handle initialization if this is a standalone service.
+ """Hook to do additional manager initialization when one requests
+ the service be started. This is called before any service record
+ is created.
Child classes should override this method.
+ """
+ pass
+
+ def pre_start_hook(self):
+ """Hook to provide the manager the ability to do additional
+ start-up work before any RPC queues/consumers are created. This is
+ called after other initialization has succeeded and a service
+ record is created.
+ Child classes should override this method.
+ """
+ pass
+
+ def post_start_hook(self):
+ """Hook to provide the manager the ability to do additional
+ start-up work immediately after a service creates RPC consumers
+ and starts 'running'.
+
+ Child classes should override this method.
"""
pass
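A minimal sketch of how a manager subclass might use the three hooks above; ExampleManager and its attribute are hypothetical and assume only the ordering documented in the docstrings:

    # Hypothetical subclass illustrating when each hook fires relative
    # to service start-up; nothing here is defined by the patch itself.
    from nova import manager

    class ExampleManager(manager.Manager):
        def init_host(self):
            # Before any service record exists: set up purely local state.
            self.local_cache = {}

        def pre_start_hook(self):
            # Service record exists but no RPC consumers yet: safe to
            # write to the database, unsafe to expect incoming messages.
            pass

        def post_start_hook(self):
            # RPC consumers are running: safe to send and receive RPCs.
            pass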
diff --git a/nova/network/api.py b/nova/network/api.py
index bb05a0c2a..d2ce876b8 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -353,6 +353,7 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@@ -370,5 +371,6 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a944ebd40..cfd1a320f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -69,6 +69,7 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import utils
@@ -589,7 +590,7 @@ class FloatingIP(object):
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
service = self.db.service_get_by_host_and_topic(
- context, instance['host'], 'network')
+ context.elevated(), instance['host'], 'network')
if service and utils.service_is_up(service):
host = instance['host']
else:
@@ -840,7 +841,7 @@ class NetworkManager(manager.SchedulerDependentManager):
The one at a time part is to flatten the layout to help scale
"""
- RPC_API_VERSION = '1.1'
+ RPC_API_VERSION = '1.2'
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
@@ -943,7 +944,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# NOTE(francois.charlier): the instance may have been deleted already
# thus enabling `read_deleted`
admin_context = context.get_admin_context(read_deleted='yes')
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
instance_ref = self.db.instance_get_by_uuid(admin_context,
instance_id)
else:
@@ -1277,7 +1278,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to an instance from specified network."""
- if utils.is_uuid_like(network_id):
+ if uuidutils.is_uuid_like(network_id):
network = self.get_network(context, network_id)
else:
network = self._get_network_by_id(context, network_id)
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 241bb1a2d..232b82c3d 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -26,7 +26,7 @@ from nova.network import quantumv2
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova import utils
+from nova.openstack.common import uuidutils
quantum_opts = [
@@ -90,6 +90,11 @@ class API(base.Base):
search_opts['id'] = net_ids
nets += quantum.list_networks(**search_opts).get('networks', [])
+ _ensure_requested_network_ordering(
+ lambda x: x['id'],
+ nets,
+ net_ids)
+
return nets
def allocate_for_instance(self, context, instance, **kwargs):
@@ -437,7 +442,7 @@ class API(base.Base):
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
- if utils.is_uuid_like(name_or_id):
+ if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
@@ -508,15 +513,13 @@ class API(base.Base):
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance"""
        # NOTE(wenjianhn): just pass so that migrating an instance
        # doesn't raise for now.
pass
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance"""
        # NOTE(wenjianhn): just pass so that migrating an instance
        # doesn't raise for now.
@@ -534,6 +537,13 @@ class API(base.Base):
if not networks:
networks = self._get_available_networks(context,
instance['project_id'])
+ else:
+ # ensure ports are in preferred network order
+ _ensure_requested_network_ordering(
+ lambda x: x['network_id'],
+ ports,
+ [n['id'] for n in networks])
+
nw_info = network_model.NetworkInfo()
for port in ports:
network_name = None
@@ -647,3 +657,9 @@ class API(base.Base):
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
+
+
+def _ensure_requested_network_ordering(accessor, unordered, preferred):
+ """Sort a list with respect to the preferred network ordering."""
+ if preferred:
+ unordered.sort(key=lambda i: preferred.index(accessor(i)))
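Since list.sort works in place and preferred.index() assumes every accessed id appears in the preferred list, the helper's effect is easiest to see standalone; the data below is made up:

    # Standalone copy of the helper with made-up data: items are
    # reordered in place to match the preferred id list. Every id the
    # accessor yields must appear in `preferred`, or index() raises
    # ValueError.
    def _ensure_requested_network_ordering(accessor, unordered, preferred):
        if preferred:
            unordered.sort(key=lambda i: preferred.index(accessor(i)))

    nets = [{'id': 'net-b'}, {'id': 'net-a'}]
    _ensure_requested_network_ordering(lambda x: x['id'], nets,
                                       ['net-a', 'net-b'])
    print(nets)  # [{'id': 'net-a'}, {'id': 'net-b'}]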
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index 6f31e140b..8dd5e3a69 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -267,7 +267,11 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
- floating_addresses):
+ floating_addresses, host=None):
+ if host is not None:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = self.topic
return self.call(ctxt, self.make_msg(
'migrate_instance_start',
instance_uuid=instance_uuid,
@@ -276,13 +280,16 @@ class NetworkAPI(rpc_proxy.RpcProxy):
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses),
- topic=rpc.queue_get_for(ctxt, self.topic,
- dest_compute),
- version='1.2')
+ topic=topic,
+ version='1.2')
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
- floating_addresses):
+ floating_addresses, host=None):
+ if host is not None:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = self.topic
return self.call(ctxt, self.make_msg(
'migrate_instance_finish',
instance_uuid=instance_uuid,
@@ -291,6 +298,5 @@ class NetworkAPI(rpc_proxy.RpcProxy):
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses),
- topic=rpc.queue_get_for(ctxt, self.topic,
- dest_compute),
- version='1.2')
+ topic=topic,
+ version='1.2')
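Both methods now share the same topic-selection pattern: direct the call at a specific network host's queue when the caller supplies one, otherwise use the shared topic. A standalone sketch, with queue_get_for stubbed to the 'topic.host' form nova's rpc layer produces:

    # Minimal model of the host-directed topic selection above;
    # queue_get_for is stubbed so the snippet runs on its own.
    def queue_get_for(ctxt, topic, host):
        return '%s.%s' % (topic, host)

    def pick_topic(ctxt, base_topic, host=None):
        if host is not None:
            return queue_get_for(ctxt, base_topic, host)
        return base_topic

    print(pick_topic(None, 'network'))            # network
    print(pick_topic(None, 'network', 'node-1'))  # network.node-1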
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 35c7972c8..67a06a7af 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -76,6 +76,9 @@ log_opts = [
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
@@ -170,6 +173,14 @@ class ContextAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
+ def deprecated(self, msg, *args, **kwargs):
+ stdmsg = _("Deprecated Config: %s") % msg
+ if CONF.fatal_deprecations:
+ self.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+ else:
+ self.warn(stdmsg, *args, **kwargs)
+
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
@@ -450,3 +461,10 @@ class ColorHandler(logging.StreamHandler):
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
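A simplified, self-contained model of the deprecated() behavior added above: warn by default, raise when fatal_deprecations is set (the real code reads CONF.fatal_deprecations and logs through the ContextAdapter):

    # Simplified stand-ins for CONF.fatal_deprecations and the adapter's
    # deprecated() method; the branch logic mirrors the patch above.
    import logging

    logging.basicConfig()
    FATAL_DEPRECATIONS = False

    class DeprecatedConfig(Exception):
        message = "Fatal call to deprecated config: %(msg)s"

        def __init__(self, msg):
            super(DeprecatedConfig, self).__init__(
                self.message % {'msg': msg})

    def deprecated(log, msg):
        stdmsg = "Deprecated Config: %s" % msg
        if FATAL_DEPRECATIONS:
            log.critical(stdmsg)
            raise DeprecatedConfig(msg=stdmsg)
        log.warning(stdmsg)

    deprecated(logging.getLogger(__name__), "'foo' is deprecated, use 'bar'")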
diff --git a/nova/openstack/common/uuidutils.py b/nova/openstack/common/uuidutils.py
new file mode 100644
index 000000000..51042a798
--- /dev/null
+++ b/nova/openstack/common/uuidutils.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Intel Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+UUID related utilities and helper functions.
+"""
+
+import uuid
+
+
+def is_uuid_like(val):
+ """Returns validation of a value as a UUID.
+
+ For our purposes, a UUID is a canonical form string:
+ aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+
+ """
+ try:
+ return str(uuid.UUID(val)) == val
+ except (TypeError, ValueError, AttributeError):
+ return False
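Because uuid.UUID() also accepts hex strings without dashes, it is the round-trip comparison that restricts matches to the canonical form; a few examples:

    # Usage of is_uuid_like as defined above: only the canonical
    # 8-4-4-4-12 dashed form round-trips through uuid.UUID unchanged.
    import uuid

    def is_uuid_like(val):
        try:
            return str(uuid.UUID(val)) == val
        except (TypeError, ValueError, AttributeError):
            return False

    print(is_uuid_like(str(uuid.uuid4())))   # True
    print(is_uuid_like('a' * 32))            # False: parses, but no dashes
    print(is_uuid_like(12345))               # False: not a string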
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index e0f351a78..6d6288d83 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -92,11 +92,3 @@ class ChanceScheduler(driver.Scheduler):
filter_properties)
self.compute_rpcapi.prep_resize(context, image, instance,
instance_type, host, reservations)
-
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- """Picks a host that is up at random."""
- host = self._schedule(context, FLAGS.volume_topic, None, {})
- driver.cast_to_host(context, FLAGS.volume_topic, host, 'create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index cba1ed935..012ad09ed 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -181,11 +181,6 @@ class Scheduler(object):
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- msg = _("Driver must implement schedule_create_volune")
- raise NotImplementedError(msg)
-
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 4bddb949b..78bd49a96 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -42,13 +42,6 @@ class FilterScheduler(driver.Scheduler):
self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
- def schedule_create_volume(self, context, volume_id, snapshot_id, image_id,
- reservations):
- # NOTE: We're only focused on compute instances right now,
- # so this method will always raise NoValidHost().
- msg = _("No host selection for %s defined.") % FLAGS.volume_topic
- raise exception.NoValidHost(reason=msg)
-
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 4727c4706..fc9f3c8cc 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -96,15 +96,8 @@ class HostState(object):
def __init__(self, host, topic, capabilities=None, service=None):
self.host = host
self.topic = topic
+ self.update_capabilities(topic, capabilities, service)
- # Read-only capability dicts
-
- if capabilities is None:
- capabilities = {}
- self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
- if service is None:
- service = {}
- self.service = ReadOnlyDict(service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_disk_gb = 0
@@ -130,8 +123,22 @@ class HostState(object):
# Resource oversubscription values for the compute host:
self.limits = {}
+ self.updated = None
+
+ def update_capabilities(self, topic, capabilities=None, service=None):
+ # Read-only capability dicts
+
+ if capabilities is None:
+ capabilities = {}
+ self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
+ if service is None:
+ service = {}
+ self.service = ReadOnlyDict(service)
+
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
+ if self.updated and self.updated > compute['updated_at']:
+ return
all_ram_mb = compute['memory_mb']
# Assume virtual size is all consumed by instances if use qcow2 disk.
@@ -148,6 +155,7 @@ class HostState(object):
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute['vcpus']
self.vcpus_used = compute['vcpus_used']
+ self.updated = compute['updated_at']
stats = compute.get('stats', [])
statmap = self._statmap(stats)
@@ -191,6 +199,7 @@ class HostState(object):
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
+ self.updated = timeutils.utcnow()
# Track number of instances on host
self.num_instances += 1
@@ -270,6 +279,7 @@ class HostManager(object):
def __init__(self):
self.service_states = {} # { <host> : { <service> : { cap k : v }}}
+ self.host_state_map = {}
self.filter_classes = filters.get_filter_classes(
FLAGS.scheduler_available_filters)
@@ -341,8 +351,6 @@ class HostManager(object):
raise NotImplementedError(_(
"host_manager only implemented for 'compute'"))
- host_state_map = {}
-
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
for compute in compute_nodes:
@@ -352,10 +360,15 @@ class HostManager(object):
continue
host = service['host']
capabilities = self.service_states.get(host, None)
- host_state = self.host_state_cls(host, topic,
- capabilities=capabilities,
- service=dict(service.iteritems()))
+ host_state = self.host_state_map.get(host)
+ if host_state:
+ host_state.update_capabilities(topic, capabilities,
+ dict(service.iteritems()))
+ else:
+ host_state = self.host_state_cls(host, topic,
+ capabilities=capabilities,
+ service=dict(service.iteritems()))
+ self.host_state_map[host] = host_state
host_state.update_from_compute_node(compute)
- host_state_map[host] = host_state
- return host_state_map
+ return self.host_state_map
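With host_state_map the HostManager now reuses HostState objects across requests, and the new `updated` timestamp keeps a just-consumed host from being rolled back by a stale compute_node row. A compact, runnable model of that guard (timestamps made up):

    # Compact model of the stale-update guard: an update carrying an
    # older updated_at than the last local consume is ignored.
    import datetime

    class HostState(object):
        def __init__(self):
            self.free_ram_mb = 0
            self.updated = None

        def update_from_compute_node(self, compute):
            if self.updated and self.updated > compute['updated_at']:
                return  # local view is newer; keep the consumed values
            self.free_ram_mb = compute['free_ram_mb']
            self.updated = compute['updated_at']

        def consume_from_instance(self, ram_mb):
            self.free_ram_mb -= ram_mb
            self.updated = datetime.datetime.utcnow()

    now = datetime.datetime.utcnow()
    hs = HostState()
    hs.update_from_compute_node({'free_ram_mb': 2048, 'updated_at': now})
    hs.consume_from_instance(512)
    # Re-reading the same (now stale) DB row does not undo the consume:
    hs.update_from_compute_node({'free_ram_mb': 2048, 'updated_at': now})
    print(hs.free_ram_mb)  # 1536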
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 4c3f8025a..531c54726 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -26,6 +26,7 @@ import sys
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+import nova.context
from nova import db
from nova import exception
from nova import flags
@@ -42,7 +43,7 @@ from nova import quota
LOG = logging.getLogger(__name__)
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
- default='nova.scheduler.multi.MultiScheduler',
+ default='nova.scheduler.filter_scheduler.FilterScheduler',
help='Default driver to use for the scheduler')
FLAGS = flags.FLAGS
@@ -54,7 +55,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '2.2'
+ RPC_API_VERSION = '2.3'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -62,6 +63,13 @@ class SchedulerManager(manager.Manager):
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
+ def post_start_hook(self):
+ """After we start up and can receive messages via RPC, tell all
+ compute nodes to send us their capabilities.
+ """
+ ctxt = nova.context.get_admin_context()
+ compute_rpcapi.ComputeAPI().publish_service_capabilities(ctxt)
+
def update_service_capabilities(self, context, service_name,
host, capabilities):
"""Process a capability update from a service node."""
@@ -72,14 +80,8 @@ class SchedulerManager(manager.Manager):
def create_volume(self, context, volume_id, snapshot_id,
reservations=None, image_id=None):
- try:
- self.driver.schedule_create_volume(
- context, volume_id, snapshot_id, image_id)
- except Exception as ex:
- with excutils.save_and_reraise_exception():
- LOG.warning(_("Failed to schedule create_volume: %(ex)s") %
- locals())
- db.volume_update(context, volume_id, {'status': 'error'})
+        # function removed in RPC API 2.3
+ pass
def live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
@@ -259,6 +261,3 @@ class SchedulerManager(manager.Manager):
@manager.periodic_task
def _expire_reservations(self, context):
QUOTAS.expire(context)
-
- def request_service_capabilities(self, context):
- compute_rpcapi.ComputeAPI().publish_service_capabilities(context)
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index c589d6276..13e3c0e1a 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -19,6 +19,12 @@
"""
Scheduler that allows routing some calls to one driver and others to another.
+
+This scheduler was originally used to deal with both compute and volume, but
+it is now used for OpenStack extensions that want to use nova-scheduler to
+schedule requests to compute nodes while providing their own manager and topic.
+
+https://bugs.launchpad.net/nova/+bug/1009681
"""
from nova import flags
@@ -32,9 +38,6 @@ multi_scheduler_opts = [
default='nova.scheduler.'
'filter_scheduler.FilterScheduler',
help='Driver to use for scheduling compute calls'),
- cfg.StrOpt('volume_scheduler_driver',
- default='nova.scheduler.chance.ChanceScheduler',
- help='Driver to use for scheduling volume calls'),
cfg.StrOpt('default_scheduler_driver',
default='nova.scheduler.chance.ChanceScheduler',
help='Default driver to use for scheduling calls'),
@@ -56,13 +59,10 @@ class MultiScheduler(driver.Scheduler):
super(MultiScheduler, self).__init__()
compute_driver = importutils.import_object(
FLAGS.compute_scheduler_driver)
- volume_driver = importutils.import_object(
- FLAGS.volume_scheduler_driver)
default_driver = importutils.import_object(
FLAGS.default_scheduler_driver)
self.drivers = {'compute': compute_driver,
- 'volume': volume_driver,
'default': default_driver}
def schedule_run_instance(self, *args, **kwargs):
@@ -71,9 +71,6 @@ class MultiScheduler(driver.Scheduler):
def schedule_prep_resize(self, *args, **kwargs):
return self.drivers['compute'].schedule_prep_resize(*args, **kwargs)
- def schedule_create_volume(self, *args, **kwargs):
- return self.drivers['volume'].schedule_create_volume(*args, **kwargs)
-
def update_service_capabilities(self, service_name, host, capabilities):
# Multi scheduler is only a holder of sub-schedulers, so
# pass the capabilities to the schedulers that matter
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index 2c280be44..b41668733 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -46,6 +46,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.0 - Remove 1.x backwards compat
2.1 - Add image_id to create_volume()
2.2 - Remove reservations argument to create_volume()
+ 2.3 - Remove create_volume()
'''
#
@@ -95,13 +96,6 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
disk_over_commit=disk_over_commit, instance=instance_p,
dest=dest))
- def create_volume(self, ctxt, volume_id, snapshot_id, image_id):
- self.cast(ctxt,
- self.make_msg('create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id),
- version='2.2')
-
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
deleted file mode 100644
index 48e5ea37d..000000000
--- a/nova/scheduler/simple.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 OpenStack, LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple Scheduler - for Volumes
-
-Note: Deprecated in Folsom. Will be removed along with nova-volumes
-"""
-
-from nova.common import deprecated
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.scheduler import chance
-from nova.scheduler import driver
-from nova import utils
-
-
-simple_scheduler_opts = [
- cfg.IntOpt("max_gigabytes",
- default=10000,
- help="maximum number of volume gigabytes to allow per host"),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(simple_scheduler_opts)
-
-
-class SimpleScheduler(chance.ChanceScheduler):
- """Implements Naive Scheduler that tries to find least loaded host."""
-
- def schedule_run_instance(self, context, request_spec, admin_password,
- injected_files, requested_networks,
- is_first_time, filter_properties):
- deprecated.warn(_('SimpleScheduler now only covers volume scheduling '
- 'and is deprecated in Folsom. Non-volume functionality in '
- 'SimpleScheduler has been replaced by FilterScheduler'))
- super(SimpleScheduler, self).schedule_run_instance(context,
- request_spec, admin_password, injected_files,
- requested_networks, is_first_time, filter_properties)
-
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- """Picks a host that is up and has the fewest volumes."""
- deprecated.warn(_('nova-volume functionality is deprecated in Folsom '
- 'and will be removed in Grizzly. Volumes are now handled '
- 'by Cinder'))
- elevated = context.elevated()
-
- volume_ref = db.volume_get(context, volume_id)
- availability_zone = volume_ref.get('availability_zone')
-
- zone, host = None, None
- if availability_zone:
- zone, _x, host = availability_zone.partition(':')
- if host and context.is_admin:
- service = db.service_get_by_args(elevated, host, 'nova-volume')
- if not utils.service_is_up(service):
- raise exception.WillNotSchedule(host=host)
- driver.cast_to_volume_host(context, host, 'create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id)
- return None
-
- results = db.service_get_all_volume_sorted(elevated)
- if zone:
- results = [(service, gigs) for (service, gigs) in results
- if service['availability_zone'] == zone]
- for result in results:
- (service, volume_gigabytes) = result
- if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
- msg = _("Not enough allocatable volume gigabytes remaining")
- raise exception.NoValidHost(reason=msg)
- if utils.service_is_up(service) and not service['disabled']:
- driver.cast_to_volume_host(context, service['host'],
- 'create_volume', volume_id=volume_id,
- snapshot_id=snapshot_id, image_id=image_id)
- return None
- msg = _("Is the appropriate service running?")
- raise exception.NoValidHost(reason=msg)
diff --git a/nova/service.py b/nova/service.py
index 26f430a80..41c1b0012 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -397,8 +397,7 @@ class Service(object):
except exception.NotFound:
self._create_service_ref(ctxt)
- if 'nova-compute' == self.binary:
- self.manager.update_available_resource(ctxt)
+ self.manager.pre_start_hook()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
@@ -417,8 +416,7 @@ class Service(object):
# Consume from all consumers in a thread
self.conn.consume_in_thread()
- if 'nova-scheduler' == self.binary:
- self.manager.request_service_capabilities(ctxt)
+ self.manager.post_start_hook()
if self.report_interval:
pulse = utils.LoopingCall(self.report_state)
@@ -612,7 +610,10 @@ class WSGIService(object):
"""
if self.manager:
self.manager.init_host()
+ self.manager.pre_start_hook()
self.server.start()
+ if self.manager:
+ self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
diff --git a/nova/test.py b/nova/test.py
index 911ad1390..cd82d74e2 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -32,6 +32,7 @@ import nose.plugins.skip
import stubout
import testtools
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -68,7 +69,7 @@ class TestCase(testtools.TestCase):
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
- flags.parse_args([], default_config_files=[])
+ config.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 49ee9c152..13e854077 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -114,8 +114,7 @@ class CinderCloudTestCase(test.TestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.flags(compute_scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# set up services
self.compute = self.start_service('compute')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 1abde1069..95003ee87 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -127,8 +127,7 @@ class CloudTestCase(test.TestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.flags(compute_scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# set up services
self.compute = self.start_service('compute')
@@ -1825,19 +1824,30 @@ class CloudTestCase(test.TestCase):
class BDM(object):
def __init__(self):
self.no_device = None
- self.values = dict(snapshot_id=snapshots[0],
+ self.values = dict(id=1,
+ snapshot_id=snapshots[0],
volume_id=volumes[0],
virtual_name=None,
volume_size=1,
device_name='sda1',
- delete_on_termination=False)
+ delete_on_termination=False,
+ connection_info='{"foo":"bar"}')
def __getattr__(self, name):
- return self.values.get(name)
+ """Properly delegate dotted lookups"""
+ if name in self.__dict__['values']:
+ return self.values.get(name)
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ raise AttributeError
def __getitem__(self, key):
return self.values.get(key)
+ def iteritems(self):
+ return self.values.iteritems()
+
return [BDM()]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
@@ -1913,11 +1923,20 @@ class CloudTestCase(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- return self.values.get(name)
+ """Properly delegate dotted lookups"""
+ if name in self.__dict__['values']:
+ return self.values.get(name)
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ raise AttributeError
def __getitem__(self, key):
return self.values.get(key)
+ def iteritems(self):
+ return self.values.iteritems()
+
return [BDM()]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 995e566f3..0afa00f2b 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -722,14 +722,24 @@ class ServerActionsControllerTest(test.TestCase):
virtual_name=None,
volume_size=1,
device_name='vda',
+ snapshot_id=1,
delete_on_termination=False)
def __getattr__(self, name):
- return self.values.get(name)
+ """Properly delegate dotted lookups"""
+ if name in self.__dict__['values']:
+ return self.values.get(name)
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ raise AttributeError
def __getitem__(self, key):
return self.values.get(key)
+ def iteritems(self):
+ return self.values.iteritems()
+
return [BDM()]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
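The same fake-BDM fix appears three times above: the old blanket __getattr__ returned None for any missing attribute, masking real lookups, while the new one only serves keys present in `values` and raises AttributeError otherwise. A standalone sketch of the pattern:

    # Standalone sketch of the fake-BDM delegation pattern: attribute
    # access serves only keys in self.values; unknown names raise
    # AttributeError instead of silently returning None.
    class FakeBDM(object):
        def __init__(self):
            self.values = {'volume_id': 'vol-1', 'device_name': 'sda1'}

        def __getattr__(self, name):
            if name in self.__dict__['values']:
                return self.values.get(name)
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)

        def __getitem__(self, key):
            return self.values.get(key)

    bdm = FakeBDM()
    print(bdm.volume_id)       # vol-1
    print(bdm['device_name'])  # sda1
    # bdm.missing would raise AttributeError rather than return None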
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
index 4fe6722c3..e9184ee5d 100644
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ b/nova/tests/baremetal/test_proxy_bare_metal.py
@@ -257,7 +257,7 @@ class BareMetalTestCase(test.TestCase):
self.mox.ReplayAll()
# Code under test
- conn = driver.BareMetalDriver(True)
+ conn = driver.BareMetalDriver(None, True)
# TODO(mikalstill): this is not a very good fake instance
info = conn.get_info({'name': 'instance-00000001'})
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 8a5ced502..be3eeb601 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -207,6 +207,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
+ # Just to make long lines short
+ self.rt = self.compute.resource_tracker
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -305,20 +307,17 @@ class ComputeTestCase(BaseTestCase):
def test_create_instance_unlimited_memory(self):
"""Default of memory limit=None is unlimited"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(999999999999,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
@@ -328,26 +327,21 @@ class ComputeTestCase(BaseTestCase):
def test_create_multiple_instances_then_starve(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEquals(1024,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
- self.assertEquals(256,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEquals(1024, self.rt.compute_node['memory_mb_used'])
+ self.assertEquals(256, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEquals(3072,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
- self.assertEquals(768,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEquals(3072, self.rt.compute_node['memory_mb_used'])
+ self.assertEquals(768, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance(params)
@@ -359,8 +353,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed ram policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -380,16 +373,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(instance_mb,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -414,8 +405,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed cpu policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
@@ -431,8 +421,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(2,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
@@ -441,16 +430,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(3,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(3, self.rt.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
- self.compute.resource_tracker.update_usage(self.context,
+ self.rt.update_usage(self.context,
instance=instance)
- self.assertEqual(2,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
@@ -467,8 +454,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed disk policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -487,16 +473,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(instance_gb,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -754,7 +738,7 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
@@ -763,10 +747,10 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STARTING})
+ {"task_state": task_states.POWERING_ON})
self.compute.start_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
@@ -792,11 +776,11 @@ class ComputeTestCase(BaseTestCase):
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
+ db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, instance=instance)
self.assertTrue(called['rescued'])
- db.instance_update(self.context, instance['uuid'],
+ db.instance_update(self.context, instance_uuid,
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context, instance=instance)
self.assertTrue(called['unrescued'])
@@ -818,7 +802,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_ON})
- self.compute.power_on_instance(self.context, instance=instance)
+ self.compute.start_instance(self.context, instance=instance)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, instance=instance)
@@ -837,7 +821,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
- self.compute.power_off_instance(self.context, instance=instance)
+ self.compute.stop_instance(self.context, instance=instance)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, instance=instance)
@@ -1165,7 +1149,6 @@ class ComputeTestCase(BaseTestCase):
fake_driver_reset_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
- instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.compute.reset_network(self.context, instance=instance)
@@ -1177,7 +1160,6 @@ class ComputeTestCase(BaseTestCase):
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
- instance_uuid = instance['uuid']
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1317,7 +1299,6 @@ class ComputeTestCase(BaseTestCase):
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
- instance_uuid = instance['uuid']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
@@ -1338,7 +1319,6 @@ class ComputeTestCase(BaseTestCase):
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
- instance_uuid = instance['uuid']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.compute.remove_fixed_ip_from_instance(self.context, 1,
@@ -1590,12 +1570,14 @@ class ComputeTestCase(BaseTestCase):
"""ensure that task_state is reverted after a failed operation"""
actions = [
("reboot_instance", task_states.REBOOTING),
- ("stop_instance", task_states.STOPPING),
- ("start_instance", task_states.STARTING),
+ ("stop_instance", task_states.POWERING_OFF),
+ ("start_instance", task_states.POWERING_ON),
("terminate_instance", task_states.DELETING,
task_states.DELETING),
("power_off_instance", task_states.POWERING_OFF),
("power_on_instance", task_states.POWERING_ON),
+ ("soft_delete_instance", task_states.SOFT_DELETING),
+ ("restore_instance", task_states.RESTORING),
("rebuild_instance", task_states.REBUILDING, None,
{'orig_image_ref': None,
'image_ref': None,
@@ -1770,8 +1752,6 @@ class ComputeTestCase(BaseTestCase):
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
- old_type_id = instance_types.get_instance_type_by_name(
- 'm1.tiny')['id']
new_type = instance_types.get_instance_type_by_name('m1.small')
new_type = jsonutils.to_primitive(new_type)
new_type_id = new_type['id']
@@ -1954,12 +1934,12 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
# Confirm the instance size before the resize starts
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
+ inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
instance_type_ref = db.instance_type_get(self.context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
- new_inst_ref = db.instance_update(self.context, instance['uuid'],
+ new_inst_ref = db.instance_update(self.context, instance_uuid,
{'host': 'foo'})
new_instance_type_ref = db.instance_type_get_by_flavor_id(
@@ -1974,7 +1954,7 @@ class ComputeTestCase(BaseTestCase):
inst_ref['uuid'], 'pre-migrating')
instance = jsonutils.to_primitive(inst_ref)
- db.instance_update(self.context, instance["uuid"],
+ db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(self.context, instance=instance,
migration=migration_ref,
@@ -1984,14 +1964,14 @@ class ComputeTestCase(BaseTestCase):
disk_info={}, image={}, instance=instance)
# Prove that the instance size is now the new size
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
+ inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
instance_type_ref = db.instance_type_get(self.context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '3')
# Finally, revert and confirm the old flavor has been applied
rpcinst = jsonutils.to_primitive(inst_ref)
- db.instance_update(self.context, instance["uuid"],
+ db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESIZE_REVERTING})
self.compute.revert_resize(self.context,
migration_id=migration_ref['id'], instance=rpcinst,
@@ -2006,14 +1986,14 @@ class ComputeTestCase(BaseTestCase):
fake_setup_networks_on_host)
self.compute.finish_revert_resize(self.context,
- migration_id=migration_ref['id'], instance=rpcinst,
- reservations=reservations)
+ migration=jsonutils.to_primitive(migration_ref),
+ instance=rpcinst, reservations=reservations)
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], None)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
+ inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
instance_type_ref = db.instance_type_get(self.context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
@@ -2076,8 +2056,6 @@ class ComputeTestCase(BaseTestCase):
"""Confirm check_can_live_migrate_source works on positive path"""
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
- inst_id = inst_ref["id"]
- dest = "fake_host_1"
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.compute.driver,
@@ -2096,8 +2074,6 @@ class ComputeTestCase(BaseTestCase):
"""Confirm check_can_live_migrate_destination works on positive path"""
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
- inst_id = inst_ref["id"]
- dest = "fake_host_1"
compute_info = {"compute": "info"}
self.mox.StubOutWithMock(self.compute,
@@ -2132,8 +2108,6 @@ class ComputeTestCase(BaseTestCase):
"""Confirm check_can_live_migrate_destination works on positive path"""
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
- inst_id = inst_ref["id"]
- dest = "fake_host_1"
compute_info = {"compute": "info"}
self.mox.StubOutWithMock(self.compute,
@@ -2160,8 +2134,6 @@ class ComputeTestCase(BaseTestCase):
"""Confirm check_can_live_migrate_destination works on positive path"""
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
- inst_id = inst_ref["id"]
- dest = "fake_host_1"
compute_info = {"compute": "info"}
self.mox.StubOutWithMock(self.compute,
@@ -2197,7 +2169,6 @@ class ComputeTestCase(BaseTestCase):
"""Confirm raising exception if instance doesn't have fixed_ip."""
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance())
- inst_id = instance["id"]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForInstance,
@@ -2215,7 +2186,6 @@ class ComputeTestCase(BaseTestCase):
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'dummy'}))
- inst_id = instance['id']
c = context.get_admin_context()
nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
@@ -2411,7 +2381,7 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(task_states.STOPPING, instances[0]['task_state'])
+ self.assertEqual(task_states.POWERING_OFF, instances[0]['task_state'])
def test_add_instance_fault(self):
exc_info = None
@@ -3091,7 +3061,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -3100,7 +3070,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.start(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.STARTING)
+ self.assertEqual(instance['task_state'], task_states.POWERING_ON)
db.instance_destroy(self.context, instance['uuid'])
@@ -3115,7 +3085,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.stop(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.STOPPING)
+ self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
db.instance_destroy(self.context, instance['uuid'])
@@ -3147,7 +3117,7 @@ class ComputeAPITestCase(BaseTestCase):
None)
start_check_state(instance['uuid'], power_state.NOSTATE,
- vm_states.STOPPED, task_states.STARTING)
+ vm_states.STOPPED, task_states.POWERING_ON)
db.instance_destroy(self.context, instance['uuid'])
@@ -3264,7 +3234,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
db.instance_destroy(self.context, instance['uuid'])
@@ -3291,7 +3261,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
@@ -3382,7 +3352,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
@@ -3392,7 +3362,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.restore(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_ON)
+ self.assertEqual(instance['task_state'], task_states.RESTORING)
db.instance_destroy(self.context, instance['uuid'])
@@ -3611,7 +3581,7 @@ class ComputeAPITestCase(BaseTestCase):
"""Ensure a snapshot of an instance can be created"""
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
- {'extra_param': 'value1'})
+ {'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
properties = image['properties']
@@ -3640,7 +3610,7 @@ class ComputeAPITestCase(BaseTestCase):
instance['instance_type'].update(inst_params)
image = self.compute_api.snapshot(self.context, instance, 'snap1',
- {'extra_param': 'value1'})
+ {'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertEqual(image['min_ram'], 256)
@@ -3672,7 +3642,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
- {'extra_param': 'value1'})
+ {'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertEqual(image['min_ram'], 512)
@@ -3701,7 +3671,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
- {'extra_param': 'value1'})
+ {'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertFalse('min_ram' in image)
@@ -3729,7 +3699,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
- {'extra_param': 'value1'})
+ {'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
@@ -5099,7 +5069,8 @@ class ComputeAggrTestCase(BaseTestCase):
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
- self.compute.add_aggregate_host(self.context, self.aggr.id, "host")
+ self.compute.add_aggregate_host(self.context, "host",
+ aggregate=jsonutils.to_primitive(self.aggr))
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
@@ -5117,15 +5088,16 @@ class ComputeAggrTestCase(BaseTestCase):
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate.id, self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
- self.compute.add_aggregate_host(self.context, self.aggr.id,
- "the_host", slave_info="SLAVE_INFO")
+ self.compute.add_aggregate_host(self.context, "the_host",
+ slave_info="SLAVE_INFO",
+ aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
@@ -5248,6 +5220,23 @@ class ComputePolicyTestCase(BaseTestCase):
self.compute_api.get_instance_faults,
self.context, instances)
+ def test_force_host_fail(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": [["role:fake"]]}
+ self._set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, None, '1',
+ availability_zone='1:1')
+
+ def test_force_host_pass(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": []}
+ self._set_rules(rules)
+
+ self.compute_api.create(self.context, None, '1',
+ availability_zone='1:1')
+
class ComputeHostAPITestCase(BaseTestCase):
def setUp(self):
@@ -5569,8 +5558,6 @@ class ComputeReschedulingTestCase(BaseTestCase):
scheduler_method = self.compute.scheduler_rpcapi.run_instance
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
- task_state = task_states.SCHEDULING
-
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, self.expected_task_state)
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 6276c47ac..dfeebf0d0 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -42,7 +42,7 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
class FakeVirtDriver(driver.ComputeDriver):
- def __init__(self):
+ def __init__(self, virtapi):
self.memory_mb = 5
self.local_gb = 6
self.vcpus = 1
@@ -148,9 +148,9 @@ class BaseTestCase(test.TestCase):
host = "fakehost"
if unsupported:
- driver = UnsupportedVirtDriver()
+ driver = UnsupportedVirtDriver(None)
else:
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
tracker = resource_tracker.ResourceTracker(host, driver)
return tracker
@@ -293,12 +293,12 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(1, self.tracker.compute_node['current_workload'])
def testFreeRamResourceValue(self):
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def testFreeDiskResourceValue(self):
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 7b3d58909..1edfa771f 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -95,8 +95,9 @@ class ComputeRpcAPITestCase(test.TestCase):
self.assertEqual(arg, expected_arg)
def test_add_aggregate_host(self):
- self._test_compute_api('add_aggregate_host', 'cast', aggregate_id='id',
- host_param='host', host='host', slave_info={}, version='2.2')
+ self._test_compute_api('add_aggregate_host', 'cast',
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={}, version='2.14')
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
@@ -143,8 +144,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
- instance=self.fake_instance, migration_id='id', host='host',
- reservations=list('fake_res'))
+ instance=self.fake_instance, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'), version='2.13')
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
@@ -204,6 +205,14 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('power_on_instance', 'cast',
instance=self.fake_instance)
+ def test_soft_delete_instance(self):
+ self._test_compute_api('soft_delete_instance', 'cast',
+ instance=self.fake_instance)
+
+ def test_restore_instance(self):
+ self._test_compute_api('restore_instance', 'cast',
+ instance=self.fake_instance)
+
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance, block_migration='block_migration',
@@ -285,8 +294,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
- instance=self.fake_instance, migration_id='id', host='host',
- reservations=list('fake_res'))
+ instance=self.fake_instance, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'), version='2.12')
def test_rollback_live_migration_at_destination(self):
self._test_compute_api('rollback_live_migration_at_destination',
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 70c166aa9..d1c166ba1 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,7 +20,7 @@ from nova import flags
FLAGS = flags.FLAGS
-flags.DECLARE('compute_scheduler_driver', 'nova.scheduler.multi')
+flags.DECLARE('scheduler_driver', 'nova.scheduler.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
flags.DECLARE('network_size', 'nova.network.manager')
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index f3bd944da..b1b2c076e 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -69,7 +69,7 @@ class _IntegratedTestBase(test.TestCase):
self.stub_module('crypto', fake_crypto)
nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.flags(compute_scheduler_driver='nova.scheduler.'
+ self.flags(scheduler_driver='nova.scheduler.'
'chance.ChanceScheduler')
# set up services
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 9bbd7ba92..04f646ef0 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -19,6 +19,7 @@
from nova import context
from nova import network
+from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import rpc
from nova import test
@@ -78,3 +79,66 @@ class ApiTestCase(test.TestCase):
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
+
+ def _stub_migrate_instance_calls(self, method, multi_host, info):
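+ # Stub the network rpcapi method under test, the multi-host check and
+ # the floating IP lookup, recording the kwargs the rpcapi method
+ # receives so the tests can compare them against what is expected.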
+ fake_instance_type = {'rxtx_factor': 'fake_factor'}
+ fake_instance = {'uuid': 'fake_uuid',
+ 'instance_type': fake_instance_type,
+ 'project_id': 'fake_project_id'}
+ fake_migration = {'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest'}
+
+ def fake_mig_inst_method(*args, **kwargs):
+ info['kwargs'] = kwargs
+
+ def fake_is_multi_host(*args, **kwargs):
+ return multi_host
+
+ def fake_get_floaters(*args, **kwargs):
+ return ['fake_float1', 'fake_float2']
+
+ self.stubs.Set(network_rpcapi.NetworkAPI, method,
+ fake_mig_inst_method)
+ self.stubs.Set(self.network_api, '_is_multi_host',
+ fake_is_multi_host)
+ self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
+ fake_get_floaters)
+
+ expected = {'instance_uuid': 'fake_uuid',
+ 'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest',
+ 'rxtx_factor': 'fake_factor',
+ 'project_id': 'fake_project_id',
+ 'floating_addresses': None}
+ if multi_host:
+ expected['host'] = 'fake_compute_dest'
+ expected['floating_addresses'] = ['fake_float1', 'fake_float2']
+ return fake_instance, fake_migration, expected
+
+ def test_migrate_instance_start_with_multihost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', True, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_start_without_multihost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', False, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_with_multihost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', True, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_without_multihost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', False, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index edb477b70..a8f29e012 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -351,35 +351,37 @@ class TestQuantumv2(test.TestCase):
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1'}})
- req_net_ids.append('my_netid1')
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
+ expected_network_order = req_net_ids
+ else:
+ expected_network_order = [n['id'] for n in nets]
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
mox_list_network_params = dict(tenant_id=self.instance['project_id'],
shared=False)
if search_ids:
- mox_list_network_params['id'] = search_ids
+ mox_list_network_params['id'] = mox.SameElementsAs(search_ids)
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': nets})
mox_list_network_params = dict(shared=True)
if search_ids:
- mox_list_network_params['id'] = search_ids
+ mox_list_network_params['id'] = mox.SameElementsAs(search_ids)
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': []})
- for network in nets:
+ for net_id in expected_network_order:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
- port = ports.get(network['id'], None)
+ port = ports.get(net_id, None)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
@@ -387,10 +389,10 @@ class TestQuantumv2(test.TestCase):
).AndReturn(
{'port': port})
else:
- fixed_ip = fixed_ips.get(network['id'])
+ fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
port_req_body['port']['fixed_ip'] = fixed_ip
- port_req_body['port']['network_id'] = network['id']
+ port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
@@ -410,8 +412,9 @@ class TestQuantumv2(test.TestCase):
def test_allocate_for_instance_with_requested_networks(self):
- # specify only first and last network
+ # specify all three networks, in a non-default order
- requested_networks = [(net['id'], None, None)
- for net in (self.nets3[0], self.nets3[-1])]
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets3[1], self.nets3[0], self.nets3[2])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
@@ -902,3 +905,33 @@ class TestQuantumv2(test.TestCase):
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
+
+
+class TestQuantumv2ModuleMethods(test.TestCase):
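+ # _ensure_requested_network_ordering() reorders a list of networks in
+ # place to match the caller-requested ordering; a preference of None
+ # must leave the list untouched.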
+ def test_ensure_requested_network_ordering_no_preference_ids(self):
+ l = [1, 2, 3]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x,
+ l,
+ None)
+
+ self.assertEqual(l, [1, 2, 3])
+
+ def test_ensure_requested_network_ordering_no_preference(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ None)
+
+ self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
+
+ def test_ensure_requested_network_ordering_with_preference(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ [1, 2, 3])
+
+ self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index a087ba97f..de3f19cea 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -36,11 +36,17 @@ class NetworkRpcAPITestCase(test.TestCase):
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_topic = FLAGS.network_topic
expected_msg = rpcapi.make_msg(method, **kwargs)
+ if 'source_compute' in expected_msg['args']:
+ # Fix up for migrate_instance_* calls.
+ args = expected_msg['args']
+ args['source'] = args.pop('source_compute')
+ args['dest'] = args.pop('dest_compute')
targeted_methods = [
'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
'_rpc_allocate_fixed_ip', 'deallocate_fixed_ip',
'_associate_floating_ip', '_disassociate_floating_ip',
- 'lease_fixed_ip', 'release_fixed_ip'
+ 'migrate_instance_start', 'migrate_instance_finish',
]
if method in targeted_methods and 'host' in kwargs:
if method != 'deallocate_fixed_ip':
@@ -258,3 +264,45 @@ class NetworkRpcAPITestCase(test.TestCase):
def test_release_fixed_ip(self):
self._test_network_api('release_fixed_ip', rpc_method='cast',
address='fake_addr', host='fake_host')
+
+ def test_migrate_instance_start(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ version='1.2')
+
+ def test_migrate_instance_start_multi_host(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
+
+ def test_migrate_instance_finish(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ version='1.2')
+
+ def test_migrate_instance_finish_multi_host(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 29466fbe1..ba255c32c 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -28,19 +28,19 @@ from nova.scheduler import host_manager
COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
- free_disk_mb=512, local_gb_used=0,
+ free_disk_mb=512, local_gb_used=0, updated_at=None,
service=dict(host='host1', disabled=False)),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
- free_disk_mb=1024, local_gb_used=0,
+ free_disk_mb=1024, local_gb_used=0, updated_at=None,
service=dict(host='host2', disabled=True)),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
- free_disk_mb=3072, local_gb_used=0,
+ free_disk_mb=3072, local_gb_used=0, updated_at=None,
service=dict(host='host3', disabled=False)),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
- free_disk_mb=8192, local_gb_used=0,
+ free_disk_mb=8192, local_gb_used=0, updated_at=None,
service=dict(host='host4', disabled=False)),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index b6bdd359b..be6bc3317 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -224,7 +224,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
hostinfo.update_from_compute_node(dict(memory_mb=1000,
local_gb=0, vcpus=1, disk_available_least=1000,
free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
- local_gb_used=0))
+ local_gb_used=0, updated_at=None))
self.assertEquals(872, fn(hostinfo, {}))
def test_max_attempts(self):
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 5074d188a..74c24d56b 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -264,7 +264,8 @@ class HostStateTestCase(test.TestCase):
dict(key='io_workload', value='42'),
]
compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
- local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0)
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None)
host = host_manager.HostState("fakehost", "faketopic")
host.update_from_compute_node(compute)
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index 04ab67675..ee9e0bbd3 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -36,14 +36,6 @@ class FakeComputeScheduler(driver.Scheduler):
pass
-class FakeVolumeScheduler(driver.Scheduler):
- is_fake_volume = True
-
- def __init__(self):
- super(FakeVolumeScheduler, self).__init__()
- self.is_update_caps_called = False
-
-
class FakeDefaultScheduler(driver.Scheduler):
is_fake_default = True
@@ -61,18 +53,15 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
super(MultiDriverTestCase, self).setUp()
base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
compute_cls_name = base_name % 'FakeComputeScheduler'
- volume_cls_name = base_name % 'FakeVolumeScheduler'
default_cls_name = base_name % 'FakeDefaultScheduler'
self.flags(compute_scheduler_driver=compute_cls_name,
- volume_scheduler_driver=volume_cls_name,
default_scheduler_driver=default_cls_name)
self._manager = multi.MultiScheduler()
def test_drivers_inited(self):
mgr = self._manager
- self.assertEqual(len(mgr.drivers), 3)
+ self.assertEqual(len(mgr.drivers), 2)
self.assertTrue(mgr.drivers['compute'].is_fake_compute)
- self.assertTrue(mgr.drivers['volume'].is_fake_volume)
self.assertTrue(mgr.drivers['default'].is_fake_default)
def test_update_service_capabilities(self):
@@ -84,10 +73,8 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
'update_service_capabilities',
fake_update_service_capabilities)
self.assertFalse(mgr.drivers['compute'].is_update_caps_called)
- self.assertFalse(mgr.drivers['volume'].is_update_caps_called)
mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
- self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
class SimpleSchedulerTestCase(MultiDriverTestCase):
@@ -99,10 +86,8 @@ class SimpleSchedulerTestCase(MultiDriverTestCase):
super(SimpleSchedulerTestCase, self).setUp()
base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
compute_cls_name = base_name % 'FakeComputeScheduler'
- volume_cls_name = 'nova.scheduler.simple.SimpleScheduler'
default_cls_name = base_name % 'FakeDefaultScheduler'
self.flags(compute_scheduler_driver=compute_cls_name,
- volume_scheduler_driver=volume_cls_name,
default_scheduler_driver=default_cls_name)
self._manager = multi.MultiScheduler()
@@ -117,11 +102,9 @@ class SimpleSchedulerTestCase(MultiDriverTestCase):
self.assertFalse(mgr.drivers['compute'].is_update_caps_called)
mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
- self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
def test_drivers_inited(self):
mgr = self._manager
- self.assertEqual(len(mgr.drivers), 3)
+ self.assertEqual(len(mgr.drivers), 2)
self.assertTrue(mgr.drivers['compute'].is_fake_compute)
- self.assertTrue(mgr.drivers['volume'] is not None)
self.assertTrue(mgr.drivers['default'].is_fake_default)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index 100812175..8cf741118 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -83,9 +83,3 @@ class SchedulerRpcAPITestCase(test.TestCase):
self._test_scheduler_api('update_service_capabilities',
rpc_method='fanout_cast', service_name='fake_name',
host='fake_host', capabilities='fake_capabilities')
-
- def test_create_volume(self):
- self._test_scheduler_api('create_volume',
- rpc_method='cast', volume_id="fake_volume",
- snapshot_id="fake_snapshots", image_id="fake_image",
- version='2.2')
diff --git a/nova/tests/test_deprecated.py b/nova/tests/test_deprecated.py
deleted file mode 100644
index ebc6fed93..000000000
--- a/nova/tests/test_deprecated.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2010 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.common import deprecated
-from nova import exception
-from nova import test
-
-
-class DeprecatedConfigTestCase(test.TestCase):
- def setUp(self):
- super(DeprecatedConfigTestCase, self).setUp()
- self.logbuffer = ""
-
- def local_log(msg):
- self.logbuffer = msg
-
- self.stubs.Set(deprecated.LOG, 'warn', local_log)
-
- def test_deprecated(self):
- deprecated.warn('test')
- self.assertEqual(self.logbuffer, 'Deprecated Config: test')
-
- def test_deprecated_fatal(self):
- self.flags(fatal_deprecations=True)
- self.assertRaises(exception.DeprecatedConfig,
- deprecated.warn, "test2")
- self.assertEqual(self.logbuffer, 'Deprecated Config: test2')
-
- def test_deprecated_logs_only_once(self):
- deprecated.warn('only once!')
- deprecated.warn('only once!')
- deprecated.warn('only once!')
- self.assertEqual(self.logbuffer, 'Deprecated Config: only once!')
diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py
index 973d54a1f..15dec56b9 100644
--- a/nova/tests/test_flags.py
+++ b/nova/tests/test_flags.py
@@ -17,6 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova import test
@@ -44,7 +45,7 @@ class FlagsTestCase(test.TestCase):
self.assert_('runtime_answer' not in FLAGS)
argv = ['flags_test', 'extra_arg', '--runtime_answer=60']
- args = flags.parse_args(argv, default_config_files=[])
+ args = config.parse_args(argv, default_config_files=[])
self.assertEqual(len(args), 3)
self.assertEqual(argv, args)
@@ -60,7 +61,7 @@ class FlagsTestCase(test.TestCase):
default='val',
help='desc'))
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
- args = flags.parse_args(argv, default_config_files=[])
+ args = config.parse_args(argv, default_config_files=[])
self.assert_('duplicate_answer' not in FLAGS)
self.assert_(FLAGS.duplicate_answer_long, 60)
@@ -68,7 +69,7 @@ class FlagsTestCase(test.TestCase):
FLAGS.clear()
FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
default=60, help='desc'))
- args = flags.parse_args(argv, default_config_files=[])
+ args = config.parse_args(argv, default_config_files=[])
self.assertEqual(FLAGS.duplicate_answer, 60)
self.assertEqual(FLAGS.duplicate_answer_long, 'val')
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index e3f9fde4c..6d2396350 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -66,7 +66,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
vswitch_name='external')
self._hypervutils = hypervutils.HyperVUtils()
- self._conn = driver_hyperv.HyperVDriver()
+ self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
diff --git a/nova/tests/test_image_utils.py b/nova/tests/test_image_utils.py
index 711f1c202..fac0422bf 100644
--- a/nova/tests/test_image_utils.py
+++ b/nova/tests/test_image_utils.py
@@ -21,7 +21,7 @@ from nova.virt import images
class ImageUtilsTestCase(test.TestCase):
- def test_qemu_info(self):
+ def test_qemu_info_canon(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
@@ -35,14 +35,35 @@ blah BLAH: bb
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
- self.assertEquals('disk.config', image_info['image'])
- self.assertEquals('raw', image_info['file format'])
- self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
- self.assertEquals('96K', image_info['disk size'])
- self.assertEquals('bb', image_info['blah blah'])
- self.assertEquals("65536", image_info['cluster_size'])
+ self.assertEquals('disk.config', image_info.image)
+ self.assertEquals('raw', image_info.file_format)
+ self.assertEquals(67108864, image_info.virtual_size)
+ self.assertEquals(98304, image_info.disk_size)
+ self.assertEquals(65536, image_info.cluster_size)
- def test_qemu_info_snap(self):
+ def test_qemu_info_canon2(self):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: QCOW2
+virtual size: 67108844
+cluster_size: 65536
+disk size: 963434
+backing file: /var/lib/nova/a328c7998805951a_2
+"""
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((example_output, ''))
+ self.mox.ReplayAll()
+ image_info = images.qemu_img_info(path)
+ self.assertEquals('disk.config', image_info.image)
+ self.assertEquals('qcow2', image_info.file_format)
+ self.assertEquals(67108844, image_info.virtual_size)
+ self.assertEquals(963434, image_info.disk_size)
+ self.assertEquals(65536, image_info.cluster_size)
+ self.assertEquals('/var/lib/nova/a328c7998805951a_2',
+ image_info.backing_file)
+
+ def test_qemu_backing_file_actual(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
@@ -52,18 +73,63 @@ disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
+"""
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((example_output, ''))
+ self.mox.ReplayAll()
+ image_info = images.qemu_img_info(path)
+ self.assertEquals('disk.config', image_info.image)
+ self.assertEquals('raw', image_info.file_format)
+ self.assertEquals(67108864, image_info.virtual_size)
+ self.assertEquals(98304, image_info.disk_size)
+ self.assertEquals(1, len(image_info.snapshots))
+ self.assertEquals('/b/3a988059e51a_2',
+ image_info.backing_file)
+
+ def test_qemu_info_convert(self):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+junk stuff: bbb
+"""
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((example_output, ''))
+ self.mox.ReplayAll()
+ image_info = images.qemu_img_info(path)
+ self.assertEquals('disk.config', image_info.image)
+ self.assertEquals('raw', image_info.file_format)
+ self.assertEquals(67108864, image_info.virtual_size)
+ self.assertEquals(98304, image_info.disk_size)
+
+ def test_qemu_info_snaps(self):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
- self.assertEquals('disk.config', image_info['image'])
- self.assertEquals('raw', image_info['file format'])
- self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
- self.assertEquals('96K', image_info['disk size'])
- self.assertEquals("65536", image_info['cluster_size'])
- # This would be triggered if the split encountered this section
- self.assertNotIn('snapshot list', image_info)
- bad_cap = '1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10'
- self.assertNotIn(bad_cap, image_info)
+ self.assertEquals('disk.config', image_info.image)
+ self.assertEquals('raw', image_info.file_format)
+ self.assertEquals(67108864, image_info.virtual_size)
+ self.assertEquals(98304, image_info.disk_size)
+ self.assertEquals(3, len(image_info.snapshots))
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 5a158c4b2..ea35ff29e 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -48,6 +48,7 @@ import nova.tests.image.fake
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import driver
+from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import config
@@ -147,7 +148,7 @@ class LibvirtVolumeTestCase(test.TestCase):
def get_all_block_devices(self):
return []
- self.fake_conn = FakeLibvirtDriver()
+ self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
self.connr = {
'ip': '127.0.0.1',
'initiator': 'fake_initiator',
@@ -619,7 +620,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(my_ip=ip)
self.flags(host=host)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
@@ -632,7 +633,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertDictMatch(expected, result)
def test_get_guest_config(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -676,7 +677,7 @@ class LibvirtConnTestCase(test.TestCase):
"catchup")
def test_get_guest_config_with_two_nics(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -708,7 +709,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None,
@@ -728,7 +729,7 @@ class LibvirtConnTestCase(test.TestCase):
config.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
@@ -746,7 +747,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -764,7 +765,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -779,7 +780,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -791,7 +792,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -806,7 +807,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
@@ -825,7 +826,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
@@ -844,7 +845,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
@@ -863,7 +864,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect, "getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
@@ -894,7 +895,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
@@ -914,7 +915,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
@@ -1038,7 +1039,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
@@ -1054,7 +1055,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# None should be listed, since we fake deleted the last one
self.assertEquals(len(instances), 0)
@@ -1107,7 +1108,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
@@ -1168,7 +1169,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
@@ -1201,7 +1202,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1240,7 +1241,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1279,7 +1280,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1319,7 +1320,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1354,7 +1355,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1390,7 +1391,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1428,7 +1429,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1466,7 +1467,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1499,7 +1500,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1545,7 +1546,7 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
@@ -1555,7 +1556,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, network_info, None, False)
tree = etree.fromstring(xml)
@@ -1572,7 +1573,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, 'lxc:///')
@@ -1615,7 +1616,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
@@ -1651,9 +1652,8 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1663,9 +1663,8 @@ class LibvirtConnTestCase(test.TestCase):
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1677,7 +1676,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref,
network_info,
image_meta,
@@ -1704,9 +1703,8 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@@ -1818,7 +1816,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, expected_uri)
@@ -1847,7 +1845,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
@@ -1879,7 +1877,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1907,7 +1905,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
@@ -1935,7 +1933,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
@@ -1961,7 +1959,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
@@ -1980,7 +1978,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
@@ -1995,7 +1993,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2015,7 +2013,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
@@ -2031,7 +2029,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2045,7 +2043,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2100,7 +2098,7 @@ class LibvirtConnTestCase(test.TestCase):
#start test
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
@@ -2120,7 +2118,7 @@ class LibvirtConnTestCase(test.TestCase):
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
@@ -2174,7 +2172,7 @@ class LibvirtConnTestCase(test.TestCase):
user_id=None).AndReturn(None)
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson)
@@ -2227,7 +2225,7 @@ class LibvirtConnTestCase(test.TestCase):
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref.name)
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
@@ -2284,7 +2282,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -2343,7 +2341,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
@@ -2388,12 +2386,12 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
def test_get_host_ip_addr(self):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
@@ -2402,7 +2400,7 @@ class LibvirtConnTestCase(test.TestCase):
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities")
@@ -2422,7 +2420,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.UnsetStubs()
def test_volume_in_mapping(self):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
@@ -2461,7 +2459,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
@@ -2480,7 +2478,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2501,7 +2499,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2524,7 +2522,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2546,7 +2544,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2564,7 +2562,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
raise exception.InstanceNotFound()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2574,7 +2572,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_available_least_handles_missing(self):
"""Ensure destroy calls managedSaveRemove for saved instance"""
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
return ['fake']
@@ -2589,7 +2587,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(result, space / 1024 ** 3)
def test_cpu_info(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = config.LibvirtConfigCPU()
@@ -2681,7 +2679,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'vda_read': 688640L,
'vda_read_req': 169L,
@@ -2761,7 +2759,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2835,7 +2833,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2911,7 +2909,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2993,7 +2991,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -3024,7 +3022,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(actual, expect)
def test_get_instance_capabilities(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = config.LibvirtConfigCaps()
@@ -3107,11 +3105,13 @@ class HostStateTestCase(test.TestCase):
return HostStateTestCase.instance_caps
def test_update_status(self):
+ virtapi = fake.FakeVirtAPI()
self.mox.StubOutWithMock(libvirt_driver, 'LibvirtDriver')
- libvirt_driver.LibvirtDriver(True).AndReturn(self.FakeConnection())
+ libvirt_driver.LibvirtDriver(virtapi, True).AndReturn(
+ self.FakeConnection())
self.mox.ReplayAll()
- hs = libvirt_driver.HostState(True)
+ hs = libvirt_driver.HostState(virtapi, True)
stats = hs._stats
self.assertEquals(stats["vcpus"], 1)
self.assertEquals(stats["vcpus_used"], 0)
@@ -3891,7 +3891,8 @@ class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
- self.libvirtconnection = libvirt_driver.LibvirtDriver(read_only=True)
+ self.libvirtconnection = libvirt_driver.LibvirtDriver(
+ fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance"""
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index f40ef534c..83a7514db 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -117,7 +117,7 @@ class PowerVMDriverTestCase(test.TestCase):
super(PowerVMDriverTestCase, self).setUp()
self.stubs.Set(operator, 'get_powervm_operator',
fake_get_powervm_operator)
- self.powervm_connection = powervm_driver.PowerVMDriver()
+ self.powervm_connection = powervm_driver.PowerVMDriver(None)
self.instance = self._create_instance()
def _create_instance(self):
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 5e357deeb..3f31e8e01 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -21,6 +21,7 @@ Unit Tests for remote procedure calls using queue
"""
import mox
+import sys
from nova import context
@@ -109,82 +110,95 @@ class ServiceTestCase(test.TestCase):
def setUp(self):
super(ServiceTestCase, self).setUp()
+ self.host = 'foo'
+ self.binary = 'nova-fake'
+ self.topic = 'fake'
self.mox.StubOutWithMock(service, 'db')
def test_create(self):
- host = 'foo'
- binary = 'nova-fake'
- topic = 'fake'
# NOTE(vish): Create was moved out of mox replay to make sure that
# the looping calls are created in StartService.
- app = service.Service.create(host=host, binary=binary, topic=topic)
+ app = service.Service.create(host=self.host, binary=self.binary,
+ topic=self.topic)
self.assert_(app)
- def test_report_state_newly_disconnected(self):
- host = 'foo'
- binary = 'bar'
- topic = 'test'
- service_create = {'host': host,
- 'binary': binary,
- 'topic': topic,
+ def _service_start_mocks(self):
+ service_create = {'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
'report_count': 0,
'availability_zone': 'nova'}
- service_ref = {'host': host,
- 'binary': binary,
- 'topic': topic,
+ service_ref = {'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
+ self.host, self.binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
- service_create).AndReturn(service_ref)
+ service_create).AndReturn(service_ref)
+ return service_ref
+
+ def test_init_and_start_hooks(self):
+ self.manager_mock = self.mox.CreateMock(FakeManager)
+ self.mox.StubOutWithMock(sys.modules[__name__],
+ 'FakeManager', use_mock_anything=True)
+ self.mox.StubOutWithMock(self.manager_mock, 'init_host')
+ self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
+ self.mox.StubOutWithMock(self.manager_mock, 'create_rpc_dispatcher')
+ self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
+
+ FakeManager(host=self.host).AndReturn(self.manager_mock)
+
+ # init_host is called before any service record is created
+ self.manager_mock.init_host()
+ self._service_start_mocks()
+ # pre_start_hook is called after the service record is created,
+ # but before the RPC consumer is created
+ self.manager_mock.pre_start_hook()
+ self.manager_mock.create_rpc_dispatcher()
+ # post_start_hook is called after the RPC consumer is created.
+ self.manager_mock.post_start_hook()
+
+ self.mox.ReplayAll()
+
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.test_service.FakeManager')
+ serv.start()
+
+ def test_report_state_newly_disconnected(self):
+ self._service_start_mocks()
+
service.db.service_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(Exception())
self.mox.ReplayAll()
- serv = service.Service(host,
- binary,
- topic,
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
'nova.tests.test_service.FakeManager')
serv.start()
serv.report_state()
self.assert_(serv.model_disconnected)
def test_report_state_newly_connected(self):
- host = 'foo'
- binary = 'bar'
- topic = 'test'
- service_create = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova'}
- service_ref = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova',
- 'id': 1}
+ service_ref = self._service_start_mocks()
- service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
- service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
service_ref['id']).AndReturn(service_ref)
service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
- serv = service.Service(host,
- binary,
- topic,
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
'nova.tests.test_service.FakeManager')
serv.start()
serv.model_disconnected = True
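The new test_init_and_start_hooks pins down the ordering that Service.start() must follow. A minimal sketch of that ordering, assuming only the manager interface mocked above (_create_service_record and _create_rpc_consumer are illustrative helper names, not the real implementation):

    def start(self):
        # init_host runs before any service record exists
        self.manager.init_host()
        service_ref = self._create_service_record()    # illustrative helper
        # pre_start_hook runs once the record exists, before RPC is up
        self.manager.pre_start_hook()
        dispatcher = self.manager.create_rpc_dispatcher()
        self._create_rpc_consumer(dispatcher)           # illustrative helper
        # post_start_hook runs only after the RPC consumer is consuming
        self.manager.post_start_hook()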
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index f97d6eeb7..8b883f755 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -30,6 +30,7 @@ import nova
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import test
from nova import utils
@@ -37,6 +38,41 @@ from nova import utils
FLAGS = flags.FLAGS
+class ByteConversionTest(test.TestCase):
+ def test_string_conversions(self):
+ working_examples = {
+ '1024KB': 1048576,
+ '1024TB': 1125899906842624,
+ '1024K': 1048576,
+ '1024T': 1125899906842624,
+ '1TB': 1099511627776,
+ '1T': 1099511627776,
+ '1KB': 1024,
+ '1K': 1024,
+ '1B': 1,
+ '1': 1,
+ '1MB': 1048576,
+ '7MB': 7340032,
+ '0MB': 0,
+ '0KB': 0,
+ '0TB': 0,
+ '': 0,
+ }
+ for (in_value, expected_value) in working_examples.items():
+ b_value = utils.to_bytes(in_value)
+ self.assertEquals(expected_value, b_value)
+ if len(in_value):
+ in_value = "-" + in_value
+ b_value = utils.to_bytes(in_value)
+ self.assertEquals(expected_value * -1, b_value)
+ breaking_examples = [
+ 'junk1KB', '1023BBBB',
+ ]
+ for v in breaking_examples:
+ self.assertRaises(TypeError, utils.to_bytes, v)
+
+
class ExecuteTestCase(test.TestCase):
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
@@ -475,7 +511,7 @@ class GenericUtilsTestCase(test.TestCase):
class IsUUIDLikeTestCase(test.TestCase):
def assertUUIDLike(self, val, expected):
- result = utils.is_uuid_like(val)
+ result = uuidutils.is_uuid_like(val)
self.assertEqual(result, expected)
def test_good_uuid(self):
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index ec87c9111..9d48cdf06 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -20,12 +20,14 @@ import sys
import traceback
from nova.compute.manager import ComputeManager
+from nova import db
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
+from nova.virt import fake
LOG = logging.getLogger(__name__)
@@ -171,7 +173,8 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
- self.connection = importutils.import_object(self.driver_module, '')
+ self.connection = importutils.import_object(self.driver_module,
+ fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
@@ -266,7 +269,8 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_poll_rebooting_instances(self):
- self.connection.poll_rebooting_instances(10)
+ instances = [self._get_running_instance()]
+ self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_poll_rescued_instances(self):
@@ -286,17 +290,33 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.power_off(instance_ref)
@catch_notimplementederror
- def test_test_power_on_running(self):
+ def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(instance_ref)
@catch_notimplementederror
- def test_test_power_on_powered_off(self):
+ def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(instance_ref)
@catch_notimplementederror
+ def test_soft_delete(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_soft_deleted(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@@ -507,17 +527,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
class AbstractDriverTestCase(_VirtDriverTestCase):
def setUp(self):
- from nova.virt.driver import ComputeDriver
-
self.driver_module = "nova.virt.driver.ComputeDriver"
-
- # TODO(sdague): the abstract driver doesn't have a constructor,
- # add one now that the loader loads classes directly
- def __new_init__(self, read_only=False):
- super(ComputeDriver, self).__init__()
-
- ComputeDriver.__init__ = __new_init__
-
super(AbstractDriverTestCase, self).setUp()
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index aa28f2762..757ec2bf2 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -50,7 +50,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMWareESXDriver(False)
+ self.conn = driver.VMWareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index bd1bcd4f1..54f7948b6 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -27,6 +27,8 @@ import re
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
@@ -40,6 +42,7 @@ from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
from nova.tests.xenapi import stubs
+from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
@@ -231,7 +234,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
@@ -249,7 +252,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
@@ -283,7 +286,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
@@ -822,7 +825,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
@@ -839,7 +842,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_unrescue(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
@@ -847,7 +850,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
@@ -863,25 +866,25 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(instance, None, "HARD")
def test_reboot_soft(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(instance, None, "SOFT")
def test_reboot_halted(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance.name, 'Halted')
conn.reboot(instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance.name)
@@ -890,11 +893,42 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance.name, 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
+ def test_maintenance_mode(self):
+ real_call_xenapi = self.conn._session.call_xenapi
+ instance = self._create_instance(spawn=True)
+ api_calls = {}
+
+ # Record all the xenapi calls, and return a fake list of hosts
+ # for the host.get_all call
+ def fake_call_xenapi(method, *args):
+ api_calls[method] = args
+ if method == 'host.get_all':
+ return ['foo', 'bar', 'baz']
+ return real_call_xenapi(method, *args)
+ self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
+
+ # Always find the 'bar' destination host
+ def fake_host_find(context, session, src, dst):
+ return 'bar'
+ self.stubs.Set(host, '_host_find', fake_host_find)
+
+ result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
+ self.assertEqual(result, 'on_maintenance')
+
+ # We expect a VM.pool_migrate call to have been made to migrate
+ # our instance to the 'bar' host
+ self.assertTrue('VM.pool_migrate' in api_calls)
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
@@ -1019,7 +1053,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(6, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
@@ -1038,7 +1072,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(1, 4, 99),
product_brand='XCP')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
@@ -1049,7 +1083,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
@@ -1062,7 +1096,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
@@ -1092,7 +1126,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
product_version=(4, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1127,7 +1161,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
product_version=(4, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1149,7 +1183,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1165,7 +1199,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
@@ -1256,7 +1290,7 @@ class XenAPIHostTestCase(stubs.XenAPITestBase):
xenapi_connection_password='test_pass')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.create_local_srs()
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def test_host_state(self):
stats = self.conn.get_host_stats()
@@ -1347,7 +1381,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1440,7 +1474,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1487,6 +1521,11 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 5})
+ # NOTE(danms): because we're stubbing out the instance_types from
+ # the database, our instance['instance_type'] doesn't get properly
+ # filled out here, so put what we need into it
+ instance['instance_type']['swap'] = 1024
+
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
@@ -1499,6 +1538,11 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 4})
+ # NOTE(danms): because we're stubbing out the instance_types from
+ # the database, our instance['instance_type'] doesn't get properly
+ # filled out here, so put what we need into it
+ instance['instance_type']['ephemeral_gb'] = 160
+
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
@@ -1528,7 +1572,7 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
@@ -1661,7 +1705,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(FLAGS.network_manager)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
@@ -1987,7 +2031,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(FLAGS.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
@@ -2230,7 +2274,8 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
- self.context, self.aggr.id, "fake_host")
+ self.context, "fake_host",
+ aggregate=jsonutils.to_primitive(self.aggr))
excepted = db.aggregate_get(self.context, self.aggr.id)
self.assertEqual(excepted.metadetails[pool_states.KEY],
pool_states.ERROR)
@@ -2247,10 +2292,10 @@ class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
- def add_aggregate_host(self, ctxt, aggregate_id,
+ def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
- self.add_aggregate_host, ctxt, aggregate_id,
+ self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
@@ -2293,7 +2338,8 @@ class HypervisorPoolTestCase(test.TestCase):
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
- "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
+ "CONTEXT", jsonutils.to_primitive(aggregate),
+ "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
@@ -2392,7 +2438,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
@@ -2406,18 +2452,18 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.post_live_migration_at_destination(None, None, None, None)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
@@ -2436,7 +2482,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
@@ -2445,7 +2491,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2470,7 +2516,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2497,7 +2543,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
@@ -2514,7 +2560,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
@@ -2532,7 +2578,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2554,7 +2600,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2581,7 +2627,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2608,7 +2654,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2627,7 +2673,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2661,7 +2707,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, Session)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2687,7 +2733,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
@@ -2719,7 +2765,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
@@ -2890,7 +2936,7 @@ class VMOpsTestCase(test.TestCase):
def test_check_resize_func_name_defaults_to_VDI_resize(self):
session = self._get_mock_session(None, None)
- ops = vmops.VMOps(session)
+ ops = vmops.VMOps(session, fake.FakeVirtAPI())
self.assertEquals(
'VDI.resize',
diff --git a/nova/utils.py b/nova/utils.py
index 26637a489..d97c2ce3f 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -63,6 +63,16 @@ FLAGS.register_opt(
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'))
+# Maps textual size suffixes to their
+# corresponding byte multipliers
+BYTE_MULTIPLIERS = {
+ '': 1,
+ 't': 1024 ** 4,
+ 'g': 1024 ** 3,
+ 'm': 1024 ** 2,
+ 'k': 1024,
+}
+
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
@@ -574,6 +584,34 @@ def utf8(value):
return value
+def to_bytes(text, default=0):
+ """Try to turn a string into a number of bytes. Looks at the last
+ characters of the text to determine what conversion is needed to
+ turn the input text into a byte number.
+
+ Supports: B/b, and K/k, M/m, G/g, T/t with an optional trailing b/B (e.g. KB, mb)
+
+ """
+ # Strip off everything that is not number-like (which should
+ # leave only the byte 'identifier')
+ mult_key_org = text.lstrip('-1234567890')
+ mult_key = mult_key_org.lower()
+ mult_key_len = len(mult_key)
+ if mult_key.endswith("b"):
+ mult_key = mult_key[0:-1]
+ try:
+ multiplier = BYTE_MULTIPLIERS[mult_key]
+ if mult_key_len:
+ # Empty cases shouldn't cause text[0:-0]
+ text = text[0:-mult_key_len]
+ return int(text) * multiplier
+ except KeyError:
+ msg = _('Unknown byte multiplier: %s') % mult_key_org
+ raise TypeError(msg)
+ except ValueError:
+ return default
+
+
def delete_if_exists(pathname):
"""delete a file, but ignore file not found error"""
@@ -734,18 +772,6 @@ def gen_uuid():
return uuid.uuid4()
-def is_uuid_like(val):
- """For our purposes, a UUID is a string in canonical form:
-
- aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
- """
- try:
- uuid.UUID(val)
- return True
- except (TypeError, ValueError, AttributeError):
- return False
-
-
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 166eacba6..c38b0f98b 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -37,7 +37,6 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as nova_context
-from nova import db
from nova import exception
from nova import flags
from nova import notifications
@@ -78,11 +77,11 @@ def _late_load_cheetah():
class BareMetalDriver(driver.ComputeDriver):
- def __init__(self, read_only):
+ def __init__(self, virtapi, read_only):
_late_load_cheetah()
# Note that baremetal doesn't have a read-only connection
# mode, so the read_only parameter is ignored
- super(BareMetalDriver, self).__init__()
+ super(BareMetalDriver, self).__init__(virtapi)
self.baremetal_nodes = nodes.get_baremetal_nodes()
self._wrapped_conn = None
self._host_state = None
@@ -230,7 +229,7 @@ class BareMetalDriver(driver.ComputeDriver):
try:
LOG.debug(_("Key is injected but instance is not running yet"),
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.BUILDING})
notifications.send_update(context, old_ref, new_ref)
@@ -239,7 +238,7 @@ class BareMetalDriver(driver.ComputeDriver):
if state == power_state.RUNNING:
LOG.debug(_('instance %s: booted'), instance['name'],
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.ACTIVE})
notifications.send_update(context, old_ref, new_ref)
@@ -254,7 +253,7 @@ class BareMetalDriver(driver.ComputeDriver):
except Exception:
LOG.exception(_("Baremetal assignment is overcommitted."),
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.ERROR,
'power_state': power_state.FAILED})
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 0cddcfa69..e113391a5 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -112,9 +112,7 @@ def get_disk_size(path):
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
- size = images.qemu_img_info(path)['virtual size']
- size = size.split('(')[1].split()[0]
- return int(size)
+ return images.qemu_img_info(path).virtual_size
def extend(image, size):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index c78d063f9..a466fa180 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -92,6 +92,9 @@ class ComputeDriver(object):
"has_imagecache": False,
}
+ def __init__(self, virtapi):
+ self.virtapi = virtapi
+
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host."""
@@ -338,6 +341,14 @@ class ComputeDriver(object):
"""Power on the specified instance"""
raise NotImplementedError()
+ def soft_delete(self, instance):
+ """Soft delete the specified instance."""
+ raise NotImplementedError()
+
+ def restore(self, instance):
+ """Restore the specified instance"""
+ raise NotImplementedError()
+
def get_available_resource(self):
"""Retrieve resource information.
@@ -593,8 +604,14 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
pass
- def poll_rebooting_instances(self, timeout):
- """Poll for rebooting instances"""
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances
+
+ :param timeout: the currently configured timeout for considering
+ rebooting instances to be stuck
+ :param instances: instances that have been in rebooting state
+ longer than the configured timeout
+ """
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 8c3253adc..877fb7603 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -30,6 +30,7 @@ from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
+from nova.virt import virtapi
LOG = logging.getLogger(__name__)
@@ -52,7 +53,8 @@ class FakeDriver(driver.ComputeDriver):
"""Fake hypervisor driver"""
- def __init__(self, read_only=False):
+ def __init__(self, virtapi, read_only=False):
+ super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status = {
'host_name-description': 'Fake Host',
@@ -121,7 +123,7 @@ class FakeDriver(driver.ComputeDriver):
def unrescue(self, instance, network_info):
pass
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
pass
def poll_rescued_instances(self, timeout):
@@ -142,6 +144,12 @@ class FakeDriver(driver.ComputeDriver):
def power_on(self, instance):
pass
+ def soft_delete(self, instance):
+ pass
+
+ def restore(self, instance):
+ pass
+
def pause(self, instance):
pass
@@ -329,3 +337,16 @@ class FakeDriver(driver.ComputeDriver):
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
+
+
+class FakeVirtAPI(virtapi.VirtAPI):
+ def instance_update(self, context, instance_uuid, updates):
+ return db.instance_update_and_get_original(context,
+ instance_uuid,
+ updates)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return db.instance_get_by_uuid(context, instance_uuid)
+
+ def instance_get_all_by_host(self, context, host):
+ return db.instance_get_all_by_host(context, host)
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 3f43ca0a6..6d9f66ff8 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -73,8 +73,8 @@ LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
- def __init__(self):
- super(HyperVDriver, self).__init__()
+ def __init__(self, virtapi):
+ super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 133f5e25b..5b631a0da 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -22,6 +22,7 @@ Handling of VM disk images.
"""
import os
+import re
from nova import exception
from nova import flags
@@ -43,31 +44,142 @@ FLAGS = flags.FLAGS
FLAGS.register_opts(image_opts)
-def qemu_img_info(path):
- """Return a dict containing the parsed output from qemu-img info."""
+class QemuImgInfo(object):
+ BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
+ r"\s+(.*?)\)\s*$"), re.I)
+ TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
+ SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
+
+ def __init__(self, cmd_output):
+ details = self._parse(cmd_output)
+ self.image = details.get('image')
+ self.backing_file = details.get('backing_file')
+ self.file_format = details.get('file_format')
+ self.virtual_size = details.get('virtual_size')
+ self.cluster_size = details.get('cluster_size')
+ self.disk_size = details.get('disk_size')
+ self.snapshots = details.get('snapshot_list', [])
+ self.encryption = details.get('encryption')
+
+ def __str__(self):
+ lines = [
+ 'image: %s' % self.image,
+ 'file_format: %s' % self.file_format,
+ 'virtual_size: %s' % self.virtual_size,
+ 'disk_size: %s' % self.disk_size,
+ 'cluster_size: %s' % self.cluster_size,
+ 'backing_file: %s' % self.backing_file,
+ ]
+ if self.snapshots:
+ lines.append("snapshots: %s" % self.snapshots)
+ return "\n".join(lines)
+
+ def _canonicalize(self, field):
+ # Standardize on lowercase with underscores (no dashes or spaces),
+ # since qemu's output mixes these; this form also integrates
+ # better with python, e.g. for use as kwargs
+ field = field.lower().strip()
+ for c in (" ", "-"):
+ field = field.replace(c, '_')
+ return field
+
+ def _extract_bytes(self, details):
+ # Replace it with the byte amount
+ real_size = self.SIZE_RE.search(details)
+ if real_size:
+ details = real_size.group(1)
+ try:
+ details = utils.to_bytes(details)
+ except (TypeError, ValueError):
+ pass
+ return details
+
+ def _extract_details(self, root_cmd, root_details, lines_after):
+ consumed_lines = 0
+ real_details = root_details
+ if root_cmd == 'backing_file':
+ # Replace it with the real backing file
+ backing_match = self.BACKING_FILE_RE.match(root_details)
+ if backing_match:
+ real_details = backing_match.group(2).strip()
+ elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
+ # Replace it with the byte amount (if we can convert it)
+ real_details = self._extract_bytes(root_details)
+ elif root_cmd == 'file_format':
+ real_details = real_details.strip().lower()
+ elif root_cmd == 'snapshot_list':
+ # Next line should be a header, starting with 'ID'
+ if not lines_after or not lines_after[0].startswith("ID"):
+ msg = _("Snapshot list encountered but no header found!")
+ raise ValueError(msg)
+ consumed_lines += 1
+ possible_contents = lines_after[1:]
+ real_details = []
+ # This is the sprintf pattern we will try to match
+ # "%-10s%-20s%7s%20s%15s"
+ # ID TAG VM SIZE DATE VM CLOCK (current header)
+ for line in possible_contents:
+ line_pieces = line.split(None)
+ if len(line_pieces) != 6:
+ break
+ else:
+ # Check against this pattern occurring in the final position
+ # "%02d:%02d:%02d.%03d"
+ date_pieces = line_pieces[5].split(":")
+ if len(date_pieces) != 3:
+ break
+ real_details.append({
+ 'id': line_pieces[0],
+ 'tag': line_pieces[1],
+ 'vm_size': line_pieces[2],
+ 'date': line_pieces[3],
+ 'vm_clock': line_pieces[4] + " " + line_pieces[5],
+ })
+ consumed_lines += 1
+ return (real_details, consumed_lines)
+
+ def _parse(self, cmd_output):
+ # Analysis done of qemu-img.c to figure out what is going on here
+ # Find all points start with some chars and then a ':' then a newline
+ # and then handle the results of those 'top level' items in a separate
+ # function.
+ #
+ # TODO(harlowja): newer versions might have a json output format
+ # we should switch to that whenever possible.
+ # see: http://bit.ly/XLJXDX
+ if not cmd_output:
+ cmd_output = ''
+ contents = {}
+ lines = cmd_output.splitlines()
+ i = 0
+ line_am = len(lines)
+ while i < line_am:
+ line = lines[i]
+ if not line.strip():
+ i += 1
+ continue
+ consumed_lines = 0
+ top_level = self.TOP_LEVEL_RE.match(line)
+ if top_level:
+ root = self._canonicalize(top_level.group(1))
+ if not root:
+ i += 1
+ continue
+ root_details = top_level.group(2).strip()
+ details, consumed_lines = self._extract_details(root,
+ root_details,
+ lines[i + 1:])
+ contents[root] = details
+ i += consumed_lines + 1
+ return contents
+
+def qemu_img_info(path):
+ """Return a object containing the parsed output from qemu-img info."""
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
-
- # output of qemu-img is 'field: value'
- # except when its in the snapshot listing mode
- data = {}
- for line in out.splitlines():
- pieces = line.split(':', 1)
- if len(pieces) != 2:
- continue
- (field, val) = pieces
- field = field.strip().lower()
- val = val.strip()
- if field == 'snapshot list':
- # Skip everything after the snapshot list
- # which is safe to do since the code prints
- # these out at the end and nobody currently
- # uses this information in openstack as-is.
- break
- data[field] = val
-
- return data
+ return QemuImgInfo(out)
def convert_image(source, dest, out_format):
@@ -95,13 +207,13 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
with utils.remove_path_on_error(path_tmp):
data = qemu_img_info(path_tmp)
- fmt = data.get('file format')
+ fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_href)
- backing_file = data.get('backing file')
+ backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals())
@@ -113,10 +225,10 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
convert_image(path_tmp, staged, 'raw')
data = qemu_img_info(staged)
- if data.get('file format') != "raw":
+ if data.file_format != "raw":
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("Converted to raw, but format is now %s") %
- data.get('file format'))
+ data.file_format)
os.rename(staged, path)
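To make the parser concrete, here is a rough sketch of how QemuImgInfo digests typical 'qemu-img info' output; the sample text below is invented to match the regexes above, not captured from a real run:

    sample = "\n".join([
        "image: /var/lib/nova/instances/instance-1/disk",
        "file format: qcow2",
        "virtual size: 20G (21474836480 bytes)",
        "disk size: 1.1G",
        "cluster_size: 65536",
        "backing file: base.img (actual path: /var/lib/nova/_base/base.img)",
    ])

    info = QemuImgInfo(sample)
    info.file_format   # 'qcow2' ('file format' is canonicalized)
    info.virtual_size  # 21474836480 (SIZE_RE pulls out the byte count)
    info.backing_file  # '/var/lib/nova/_base/base.img' (BACKING_FILE_RE)
    info.cluster_size  # 65536 (a bare number passes through to_bytes)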
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 132a4b744..3104fafd3 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -61,7 +61,6 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context as nova_context
-from nova import db
from nova import exception
from nova import flags
from nova.image import glance
@@ -257,8 +256,8 @@ class LibvirtDriver(driver.ComputeDriver):
"has_imagecache": True,
}
- def __init__(self, read_only=False):
- super(LibvirtDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False):
+ super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
@@ -311,7 +310,7 @@ class LibvirtDriver(driver.ComputeDriver):
@property
def host_state(self):
if not self._host_state:
- self._host_state = HostState(self.read_only)
+ self._host_state = HostState(self.virtapi, self.read_only)
return self._host_state
def has_min_version(self, ver):
@@ -1053,7 +1052,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.file_delete(rescue_file)
@exception.wrap_exception()
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
pass
@exception.wrap_exception()
@@ -1621,7 +1620,7 @@ class LibvirtDriver(driver.ComputeDriver):
if ephemeral_device is not None:
swap_device = self.default_third_device
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + self.default_second_device})
@@ -1646,7 +1645,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)):
diskswap = disk_info('disk.swap', swap_device)
devices.append(diskswap)
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + swap_device})
@@ -1700,7 +1699,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
root_device = self.default_root_device
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': '/dev/' + self.default_root_device})
@@ -3008,11 +3007,12 @@ class LibvirtDriver(driver.ComputeDriver):
class HostState(object):
"""Manages information about the compute node through libvirt"""
- def __init__(self, read_only):
+ def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
self._stats = {}
self.connection = None
+ self.virtapi = virtapi
self.update_status()
def get_host_stats(self, refresh=False):
@@ -3027,7 +3027,7 @@ class HostState(object):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
if self.connection is None:
- self.connection = LibvirtDriver(self.read_only)
+ self.connection = LibvirtDriver(self.virtapi, self.read_only)
data = {}
data["vcpus"] = self.connection.get_vcpu_total()
data["vcpus_used"] = self.connection.get_vcpu_used()
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index fe54cacec..5da0aa6fb 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -94,18 +94,18 @@ def create_cow_image(backing_file, path):
cow_opts += ['backing_file=%s' % backing_file]
base_details = images.qemu_img_info(backing_file)
else:
- base_details = {}
+ base_details = None
# This doesn't seem to get inherited so force it to...
# http://paste.ubuntu.com/1213295/
# TODO(harlowja) probably file a bug against qemu-img/qemu
- if 'cluster_size' in base_details:
- cow_opts += ['cluster_size=%s' % base_details['cluster_size']]
+ if base_details and base_details.cluster_size is not None:
+ cow_opts += ['cluster_size=%s' % base_details.cluster_size]
# For now don't inherit this due the following discussion...
# See: http://www.gossamer-threads.com/lists/openstack/dev/10592
# if 'preallocation' in base_details:
# cow_opts += ['preallocation=%s' % base_details['preallocation']]
- if 'encryption' in base_details:
- cow_opts += ['encryption=%s' % base_details['encryption']]
+ if base_details and base_details.encryption:
+ cow_opts += ['encryption=%s' % base_details.encryption]
if cow_opts:
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
@@ -228,8 +228,7 @@ def get_disk_size(path):
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
- size = images.qemu_img_info(path)['virtual size']
- size = size.split('(')[1].split()[0]
+ size = images.qemu_img_info(path).virtual_size
return int(size)
@@ -239,11 +238,8 @@ def get_disk_backing_file(path):
:param path: Path to the disk image
:returns: a path to the image's backing store
"""
- backing_file = images.qemu_img_info(path).get('backing file')
-
+ backing_file = images.qemu_img_info(path).backing_file
if backing_file:
- if 'actual path: ' in backing_file:
- backing_file = backing_file.split('actual path: ')[1][:-1]
backing_file = os.path.basename(backing_file)
return backing_file
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index cd91a7299..f4f26045e 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -18,7 +18,6 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
-from nova import db
from nova import flags
from nova.openstack.common import cfg
@@ -59,8 +58,8 @@ class PowerVMDriver(driver.ComputeDriver):
"""PowerVM Implementation of Compute Driver."""
- def __init__(self):
- super(PowerVMDriver, self).__init__()
+ def __init__(self, virtapi):
+ super(PowerVMDriver, self).__init__(virtapi)
self._powervm = operator.PowerVMOperator()
@property
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
new file mode 100644
index 000000000..13aaa7e4d
--- /dev/null
+++ b/nova/virt/virtapi.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class VirtAPI(object):
+ def instance_update(self, context, instance_uuid, updates):
+ """Perform an instance update operation on behalf of a virt driver
+ :param context: security context
+ :param instance_uuid: uuid of the instance to be updated
+ :param updates: dict of attribute=value pairs to change
+
+ Returns: orig_instance, new_instance
+ """
+ raise NotImplementedError()
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ """Look up an instance by uuid
+ :param context: security context
+ :param instance_uuid: uuid of the instance to be fetched
+ """
+ raise NotImplementedError()
+
+ def instance_get_all_by_host(self, context, host):
+ """Find all instances on a given host
+ :param context: security context
+ :param host: host running instances to be returned
+ """
+ raise NotImplementedError()
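The compute manager is expected to hand each driver a concrete implementation of this interface (FakeVirtAPI in nova/virt/fake.py above is the test double backed by the DB). A minimal sketch of a driver using the indirection; the driver class and method names here are illustrative, not from this patch:

    from nova.compute import vm_states
    from nova.virt import driver


    class MyDriver(driver.ComputeDriver):  # illustrative driver
        def _mark_active(self, context, instance):
            # previously: db.instance_update_and_get_original(...)
            (old_ref, new_ref) = self.virtapi.instance_update(
                context, instance['uuid'], {'vm_state': vm_states.ACTIVE})
            return new_ref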
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index ec8673418..e56f81213 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -100,8 +100,8 @@ class Failure(Exception):
class VMWareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
- def __init__(self, read_only=False, scheme="https"):
- super(VMWareESXDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False, scheme="https"):
+ super(VMWareESXDriver, self).__init__(virtapi)
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 2ae4c27e9..4d032e891 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -125,8 +125,8 @@ FLAGS.register_opts(xenapi_opts)
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
- def __init__(self, read_only=False):
- super(XenAPIDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False):
+ super(XenAPIDriver, self).__init__(virtapi)
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
@@ -140,8 +140,8 @@ class XenAPIDriver(driver.ComputeDriver):
self._session = XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
- self._host = host.Host(self._session)
- self._vmops = vmops.VMOps(self._session)
+ self._host = host.Host(self._session, self.virtapi)
+ self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session)
@@ -181,13 +181,7 @@ class XenAPIDriver(driver.ComputeDriver):
"""Finish reverting a resize, powering back on the instance"""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance)
- block_device_mapping = driver.block_device_info_get_mapping(
- block_device_info)
- for vol in block_device_mapping:
- connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
- self.attach_volume(connection_info,
- instance['name'], mount_device)
+ self._attach_mapped_block_devices(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
@@ -195,6 +189,9 @@ class XenAPIDriver(driver.ComputeDriver):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance)
+ self._attach_mapped_block_devices(instance, block_device_info)
+
+ def _attach_mapped_block_devices(self, instance, block_device_info):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
@@ -282,9 +279,17 @@ class XenAPIDriver(driver.ComputeDriver):
"""Power on the specified instance"""
self._vmops.power_on(instance)
- def poll_rebooting_instances(self, timeout):
+ def soft_delete(self, instance):
+ """Soft delete the specified instance"""
+ self._vmops.soft_delete(instance)
+
+ def restore(self, instance):
+ """Restore the specified instance"""
+ self._vmops.restore(instance)
+
+ def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances"""
- self._vmops.poll_rebooting_instances(timeout)
+ self._vmops.poll_rebooting_instances(timeout, instances)
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 5186a40ce..8a69f7c54 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -38,8 +38,9 @@ class Host(object):
"""
Implements host related operations.
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
self._session = session
+ self._virtapi = virtapi
def host_power_action(self, _host, action):
"""Reboots or shuts down the host."""
@@ -65,7 +66,7 @@ class Host(object):
uuid = vm_rec['other_config'].get('nova_uuid')
if not uuid:
name = vm_rec['name_label']
- uuid = _uuid_find(ctxt, host, name)
+ uuid = _uuid_find(self._virtapi, ctxt, host, name)
if not uuid:
msg = _('Instance %(name)s running on %(host)s'
' could not be found in the database:'
@@ -73,11 +74,11 @@ class Host(object):
' ping migration to a new host')
LOG.info(msg % locals())
continue
- instance = db.instance_get_by_uuid(ctxt, uuid)
+ instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
dest = _host_find(ctxt, self._session, host, host_ref)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'host': dest,
@@ -88,7 +89,7 @@ class Host(object):
vm_ref, host_ref, {})
migrations_counter = migrations_counter + 1
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'vm_state': vm_states.ACTIVE})
@@ -98,7 +99,7 @@ class Host(object):
except self._session.XenAPI.Failure:
LOG.exception('Unable to migrate VM %(vm_ref)s'
' from %(host)s' % locals())
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'host': host,
@@ -212,9 +213,9 @@ def call_xenhost(session, method, arg_dict):
return e.details[1]
-def _uuid_find(context, host, name_label):
+def _uuid_find(virtapi, context, host, name_label):
"""Return instance uuid by name_label."""
- for i in db.instance_get_all_by_host(context, host):
+ for i in virtapi.instance_get_all_by_host(context, host):
if i.name == name_label:
return i['uuid']
return None
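Throughout host.py the direct nova.db calls become VirtAPI calls. Note that virtapi.instance_update must keep the (old_ref, new_ref) return contract of db.instance_update_and_get_original, since the callers above still unpack a pair. A hedged sketch of an implementation honoring that contract (class name illustrative):

    from nova import db

    class LocalVirtAPI(virtapi.VirtAPI):
        def instance_update(self, context, instance_uuid, updates):
            # Must return a (pre-update, post-update) pair, mirroring
            # db.instance_update_and_get_original as unpacked above.
            return db.instance_update_and_get_original(
                context, instance_uuid, updates)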
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 71b21ce24..0c2565dbd 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -126,7 +126,7 @@ class ResourcePool(object):
slave_info = self._create_slave_info()
self.compute_rpcapi.add_aggregate_host(
- context, aggregate.id, host, master_compute, slave_info)
+ context, aggregate, host, master_compute, slave_info)
def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
"""Remove a compute host from an aggregate."""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index be143ed9b..53d372839 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -93,7 +93,7 @@ def cmp_version(a, b):
return len(a) - len(b)
-def make_step_decorator(context, instance):
+def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
@@ -125,7 +125,7 @@ def make_step_decorator(context, instance):
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
- db.instance_update(context, instance['uuid'], {'progress': progress})
+ instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
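Callers now inject the persistence callable instead of make_step_decorator importing nova.db itself. A usage sketch matching the spawn call site further down in this file:

    step = make_step_decorator(context, instance,
                               self._virtapi.instance_update)

    @step
    def determine_disk_image_type_step(undo_mgr):
        # Each completed step writes a new 'progress' percentage
        # through the injected instance_update callable.
        ...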
@@ -145,9 +145,10 @@ class VMOps(object):
"""
Management class for VM-related tasks
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
+ self._virtapi = virtapi
self.poll_rescue_last_ran = None
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER,
@@ -260,7 +261,8 @@ class VMOps(object):
if name_label is None:
name_label = instance['name']
- step = make_step_decorator(context, instance)
+ step = make_step_decorator(context, instance,
+ self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
@@ -460,7 +462,8 @@ class VMOps(object):
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
- db.instance_update(context, instance['uuid'], {'vm_mode': mode})
+ self._virtapi.instance_update(context,
+ instance['uuid'], {'vm_mode': mode})
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
@@ -469,7 +472,7 @@ class VMOps(object):
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type):
ctx = nova_context.get_admin_context()
- instance_type = db.instance_type_get(ctx, instance['instance_type_id'])
+ instance_type = instance['instance_type']
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
@@ -478,7 +481,7 @@ class VMOps(object):
cd_vdi = vdis.pop('root')
root_vdi = vm_utils.fetch_blank_disk(self._session,
- instance['instance_type_id'])
+ instance_type['id'])
vdis['root'] = root_vdi
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
@@ -575,8 +578,7 @@ class VMOps(object):
agent.resetnetwork()
# Set VCPU weight
- inst_type = db.instance_type_get(ctx, instance['instance_type_id'])
- vcpu_weight = inst_type['vcpu_weight']
+ vcpu_weight = instance['instance_type']['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
@@ -669,7 +671,8 @@ class VMOps(object):
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
- db.instance_update(context, instance['uuid'], {'progress': progress})
+ self._virtapi.instance_update(context, instance['uuid'],
+ {'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
@@ -1147,14 +1150,27 @@ class VMOps(object):
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
- def power_off(self, instance):
- """Power off the specified instance."""
+ def soft_delete(self, instance):
+ """Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
except exception.NotFound:
- LOG.warning(_("VM is not present, skipping power off..."),
+ LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
+ else:
+ vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ self._acquire_bootlock(vm_ref)
+
+ def restore(self, instance):
+ """Restore the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._release_bootlock(vm_ref)
+ self._start(instance, vm_ref)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
def power_on(self, instance):
"""Power on the specified instance."""
@@ -1172,7 +1188,7 @@ class VMOps(object):
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
@@ -1183,7 +1199,6 @@ class VMOps(object):
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
- instances = db.instance_get_all_hung_in_rebooting(ctxt, timeout)
instances_info = dict(instance_count=len(instances),
timeout=timeout)
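The hung-instance query moves out of the driver and into the caller, so poll_rebooting_instances now just consumes the list it is given. Roughly, on the compute-manager side (the query name comes from the line removed above; the surrounding periodic-task plumbing is assumed):

    instances = db.instance_get_all_hung_in_rebooting(context, timeout)
    self.driver.poll_rebooting_instances(timeout, instances)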
diff --git a/openstack-common.conf b/openstack-common.conf
index 9a09fd5a8..666fb059e 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc
+modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
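uuidutils joins the modules synced from openstack-common (it is also added under nova/openstack/common/ in this change). A usage sketch of the two helpers it provides:

    from nova.openstack.common import uuidutils

    uuid = uuidutils.generate_uuid()    # random UUID as a string
    uuidutils.is_uuid_like(uuid)        # True
    uuidutils.is_uuid_like('i-000001')  # False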
diff --git a/smoketests/run_tests.py b/smoketests/run_tests.py
index 053acc09f..3c3ed0574 100644
--- a/smoketests/run_tests.py
+++ b/smoketests/run_tests.py
@@ -138,12 +138,12 @@ class _Win32Colorizer(object):
from win32console import FOREGROUND_INTENSITY
from win32console import FOREGROUND_RED
from win32console import GetStdHandle
- from win32console import STD_OUT_HANDLE
+ from win32console import STD_OUTPUT_HANDLE
red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
FOREGROUND_BLUE, FOREGROUND_INTENSITY)
self.stream = stream
- self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
+ self.screenBuffer = GetStdHandle(STD_OUTPUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
@@ -159,7 +159,7 @@ class _Win32Colorizer(object):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
- win32console.STD_OUT_HANDLE)
+ win32console.STD_OUTPUT_HANDLE)
except ImportError:
return False
import pywintypes
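The pywin32 constant mirroring Win32's GetStdHandle argument is STD_OUTPUT_HANDLE; STD_OUT_HANDLE does not exist, so the old import could only raise ImportError and the colorizer never engaged on Windows. A minimal check:

    import win32console  # requires pywin32, Windows only

    # STD_OUTPUT_HANDLE is the name pywin32 actually exports.
    handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)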
diff --git a/tools/pip-requires b/tools/pip-requires
index 922f1a24a..a214d7bc2 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -19,5 +19,6 @@ Babel>=0.9.6
iso8601>=0.1.4
httplib2
setuptools_git>=0.4
+python-cinderclient
python-quantumclient>=2.1
python-glanceclient>=0.5.0,<2
diff --git a/tools/test-requires b/tools/test-requires
index 536d4deee..fc56d3c87 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -11,5 +11,4 @@ pep8==1.2
pylint==0.25.2
sphinx>=1.1.2
feedparser
-python-cinderclient
MySQL-python
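python-cinderclient moves from the test requirements into pip-requires because it is now needed at runtime, not only by the test suite. A hedged sketch of what that runtime use looks like (module path and constructor per the python-cinderclient of this era; credentials invented):

    from cinderclient.v1 import client as cinder_client

    cinder = cinder_client.Client('user', 'password', 'project',
                                  auth_url='http://keystone:5000/v2.0')
    cinder.volumes.list()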
diff --git a/tools/xenserver/destroy_cached_images.py b/tools/xenserver/destroy_cached_images.py
index dd6e91adc..a9045cd61 100644
--- a/tools/xenserver/destroy_cached_images.py
+++ b/tools/xenserver/destroy_cached_images.py
@@ -25,6 +25,7 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -47,7 +48,7 @@ FLAGS.register_cli_opts(destroy_opts)
def main():
- flags.parse_args(sys.argv)
+ config.parse_args(sys.argv)
utils.monkey_patch()
xenapi = xenapi_driver.XenAPIDriver()
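With the constructor change earlier in this diff, the bare XenAPIDriver() call above would now fail for want of a virtapi argument. A hedged sketch of the likely follow-up (FakeVirtAPI as a stand-in, assuming it lives in nova/virt/fake.py; the exact object this tool should pass is not shown here):

    from nova.virt import fake

    xenapi = xenapi_driver.XenAPIDriver(fake.FakeVirtAPI())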