summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJosh Kearney <josh@jk0.org>2011-09-19 10:25:00 -0500
committerJosh Kearney <josh@jk0.org>2011-09-19 10:25:00 -0500
commit91664a2c07e4a5627558dbcaa2dd4a6719315561 (patch)
treed18ac2781af9ebfa296bb9ae14d660207d94a94b
parent90f01055a92153709a90115688a8fce3d3029976 (diff)
parent0561c0e01822d81fc90fed00f41b8d469c6c7808 (diff)
downloadnova-91664a2c07e4a5627558dbcaa2dd4a6719315561.tar.gz
nova-91664a2c07e4a5627558dbcaa2dd4a6719315561.tar.xz
nova-91664a2c07e4a5627558dbcaa2dd4a6719315561.zip
Merged trunk.
-rw-r--r--nova/api/ec2/cloud.py21
-rw-r--r--nova/api/openstack/contrib/rescue.py13
-rw-r--r--nova/api/openstack/create_instance_helper.py4
-rw-r--r--nova/api/openstack/servers.py20
-rw-r--r--nova/compute/api.py46
-rw-r--r--nova/compute/manager.py22
-rw-r--r--nova/db/sqlalchemy/api.py2
-rw-r--r--nova/flags.py3
-rw-r--r--nova/tests/api/ec2/test_cloud.py55
-rw-r--r--nova/tests/api/openstack/contrib/test_rescue.py23
-rw-r--r--nova/tests/api/openstack/contrib/test_volumes.py6
-rw-r--r--nova/tests/api/openstack/test_server_actions.py3
-rw-r--r--nova/tests/api/openstack/test_servers.py45
-rw-r--r--nova/tests/test_api.py2
-rw-r--r--nova/tests/test_compute.py53
-rw-r--r--nova/tests/test_libvirt.py51
-rw-r--r--nova/virt/libvirt/connection.py136
-rw-r--r--nova/virt/libvirt/firewall.py4
-rw-r--r--nova/virt/xenapi/vmops.py64
19 files changed, 400 insertions, 173 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index fb1afa43a..23ac30494 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -594,18 +594,31 @@ class CloudController(object):
g['ipPermissions'] = []
for rule in group.rules:
r = {}
- r['ipProtocol'] = rule.protocol
- r['fromPort'] = rule.from_port
- r['toPort'] = rule.to_port
r['groups'] = []
r['ipRanges'] = []
if rule.group_id:
source_group = db.security_group_get(context, rule.group_id)
r['groups'] += [{'groupName': source_group.name,
'userId': source_group.project_id}]
+ if rule.protocol:
+ r['ipProtocol'] = rule.protocol
+ r['fromPort'] = rule.from_port
+ r['toPort'] = rule.to_port
+ g['ipPermissions'] += [dict(r)]
+ else:
+ for protocol, min_port, max_port in (('icmp', -1, -1),
+ ('tcp', 1, 65535),
+ ('udp', 1, 65536)):
+ r['ipProtocol'] = protocol
+ r['fromPort'] = min_port
+ r['toPort'] = max_port
+ g['ipPermissions'] += [dict(r)]
else:
+ r['ipProtocol'] = rule.protocol
+ r['fromPort'] = rule.from_port
+ r['toPort'] = rule.to_port
r['ipRanges'] += [{'cidrIp': rule.cidr}]
- g['ipPermissions'] += [r]
+ g['ipPermissions'] += [r]
return g
def _rule_args_to_dict(self, context, kwargs):
diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py
index 3de128895..2e5dbab73 100644
--- a/nova/api/openstack/contrib/rescue.py
+++ b/nova/api/openstack/contrib/rescue.py
@@ -18,11 +18,14 @@ import webob
from webob import exc
from nova import compute
+from nova import flags
from nova import log as logging
+from nova import utils
from nova.api.openstack import extensions as exts
from nova.api.openstack import faults
+FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api.contrib.rescue")
@@ -30,7 +33,7 @@ def wrap_errors(fn):
""""Ensure errors are not passed along."""
def wrapped(*args):
try:
- fn(*args)
+ return fn(*args)
except Exception, e:
return faults.Fault(exc.HTTPInternalServerError())
return wrapped
@@ -46,9 +49,13 @@ class Rescue(exts.ExtensionDescriptor):
def _rescue(self, input_dict, req, instance_id):
"""Rescue an instance."""
context = req.environ["nova.context"]
- self.compute_api.rescue(context, instance_id)
+ if input_dict['rescue'] and 'adminPass' in input_dict['rescue']:
+ password = input_dict['rescue']['adminPass']
+ else:
+ password = utils.generate_password(FLAGS.password_length)
+ self.compute_api.rescue(context, instance_id, rescue_password=password)
- return webob.Response(status_int=202)
+ return {'adminPass': password}
@wrap_errors
def _unrescue(self, input_dict, req, instance_id):
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index e27ddf78b..79f17e27f 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -317,14 +317,14 @@ class CreateInstanceHelper(object):
def _get_server_admin_password_old_style(self, server):
""" Determine the admin password for a server on creation """
- return utils.generate_password(16)
+ return utils.generate_password(FLAGS.password_length)
def _get_server_admin_password_new_style(self, server):
""" Determine the admin password for a server on creation """
password = server.get('adminPass')
if password is None:
- return utils.generate_password(16)
+ return utils.generate_password(FLAGS.password_length)
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 5affd1f33..0ef246852 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -477,16 +477,22 @@ class Controller(object):
return webob.Response(status_int=202)
@scheduler_api.redirect_handler
- def rescue(self, req, id):
+ def rescue(self, req, id, body={}):
"""Permit users to rescue the server."""
context = req.environ["nova.context"]
try:
- self.compute_api.rescue(context, id)
+ if 'rescue' in body and body['rescue'] and \
+ 'adminPass' in body['rescue']:
+ password = body['rescue']['adminPass']
+ else:
+ password = utils.generate_password(FLAGS.password_length)
+ self.compute_api.rescue(context, id, rescue_password=password)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("compute.api::rescue %s"), readable)
raise exc.HTTPUnprocessableEntity()
- return webob.Response(status_int=202)
+
+ return {'adminPass': password}
@scheduler_api.redirect_handler
def unrescue(self, req, id):
@@ -618,7 +624,7 @@ class ControllerV10(Controller):
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
- password = utils.generate_password(16)
+ password = utils.generate_password(FLAGS.password_length)
try:
self.compute_api.rebuild(context, instance_id, image_id, password)
@@ -760,8 +766,10 @@ class ControllerV11(Controller):
self._validate_metadata(metadata)
self._decode_personalities(personalities)
- password = info["rebuild"].get("adminPass",
- utils.generate_password(16))
+ if 'rebuild' in info and 'adminPass' in info['rebuild']:
+ password = info['rebuild']['adminPass']
+ else:
+ password = utils.generate_password(FLAGS.password_length)
try:
self.compute_api.rebuild(context, instance_id, image_href,
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4220f47ae..d8657d403 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -287,18 +287,24 @@ class API(base.Base):
return (num_instances, base_options, image)
@staticmethod
- def _ephemeral_size(instance_type, ephemeral_name):
- num = block_device.ephemeral_num(ephemeral_name)
+ def _volume_size(instance_type, virtual_name):
+ size = 0
+ if virtual_name == 'swap':
+ size = instance_type.get('swap', 0)
+ elif block_device.is_ephemeral(virtual_name):
+ num = block_device.ephemeral_num(virtual_name)
- # TODO(yamahata): ephemeralN where N > 0
- # Only ephemeral0 is allowed for now because InstanceTypes
- # table only allows single local disk, local_gb.
- # In order to enhance it, we need to add a new columns to
- # instance_types table.
- if num > 0:
- return 0
+ # TODO(yamahata): ephemeralN where N > 0
+ # Only ephemeral0 is allowed for now because InstanceTypes
+ # table only allows single local disk, local_gb.
+ # In order to enhance it, we need to add a new columns to
+ # instance_types table.
+ if num > 0:
+ return 0
- return instance_type.get('local_gb')
+ size = instance_type.get('local_gb')
+
+ return size
def _update_image_block_device_mapping(self, elevated_context,
instance_type, instance_id,
@@ -319,12 +325,7 @@ class API(base.Base):
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
- size = 0
- if virtual_name == 'swap':
- size = instance_type.get('swap', 0)
- elif block_device.is_ephemeral(virtual_name):
- size = self._ephemeral_size(instance_type, virtual_name)
-
+ size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
@@ -354,8 +355,8 @@ class API(base.Base):
virtual_name = bdm.get('virtual_name')
if (virtual_name is not None and
- block_device.is_ephemeral(virtual_name)):
- size = self._ephemeral_size(instance_type, virtual_name)
+ block_device.is_swap_or_ephemeral(virtual_name)):
+ size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
values['volume_size'] = size
@@ -1274,13 +1275,18 @@ class API(base.Base):
self._cast_compute_message('resume_instance', context, instance_id)
@scheduler_api.reroute_compute("rescue")
- def rescue(self, context, instance_id):
+ def rescue(self, context, instance_id, rescue_password=None):
"""Rescue the given instance."""
self.update(context,
instance_id,
vm_state=vm_states.ACTIVE,
task_state=task_states.RESCUING)
- self._cast_compute_message('rescue_instance', context, instance_id)
+
+ rescue_params = {
+ "rescue_password": rescue_password
+ }
+ self._cast_compute_message('rescue_instance', context, instance_id,
+ params=rescue_params)
@scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6ddbb20b0..cb5d10f83 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -70,8 +70,6 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
-flags.DEFINE_integer('password_length', 12,
- 'Length of generated admin passwords')
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
@@ -800,12 +798,18 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
- def rescue_instance(self, context, instance_id):
- """Rescue an instance on this host."""
+ def rescue_instance(self, context, instance_id, **kwargs):
+ """
+ Rescue an instance on this host.
+ :param rescue_password: password to set on rescue instance
+ """
+
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
+ instance_ref.admin_pass = kwargs.get('rescue_password',
+ utils.generate_password(FLAGS.password_length))
network_info = self._get_instance_nw_info(context, instance_ref)
# NOTE(blamar): None of the virt drivers use the 'callback' param
@@ -1391,11 +1395,6 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
hostname = instance_ref['hostname']
- # Getting fixed ips
- fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
- if not fixed_ips:
- raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
-
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
LOG.info(_("%s has no volume."), hostname)
@@ -1411,6 +1410,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# Retry operation is necessary because requests arrive continuously;
# concurrent requests hit iptables, which then complains.
network_info = self._get_instance_nw_info(context, instance_ref)
+
+ fixed_ips = [nw_info[1]['ips'] for nw_info in network_info]
+ if not fixed_ips:
+ raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
+
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 44214c7be..779f20ae1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2828,12 +2828,14 @@ def security_group_rule_get_by_security_group(context, security_group_id,
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(parent_group_id=security_group_id).\
+ options(joinedload_all('grantee_group')).\
all()
else:
# TODO(vish): Join to group and check for project_id
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=False).\
filter_by(parent_group_id=security_group_id).\
+ options(joinedload_all('grantee_group')).\
all()
return result
diff --git a/nova/flags.py b/nova/flags.py
index aa76defe5..971e78807 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -421,6 +421,9 @@ DEFINE_string('root_helper', 'sudo',
DEFINE_bool('use_ipv6', False, 'use ipv6')
+DEFINE_integer('password_length', 12,
+ 'Length of generated instance admin passwords')
+
DEFINE_bool('monkey_patch', False,
'Whether to log monkey patching')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 7bdae0552..cc85cbd95 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -305,6 +305,61 @@ class CloudTestCase(test.TestCase):
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+ def test_describe_security_group_ingress_groups(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec1 = db.security_group_create(self.context, kwargs)
+ sec2 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'name': 'somegroup1'})
+ sec3 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'name': 'othergroup2'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [
+ {'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}}},
+ {'ip_protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 80,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}}}]}
+ self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEquals(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ self.assertEquals(len(actual_rules), 4)
+ expected_rules = [{'fromPort': -1,
+ 'groups': [{'groupName': 'somegroup1',
+ 'userId': 'someuser'}],
+ 'ipProtocol': 'icmp',
+ 'ipRanges': [],
+ 'toPort': -1},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'tcp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'udp',
+ 'ipRanges': [],
+ 'toPort': 65536},
+ {'fromPort': 80,
+ 'groups': [{'groupName': u'othergroup2',
+ 'userId': u'someuser'}],
+ 'ipProtocol': u'tcp',
+ 'ipRanges': [],
+ 'toPort': 80}]
+ for rule in expected_rules:
+ self.assertTrue(rule in actual_rules)
+
+ db.security_group_destroy(self.context, sec3['id'])
+ db.security_group_destroy(self.context, sec2['id'])
+ db.security_group_destroy(self.context, sec1['id'])
+
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
diff --git a/nova/tests/api/openstack/contrib/test_rescue.py b/nova/tests/api/openstack/contrib/test_rescue.py
index f8126d461..403bcfd4c 100644
--- a/nova/tests/api/openstack/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/contrib/test_rescue.py
@@ -16,11 +16,14 @@ import json
import webob
from nova import compute
+from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
+FLAGS = flags.FLAGS
-def rescue(self, context, instance_id):
+
+def rescue(self, context, instance_id, rescue_password=None):
pass
@@ -34,7 +37,19 @@ class RescueTest(test.TestCase):
self.stubs.Set(compute.api.API, "rescue", rescue)
self.stubs.Set(compute.api.API, "unrescue", unrescue)
- def test_rescue(self):
+ def test_rescue_with_preset_password(self):
+ body = {"rescue": {"adminPass": "AABBCC112233"}}
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ resp_json = json.loads(resp.body)
+ self.assertEqual("AABBCC112233", resp_json['adminPass'])
+
+ def test_rescue_generates_password(self):
body = dict(rescue=None)
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = "POST"
@@ -43,6 +58,8 @@ class RescueTest(test.TestCase):
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
+ resp_json = json.loads(resp.body)
+ self.assertEqual(FLAGS.password_length, len(resp_json['adminPass']))
def test_unrescue(self):
body = dict(unrescue=None)
@@ -52,4 +69,4 @@ class RescueTest(test.TestCase):
req.headers["content-type"] = "application/json"
resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.status_int, 202)
diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py
index 443ec399f..52b65f5e1 100644
--- a/nova/tests/api/openstack/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/contrib/test_volumes.py
@@ -19,6 +19,7 @@ import webob
import nova
from nova import context
+from nova import flags
from nova import test
from nova.api.openstack.contrib.volumes import BootFromVolumeController
from nova.compute import instance_types
@@ -26,6 +27,9 @@ from nova.tests.api.openstack import fakes
from nova.tests.api.openstack.test_servers import fake_gen_uuid
+FLAGS = flags.FLAGS
+
+
def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
inst_type = instance_types.get_instance_type_by_flavor_id(2)
return [{'id': 1,
@@ -70,4 +74,4 @@ class BootFromVolumeTest(test.TestCase):
self.assertEqual(2, int(server['flavor']['id']))
self.assertEqual(u'test_server', server['name'])
self.assertEqual(3, int(server['image']['id']))
- self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual(FLAGS.password_length, len(server['adminPass']))
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index b9ef41465..4a215dd74 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -622,7 +622,8 @@ class ServerActionsTestV11(test.TestCase):
self.assertEqual(res.status_int, 202)
body = json.loads(res.body)
self.assertEqual(body['server']['image']['id'], '2')
- self.assertEqual(len(body['server']['adminPass']), 16)
+ self.assertEqual(len(body['server']['adminPass']),
+ FLAGS.password_length)
def test_server_rebuild_rejected_when_building(self):
body = {
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index ee7927c64..a2c8b3b04 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -28,6 +28,7 @@ import webob
from nova import context
from nova import db
from nova import exception
+from nova import flags
from nova import test
from nova import utils
import nova.api.openstack
@@ -49,6 +50,7 @@ from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
+FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -1575,7 +1577,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
server = json.loads(res.body)['server']
- self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
self.assertEqual(1, server['id'])
self.assertEqual(2, server['flavorId'])
@@ -1776,7 +1778,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
server = json.loads(res.body)['server']
- self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(1, server['id'])
self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
@@ -1836,7 +1838,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
server = json.loads(res.body)['server']
- self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(1, server['id'])
self.assertEqual("BUILD", server["status"])
self.assertEqual(0, server['progress'])
@@ -2547,9 +2549,8 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
- def test_rescue_accepted(self):
+ def test_rescue_generates_password(self):
self.flags(allow_admin_api=True)
- body = {}
self.called = False
@@ -2564,7 +2565,33 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(self.called, True)
- self.assertEqual(res.status_int, 202)
+ self.assertEqual(res.status_int, 200)
+ res_body = json.loads(res.body)
+ self.assertTrue('adminPass' in res_body)
+ self.assertEqual(FLAGS.password_length, len(res_body['adminPass']))
+
+ def test_rescue_with_preset_password(self):
+ self.flags(allow_admin_api=True)
+
+ self.called = False
+
+ def rescue_mock(*args, **kwargs):
+ self.called = True
+
+ self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock)
+ req = webob.Request.blank('/v1.0/servers/1/rescue')
+ req.method = 'POST'
+ body = {"rescue": {"adminPass": "AABBCC112233"}}
+ req.body = json.dumps(body)
+ req.content_type = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(self.called, True)
+ self.assertEqual(res.status_int, 200)
+ res_body = json.loads(res.body)
+ self.assertTrue('adminPass' in res_body)
+ self.assertEqual('AABBCC112233', res_body['adminPass'])
def test_rescue_raises_handled(self):
self.flags(allow_admin_api=True)
@@ -3621,7 +3648,8 @@ class TestServerInstanceCreation(test.TestCase):
self.assertEquals(response.status_int, 202)
response = json.loads(response.body)
self.assertTrue('adminPass' in response['server'])
- self.assertEqual(16, len(response['server']['adminPass']))
+ self.assertEqual(FLAGS.password_length,
+ len(response['server']['adminPass']))
def test_create_instance_admin_pass_xml(self):
request, response, dummy = \
@@ -3630,7 +3658,8 @@ class TestServerInstanceCreation(test.TestCase):
dom = minidom.parseString(response.body)
server = dom.childNodes[0]
self.assertEquals(server.nodeName, 'server')
- self.assertEqual(16, len(server.getAttribute('adminPass')))
+ self.assertEqual(FLAGS.password_length,
+ len(server.getAttribute('adminPass')))
class TestGetKernelRamdiskFromImage(test.TestCase):
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 526d1c490..e9f1145dd 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -515,7 +515,7 @@ class ApiEc2TestCase(test.TestCase):
# be good enough for that.
for group in rv:
if group.name == security_group_name:
- self.assertEquals(len(group.rules), 1)
+ self.assertEquals(len(group.rules), 3)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
(other_security_group_name, 'fake'))
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 4d463572b..356412dbf 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -174,6 +174,20 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(pre_build_len,
len(db.instance_get_all(context.get_admin_context())))
+ def test_create_instance_with_img_ref_associates_config_drive(self):
+ """Make sure create associates a config drive."""
+
+ instance_id = self._create_instance(params={'config_drive': '1234', })
+
+ try:
+ self.compute.run_instance(self.context, instance_id)
+ instances = db.instance_get_all(context.get_admin_context())
+ instance = instances[0]
+
+ self.assertTrue(instance.config_drive)
+ finally:
+ db.instance_destroy(self.context, instance_id)
+
def test_create_instance_associates_config_drive(self):
"""Make sure create associates a config drive."""
@@ -647,7 +661,6 @@ class ComputeTestCase(test.TestCase):
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None)
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -657,6 +670,9 @@ class ComputeTestCase(test.TestCase):
def test_pre_live_migration_instance_has_volume(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
+ def fake_nw_info(*args, **kwargs):
+ return [(0, {'ips':['dummy']})]
+
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
@@ -666,13 +682,13 @@ class ComputeTestCase(test.TestCase):
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
- drivermock.plug_vifs(i_ref, [])
- drivermock.ensure_filtering_rules_for_instance(i_ref, [])
+ drivermock.plug_vifs(i_ref, fake_nw_info())
+ drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info())
+ self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
self.compute.db = dbmock
self.compute.volume_manager = volmock
self.compute.driver = drivermock
@@ -683,6 +699,9 @@ class ComputeTestCase(test.TestCase):
def test_pre_live_migration_instance_has_no_volume(self):
"""Confirm log meg when instance doesn't mount any volumes."""
+ def fake_nw_info(*args, **kwargs):
+ return [(0, {'ips':['dummy']})]
+
i_ref = self._get_dummy_instance()
i_ref['volumes'] = []
c = context.get_admin_context()
@@ -692,12 +711,12 @@ class ComputeTestCase(test.TestCase):
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
- drivermock.plug_vifs(i_ref, [])
- drivermock.ensure_filtering_rules_for_instance(i_ref, [])
+ drivermock.plug_vifs(i_ref, fake_nw_info())
+ drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info())
+ self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
self.compute.db = dbmock
self.compute.driver = drivermock
@@ -711,6 +730,8 @@ class ComputeTestCase(test.TestCase):
It retries and raise exception when timeout exceeded.
"""
+ def fake_nw_info(*args, **kwargs):
+ return [(0, {'ips':['dummy']})]
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
@@ -722,13 +743,13 @@ class ComputeTestCase(test.TestCase):
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
- drivermock.plug_vifs(i_ref, []).\
+ drivermock.plug_vifs(i_ref, fake_nw_info()).\
AndRaise(exception.ProcessExecutionError())
+ self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.volume_manager = volmock
@@ -1563,12 +1584,16 @@ class ComputeTestCase(test.TestCase):
db.block_device_mapping_destroy(self.context, bdm['id'])
self.compute.terminate_instance(self.context, instance_id)
- def test_ephemeral_size(self):
+ def test_volume_size(self):
local_size = 2
- inst_type = {'local_gb': local_size}
- self.assertEqual(self.compute_api._ephemeral_size(inst_type,
+ swap_size = 3
+ inst_type = {'local_gb': local_size, 'swap': swap_size}
+ self.assertEqual(self.compute_api._volume_size(inst_type,
'ephemeral0'),
local_size)
- self.assertEqual(self.compute_api._ephemeral_size(inst_type,
- 'ephemeral1'),
+ self.assertEqual(self.compute_api._volume_size(inst_type,
+ 'ephemeral1'),
0)
+ self.assertEqual(self.compute_api._volume_size(inst_type,
+ 'swap'),
+ swap_size)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index b7c1ef1ab..39aa4ad41 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -52,6 +52,15 @@ def _concurrency(wait, done, target):
done.send()
+class FakeVirDomainSnapshot(object):
+
+ def __init__(self, dom=None):
+ self.dom = dom
+
+ def delete(self, flags):
+ pass
+
+
class FakeVirtDomain(object):
def __init__(self, fake_xml=None):
@@ -69,7 +78,7 @@ class FakeVirtDomain(object):
"""
def snapshotCreateXML(self, *args):
- return None
+ return FakeVirDomainSnapshot(self)
def createWithFlags(self, launch_flags):
pass
@@ -260,11 +269,48 @@ class LibvirtConnTestCase(test.TestCase):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
- def test_snapshot(self):
+ def test_snapshot_in_raw_format(self):
+ if not self.lazy_load_library_exists():
+ return
+
+ self.flags(image_service='nova.image.fake.FakeImageService')
+
+ # Start test
+ image_service = utils.import_object(FLAGS.image_service)
+
+ # Assuming that base image already exists in image_service
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.utils, 'execute')
+ connection.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = connection.LibvirtConnection(False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'])
+
+ snapshot = image_service.show(context, recv_meta['id'])
+ self.assertEquals(snapshot['properties']['image_state'], 'available')
+ self.assertEquals(snapshot['status'], 'active')
+ self.assertEquals(snapshot['disk_format'], 'raw')
+ self.assertEquals(snapshot['name'], snapshot_name)
+
+ def test_snapshot_in_qcow2_format(self):
if not self.lazy_load_library_exists():
return
self.flags(image_service='nova.image.fake.FakeImageService')
+ self.flags(snapshot_image_format='qcow2')
# Start test
image_service = utils.import_object(FLAGS.image_service)
@@ -293,6 +339,7 @@ class LibvirtConnTestCase(test.TestCase):
snapshot = image_service.show(context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
+ self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 2ae1359bf..b3c8ebf6e 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -29,9 +29,9 @@ Supports KVM, LXC, QEMU, UML, and XEN.
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_xml_template: Libvirt XML Template.
-:rescue_image_id: Rescue ami image (default: ami-rescue).
-:rescue_kernel_id: Rescue aki image (default: aki-rescue).
-:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
+:rescue_image_id: Rescue ami image (None = original image).
+:rescue_kernel_id: Rescue aki image (None = original image).
+:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
@@ -84,9 +84,9 @@ LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
-flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
-flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
-flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
+flags.DEFINE_string('rescue_image_id', None, 'Rescue ami image')
+flags.DEFINE_string('rescue_kernel_id', None, 'Rescue aki image')
+flags.DEFINE_string('rescue_ramdisk_id', None, 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -125,8 +125,10 @@ flags.DEFINE_string('block_migration_flag',
'Define block migration behavior.')
flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
-flags.DEFINE_string('qemu_img', 'qemu-img',
- 'binary to use for qemu-img commands')
+flags.DEFINE_string('snapshot_image_format', None,
+ 'Snapshot image format (valid options are : '
+ 'raw, qcow2, vmdk, vdi).'
+ 'Defaults to same as source image')
flags.DEFINE_string('libvirt_vif_type', 'bridge',
'Type of VIF to create.')
flags.DEFINE_string('libvirt_vif_driver',
@@ -391,10 +393,7 @@ class LibvirtConnection(driver.ComputeDriver):
def snapshot(self, context, instance, image_href):
"""Create snapshot from a running VM instance.
- This command only works with qemu 0.14+, the qemu_img flag is
- provided so that a locally compiled binary of qemu-img can be used
- to support this command.
-
+ This command only works with qemu 0.14+
"""
virt_dom = self._lookup_by_name(instance['name'])
@@ -420,8 +419,11 @@ class LibvirtConnection(driver.ComputeDriver):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
- if 'disk_format' in base:
- metadata['disk_format'] = base['disk_format']
+ source_format = base.get('disk_format') or 'raw'
+ image_format = FLAGS.snapshot_image_format or source_format
+ if FLAGS.use_cow_images:
+ source_format = 'qcow2'
+ metadata['disk_format'] = image_format
if 'container_format' in base:
metadata['container_format'] = base['container_format']
@@ -444,12 +446,12 @@ class LibvirtConnection(driver.ComputeDriver):
# Export the snapshot to a raw image
temp_dir = tempfile.mkdtemp()
out_path = os.path.join(temp_dir, snapshot_name)
- qemu_img_cmd = (FLAGS.qemu_img,
+ qemu_img_cmd = ('qemu-img',
'convert',
'-f',
- 'qcow2',
+ source_format,
'-O',
- 'raw',
+ image_format,
'-s',
snapshot_name,
disk_path,
@@ -468,7 +470,7 @@ class LibvirtConnection(driver.ComputeDriver):
snapshot_ptr.delete(0)
@exception.wrap_exception()
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type=None, xml=None):
"""Reboot a virtual machine, given an instance reference.
This method actually destroys and re-creates the domain to ensure the
@@ -479,7 +481,9 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(itoumsn): Use XML delived from the running instance
# instead of using to_xml(instance, network_info). This is almost
# the ultimate stupid workaround.
- xml = virt_dom.XMLDesc(0)
+ if not xml:
+ xml = virt_dom.XMLDesc(0)
+
# NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
# better because we cannot ensure flushing dirty buffers
# in the guest OS. But, in case of KVM, shutdown() does not work...
@@ -543,43 +547,42 @@ class LibvirtConnection(driver.ComputeDriver):
data recovery.
"""
- self.destroy(instance, network_info, cleanup=False)
-
- xml = self.to_xml(instance, network_info, rescue=True)
- rescue_images = {'image_id': FLAGS.rescue_image_id,
- 'kernel_id': FLAGS.rescue_kernel_id,
- 'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(context, instance, xml, '.rescue', rescue_images)
- self._create_new_domain(xml)
-
- def _wait_for_rescue():
- """Called at an interval until the VM is running again."""
- instance_name = instance['name']
- try:
- state = self.get_info(instance_name)['state']
- except exception.NotFound:
- msg = _("During reboot, %s disappeared.") % instance_name
- LOG.error(msg)
- raise utils.LoopingCallDone
-
- if state == power_state.RUNNING:
- msg = _("Instance %s rescued successfully.") % instance_name
- LOG.info(msg)
- raise utils.LoopingCallDone
+ virt_dom = self._conn.lookupByName(instance['name'])
+ unrescue_xml = virt_dom.XMLDesc(0)
+ unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ instance['name'],
+ 'unrescue.xml')
+ f = open(unrescue_xml_path, 'w')
+ f.write(unrescue_xml)
+ f.close()
- timer = utils.LoopingCall(_wait_for_rescue)
- return timer.start(interval=0.5, now=True)
+ xml = self.to_xml(instance, network_info, rescue=True)
+ rescue_images = {
+ 'image_id': FLAGS.rescue_image_id or instance['image_ref'],
+ 'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
+ 'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
+ }
+ self._create_image(context, instance, xml, '.rescue', rescue_images,
+ network_info=network_info)
+ self.reboot(instance, network_info, xml=xml)
@exception.wrap_exception()
- def unrescue(self, instance, network_info):
+ def unrescue(self, instance, callback, network_info):
"""Reboot the VM which is being rescued back into primary images.
Because reboot destroys and re-creates instances, unresue should
simply call reboot.
"""
- self.reboot(instance, network_info)
+ unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ instance['name'],
+ 'unrescue.xml')
+ f = open(unrescue_xml_path)
+ unrescue_xml = f.read()
+ f.close()
+ os.remove(unrescue_xml_path)
+ self.reboot(instance, network_info, xml=unrescue_xml)
@exception.wrap_exception()
def poll_rescued_instances(self, timeout):
@@ -774,13 +777,13 @@ class LibvirtConnection(driver.ComputeDriver):
if size:
disk.extend(target, size)
- def _create_local(self, target, local_size, prefix='G', fs_format=None):
+ def _create_local(self, target, local_size, unit='G', fs_format=None):
"""Create a blank image of specified size"""
if not fs_format:
fs_format = FLAGS.default_local_format
- utils.execute('truncate', target, '-s', "%d%c" % (local_size, prefix))
+ utils.execute('truncate', target, '-s', "%d%c" % (local_size, unit))
if fs_format:
utils.execute('mkfs', '-t', fs_format, target)
@@ -788,9 +791,9 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_local(target, local_size)
disk.mkfs(os_type, fs_label, target)
- def _create_swap(self, target, swap_gb):
+ def _create_swap(self, target, swap_mb):
"""Create a swap file of specified size"""
- self._create_local(target, swap_gb)
+ self._create_local(target, swap_mb, unit='M')
utils.execute('mkswap', target)
def _create_image(self, context, inst, libvirt_xml, suffix='',
@@ -818,8 +821,10 @@ class LibvirtConnection(driver.ComputeDriver):
utils.execute('mkdir', '-p', container_dir)
# NOTE(vish): No need add the suffix to console.log
- os.close(os.open(basepath('console.log', ''),
- os.O_CREAT | os.O_WRONLY, 0660))
+ console_log = basepath('console.log', '')
+ if os.path.exists(console_log):
+ utils.execute('chown', os.getuid(), console_log, run_as_root=True)
+ os.close(os.open(console_log, os.O_CREAT | os.O_WRONLY, 0660))
if not disk_images:
disk_images = {'image_id': inst['image_ref'],
@@ -886,22 +891,22 @@ class LibvirtConnection(driver.ComputeDriver):
cow=FLAGS.use_cow_images,
local_size=eph['size'])
- swap_gb = 0
+ swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
- swap_gb = swap['swap_size']
+ swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not self._volume_in_mapping(self.default_swap_device,
block_device_info)):
- swap_gb = inst_type['swap']
+ swap_mb = inst_type['swap']
- if swap_gb > 0:
+ if swap_mb > 0:
self._cache_image(fn=self._create_swap,
target=basepath('disk.swap'),
- fname="swap_%s" % swap_gb,
+ fname="swap_%s" % swap_mb,
cow=FLAGS.use_cow_images,
- swap_gb=swap_gb)
+ swap_mb=swap_mb)
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
@@ -922,10 +927,10 @@ class LibvirtConnection(driver.ComputeDriver):
target=basepath('disk.config'),
fname=fname,
image_id=config_drive_id,
- user=user,
- project=project)
+ user_id=inst['user_id'],
+ project_id=inst['project_id'],)
elif config_drive:
- self._create_local(basepath('disk.config'), 64, prefix="M",
+ self._create_local(basepath('disk.config'), 64, unit='M',
fs_format='msdos') # 64MB
if inst['key_data']:
@@ -995,15 +1000,16 @@ class LibvirtConnection(driver.ComputeDriver):
nbd=FLAGS.use_cow_images,
tune2fs=tune2fs)
- if FLAGS.libvirt_type == 'lxc':
- disk.setup_container(basepath('disk'),
- container_dir=container_dir,
- nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
' data into image %(img_id)s (%(e)s)') % locals())
+ if FLAGS.libvirt_type == 'lxc':
+ disk.setup_container(basepath('disk'),
+ container_dir=container_dir,
+ nbd=FLAGS.use_cow_images)
+
if FLAGS.libvirt_type == 'uml':
utils.execute('chown', 'root', basepath('disk'), run_as_root=True)
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 0db10c7ce..c6253511e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -663,7 +663,9 @@ class IptablesFirewallDriver(FirewallDriver):
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-j ACCEPT', '-p', protocol]
+ args = ['-j ACCEPT']
+ if protocol:
+ args += ['-p', protocol]
if protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 55a6a4a78..210b8fe65 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -49,9 +49,9 @@ XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
FLAGS = flags.FLAGS
-flags.DEFINE_integer('windows_version_timeout', 300,
- 'number of seconds to wait for windows agent to be '
- 'fully operational')
+flags.DEFINE_integer('agent_version_timeout', 300,
+ 'number of seconds to wait for agent to be fully '
+ 'operational')
flags.DEFINE_string('xenapi_vif_driver',
'nova.virt.xenapi.vif.XenAPIBridgeDriver',
'The XenAPI VIF driver using XenServer Network APIs.')
@@ -324,15 +324,8 @@ class VMOps(object):
def _check_agent_version():
LOG.debug(_("Querying agent version"))
- if instance.os_type == 'windows':
- # Windows will generally perform a setup process on first boot
- # that can take a couple of minutes and then reboot. So we
- # need to be more patient than normal as well as watch for
- # domid changes
- version = self.get_agent_version(instance,
- timeout=FLAGS.windows_version_timeout)
- else:
- version = self.get_agent_version(instance)
+
+ version = self.get_agent_version(instance)
if not version:
return
@@ -639,9 +632,15 @@ class VMOps(object):
self._session.wait_for_task(task, instance.id)
- def get_agent_version(self, instance, timeout=None):
+ def get_agent_version(self, instance):
"""Get the version of the agent running on the VM instance."""
+ # The agent can be slow to start for a variety of reasons. On Windows,
+ # it will generally perform a setup process on first boot that can
+ # take a couple of minutes and then reboot. On Linux, the system can
+ # also take a while to boot. So we need to be more patient than
+ # normal as well as watch for domid changes
+
def _call():
# Send the encrypted password
transaction_id = str(uuid.uuid4())
@@ -655,27 +654,26 @@ class VMOps(object):
# (ie CRLF escaped) for some reason. Strip that off.
return resp['message'].replace('\\r\\n', '')
- if timeout:
- vm_ref = self._get_vm_opaque_ref(instance)
- vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
- domid = vm_rec['domid']
-
- expiration = time.time() + timeout
- while time.time() < expiration:
- ret = _call()
- if ret:
- return ret
-
- vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
- if vm_rec['domid'] != domid:
- LOG.info(_('domid changed from %(olddomid)s to '
- '%(newdomid)s') % {
- 'olddomid': domid,
- 'newdomid': vm_rec['domid']})
- domid = vm_rec['domid']
- else:
- return _call()
+ domid = vm_rec['domid']
+
+ expiration = time.time() + FLAGS.agent_version_timeout
+ while time.time() < expiration:
+ ret = _call()
+ if ret:
+ return ret
+
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if vm_rec['domid'] != domid:
+ LOG.info(_('domid changed from %(olddomid)s to '
+ '%(newdomid)s') % {
+ 'olddomid': domid,
+ 'newdomid': vm_rec['domid']})
+ domid = vm_rec['domid']
+
+ return None
def agent_update(self, instance, url, md5sum):
"""Update agent on the VM instance."""