-rw-r--r--  etc/nova/policy.json | 4
-rw-r--r--  nova/api/openstack/compute/contrib/volumes.py | 14
-rw-r--r--  nova/api/openstack/wsgi.py | 10
-rw-r--r--  nova/availability_zones.py | 1
-rw-r--r--  nova/compute/api.py | 27
-rw-r--r--  nova/compute/manager.py | 6
-rw-r--r--  nova/context.py | 6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py | 6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py | 3
-rw-r--r--  nova/exception.py | 25
-rw-r--r--  nova/network/linux_net.py | 14
-rw-r--r--  nova/network/manager.py | 1
-rw-r--r--  nova/notifications.py | 1
-rw-r--r--  nova/scheduler/filters/availability_zone_filter.py | 1
-rw-r--r--  nova/service.py | 12
-rw-r--r--  nova/servicegroup/drivers/db.py | 1
-rw-r--r--  nova/tests/baremetal/test_nova_baremetal_manage.py | 4
-rw-r--r--  nova/tests/baremetal/test_pxe.py | 2
-rw-r--r--  nova/tests/baremetal/test_utils.py | 3
-rw-r--r--  nova/tests/cells/test_cells_messaging.py | 2
-rw-r--r--  nova/tests/fake_policy.py | 4
-rw-r--r--  nova/tests/network/test_manager.py | 1
-rw-r--r--  nova/tests/test_availability_zones.py | 1
-rw-r--r--  nova/tests/test_context.py | 19
-rw-r--r--  nova/tests/test_iptables_network.py | 19
-rw-r--r--  nova/tests/test_libvirt_vif.py | 41
-rw-r--r--  nova/tests/test_migrations.py | 404
-rw-r--r--  nova/tests/test_periodic_tasks.py | 1
-rw-r--r--  nova/tests/test_virt_drivers.py | 2
-rw-r--r--  nova/tests/test_vmwareapi.py | 93
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py | 2
-rw-r--r--  nova/utils.py | 4
-rw-r--r--  nova/virt/baremetal/pxe.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 2
-rw-r--r--  nova/virt/libvirt/vif.py | 109
-rw-r--r--  nova/virt/powervm/blockdev.py | 7
-rw-r--r--  nova/virt/powervm/driver.py | 8
-rw-r--r--  nova/virt/vmwareapi/driver.py | 174
-rw-r--r--  nova/virt/vmwareapi/fake.py | 47
-rw-r--r--  nova/virt/vmwareapi/host.py | 140
-rw-r--r--  nova/virt/vmwareapi/read_write_util.py | 2
-rw-r--r--  nova/virt/vmwareapi/vif.py | 2
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 75
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 783
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py | 14
-rw-r--r--  nova/virt/vmwareapi/volume_util.py | 1
-rw-r--r--  nova/virt/vmwareapi/volumeops.py | 4
-rw-r--r--  nova/volume/cinder.py | 4
-rwxr-xr-x  run_tests.sh | 1
-rw-r--r--  tools/flakes.py | 4
-rwxr-xr-x  tools/unused_imports.sh | 4
-rw-r--r--  tox.ini | 5
52 files changed, 1594 insertions(+), 528 deletions(-)
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 97ae89a38..1a446263f 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -83,6 +83,10 @@
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:availability_zone:list": "",
"compute_extension:availability_zone:detail": "rule:admin_api",
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 47c717495..3fc503217 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -33,6 +33,15 @@ from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
+authorize_attach_index = extensions.extension_authorizer('compute',
+ 'volume_attachments:index')
+authorize_attach_show = extensions.extension_authorizer('compute',
+ 'volume_attachments:show')
+authorize_attach_create = extensions.extension_authorizer('compute',
+ 'volume_attachments:create')
+authorize_attach_delete = extensions.extension_authorizer('compute',
+ 'volume_attachments:delete')
+
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
@@ -329,6 +338,8 @@ class VolumeAttachmentController(wsgi.Controller):
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
+ context = req.environ['nova.context']
+ authorize_attach_index(context)
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@@ -337,6 +348,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach_show(context)
volume_id = id
try:
@@ -377,6 +389,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach_create(context)
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
@@ -423,6 +436,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach_delete(context)
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 8b593d742..a6f255081 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -1182,10 +1182,18 @@ class Fault(webob.exc.HTTPException):
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
+ explanation = self.wrapped_exc.explanation
+ offset = explanation.find("Traceback")
+ if offset is not -1:
+ LOG.debug(_("API request failed, fault raised to the top of"
+ " the stack. Detailed stacktrace %s") %
+ explanation)
+ explanation = explanation[0:offset - 1]
+
fault_data = {
fault_name: {
'code': code,
- 'message': self.wrapped_exc.explanation}}
+ 'message': explanation}}
if code == 413:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
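The hunk above logs the full stack trace but truncates the client-visible fault message at the first "Traceback" marker. One wrinkle: "offset is not -1" is an identity test that only behaves like "!= -1" because CPython interns small integers; equality is the robust spelling. A standalone sketch of the intended logic, assuming the explanation is a plain string:

    def strip_traceback(explanation):
        """Drop any traceback that leaked into a fault message (sketch)."""
        offset = explanation.find("Traceback")
        if offset != -1:                      # equality, not identity
            explanation = explanation[:offset].rstrip()
        return explanation

    msg = "Instance not found\nTraceback (most recent call last): ..."
    assert strip_traceback(msg) == "Instance not found"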
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index 09cbd98b8..711eee1fa 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -17,7 +17,6 @@
from nova import db
from nova.openstack.common import cfg
-from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
availability_zone_opts = [
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 06ce2e07e..4961827fd 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -558,6 +558,11 @@ class API(base.Base):
security_group, block_device_mapping)
instances.append(instance)
instance_uuids.append(instance['uuid'])
+ self._validate_bdm(context, instance)
+ # send a state update notification for the initial create to
+ # show it going from non-existent to BUILDING
+ notifications.send_update_with_states(context, instance, None,
+ vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
@@ -704,6 +709,23 @@ class API(base.Base):
self.db.block_device_mapping_update_or_create(elevated_context,
values)
+ def _validate_bdm(self, context, instance):
+ for bdm in self.db.block_device_mapping_get_all_by_instance(
+ context, instance['uuid']):
+ # NOTE(vish): For now, just make sure the volumes are accessible.
+ snapshot_id = bdm.get('snapshot_id')
+ volume_id = bdm.get('volume_id')
+ if volume_id is not None:
+ try:
+ self.volume_api.get(context, volume_id)
+ except Exception:
+ raise exception.InvalidBDMVolume(id=volume_id)
+ elif snapshot_id is not None:
+ try:
+ self.volume_api.get_snapshot(context, snapshot_id)
+ except Exception:
+ raise exception.InvalidBDMSnapshot(id=snapshot_id)
+
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
@@ -818,11 +840,6 @@ class API(base.Base):
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
- # send a state update notification for the initial create to
- # show it going from non-existent to BUILDING
- notifications.send_update_with_states(context, instance, None,
- vm_states.BUILDING, None, None, service="api")
-
return instance
def _check_create_policies(self, context, availability_zone,
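_validate_bdm makes boot requests fail fast: every volume or snapshot named in the block device mapping must be retrievable before the instance is scheduled, and the BUILDING notification now fires once, right after the instance row exists, instead of inside _populate_instance_for_bdm. A standalone sketch of the validation step, with a stubbed volume API:

    class InvalidBDMVolume(Exception):
        pass

    class InvalidBDMSnapshot(Exception):
        pass

    def validate_bdm(bdms, volume_api):
        """Reject mappings that reference unreachable volumes/snapshots
        (sketch of API._validate_bdm above)."""
        for bdm in bdms:
            volume_id = bdm.get('volume_id')
            snapshot_id = bdm.get('snapshot_id')
            if volume_id is not None:
                try:
                    volume_api.get(volume_id)
                except Exception:
                    raise InvalidBDMVolume(volume_id)
            elif snapshot_id is not None:
                try:
                    volume_api.get_snapshot(snapshot_id)
                except Exception:
                    raise InvalidBDMSnapshot(snapshot_id)

    class FakeVolumeAPI(object):
        def get(self, volume_id):
            if volume_id != 'vol-1':
                raise LookupError(volume_id)

    validate_bdm([{'volume_id': 'vol-1'}], FakeVolumeAPI())   # passes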
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6365a548c..b52e85440 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -57,13 +57,11 @@ from nova import network
from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import excutils
-from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import paths
from nova import quota
@@ -3070,8 +3068,8 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
- reason = _("In states %(vm_state)s/%(task_state)s, not"
- "RESIZED/None")
+ reason = _("In states %(vm_state)s/%(task_state)s, not "
+ "RESIZED/None")
_set_migration_to_error(migration, reason % locals(),
instance=instance)
continue
diff --git a/nova/context.py b/nova/context.py
index 1a566cb5a..8731e012d 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -46,7 +46,7 @@ class RequestContext(object):
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
- service_catalog=None, instance_lock_checked=False, **kwargs):
+ service_catalog=[], instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
@@ -79,7 +79,9 @@ class RequestContext(object):
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
- self.service_catalog = service_catalog
+ # Only include required parts of service_catalog
+ self.service_catalog = [s for s in service_catalog
+ if s.get('type') in ('volume')]
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
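Two Python subtleties in this hunk are worth flagging. service_catalog=[] is a mutable default argument (safe here only because the list is never mutated), and ('volume') is a parenthesized string, not a one-element tuple, so the membership test is actually substring matching; it happens to give the right answer for these catalog types, but a trailing comma is the intended spelling. A sketch of the filter as presumably intended:

    def filter_catalog(service_catalog, wanted=('volume',)):   # note the comma
        """Keep only the catalog entries nova needs (sketch)."""
        service_catalog = service_catalog or []   # sidestep a mutable default
        return [s for s in service_catalog if s.get('type') in wanted]

    catalog = [{'type': 'compute', 'name': 'nova'},
               {'type': 'volume', 'name': 'cinder'}]
    assert filter_catalog(catalog) == [{'type': 'volume', 'name': 'cinder'}]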
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
index a20799fbe..d93cd1ead 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
@@ -37,9 +37,9 @@ def upgrade(migrate_engine):
if rec['binary'] != 'nova-compute':
continue
# if zone doesn't exist create
- result = aggregate_metadata.select().where(aggregate_metadata.c.key ==
- 'availability_zone' and
- aggregate_metadata.c.key == rec['availability_zone']).execute()
+ result = aggregate_metadata.select().where(
+ aggregate_metadata.c.key == 'availability_zone').where(
+ aggregate_metadata.c.value == rec['availability_zone']).execute()
result = [r for r in result]
if len(result) > 0:
agg_id = result[0].aggregate_id
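The bug being repaired: Python's "and" cannot combine SQLAlchemy clause objects, so the old expression effectively kept a single condition, and that condition compared the key column (not the value) against the zone name. Chaining .where() calls, as the fix does, ANDs the conditions; sqlalchemy.and_() is the equivalent explicit form:

    import sqlalchemy
    from sqlalchemy import and_

    metadata = sqlalchemy.MetaData()
    aggregate_metadata = sqlalchemy.Table(
        'aggregate_metadata', metadata,
        sqlalchemy.Column('aggregate_id', sqlalchemy.Integer),
        sqlalchemy.Column('key', sqlalchemy.String(255)),
        sqlalchemy.Column('value', sqlalchemy.String(255)))

    # Both selects compile to the same SQL: the two conditions ANDed.
    chained = aggregate_metadata.select().where(
        aggregate_metadata.c.key == 'availability_zone').where(
        aggregate_metadata.c.value == 'nova')
    explicit = aggregate_metadata.select().where(and_(
        aggregate_metadata.c.key == 'availability_zone',
        aggregate_metadata.c.value == 'nova'))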
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
index d4bd991f7..c49e8272b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
@@ -4,8 +4,7 @@ from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import select
from sqlalchemy.sql.expression import UpdateBase
-from sqlalchemy.sql import literal_column
-from sqlalchemy import String, Integer, Boolean
+from sqlalchemy import Integer, Boolean
from sqlalchemy.types import NullType, BigInteger
diff --git a/nova/exception.py b/nova/exception.py
index c15fc1e43..6915c14bb 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -25,7 +25,6 @@ SHOULD include dedicated exception logging.
"""
import functools
-import itertools
import webob.exc
@@ -227,6 +226,20 @@ class Invalid(NovaException):
code = 400
+class InvalidBDM(Invalid):
+ message = _("Block Device Mapping is Invalid.")
+
+
+class InvalidBDMSnapshot(InvalidBDM):
+ message = _("Block Device Mapping is Invalid: "
+ "failed to get snapshot %(id)s.")
+
+
+class InvalidBDMVolume(InvalidBDM):
+ message = _("Block Device Mapping is Invalid: "
+ "failed to get volume %(id)s.")
+
+
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
@@ -317,7 +330,15 @@ class InstanceSuspendFailure(Invalid):
class InstanceResumeFailure(Invalid):
- message = _("Failed to resume server") + ": %(reason)s."
+ message = _("Failed to resume instance: %(reason)s.")
+
+
+class InstancePowerOnFailure(Invalid):
+ message = _("Failed to power on instance: %(reason)s.")
+
+
+class InstancePowerOffFailure(Invalid):
+ message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
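The new exception classes lean on NovaException's convention of interpolating constructor keyword arguments into the class-level message, e.g. InvalidBDMVolume(id=volume_id); InstanceResumeFailure's message also moves the ": %(reason)s." inside the translated string so translators see the whole sentence. A minimal sketch of the base-class convention (not nova's full implementation, which also guards against formatting errors):

    class NovaException(Exception):
        """Sketch of nova.exception.NovaException's message formatting."""
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            super(NovaException, self).__init__(self.message % kwargs)

    class InvalidBDMVolume(NovaException):
        message = ("Block Device Mapping is Invalid: "
                   "failed to get volume %(id)s.")

    try:
        raise InvalidBDMVolume(id='vol-42')
    except NovaException as e:
        assert 'vol-42' in str(e)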
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index a9b44e94a..f090064a3 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -378,7 +378,7 @@ class IptablesManager(object):
for table in tables:
start, end = self._find_table(all_lines, table)
all_lines[start:end] = self._modify_rules(
- all_lines[start:end], tables[table])
+ all_lines[start:end], tables[table], table_name=table)
self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(all_lines),
attempts=5)
@@ -397,13 +397,20 @@ class IptablesManager(object):
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
- def _modify_rules(self, current_lines, table, binary=None):
+ def _modify_rules(self, current_lines, table, binary=None,
+ table_name=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
+ if not current_lines:
+ fake_table = ['#Generated by nova',
+ '*' + table_name, 'COMMIT',
+ '#Completed by nova']
+ current_lines = fake_table
+
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
@@ -418,6 +425,9 @@ class IptablesManager(object):
if not rule.startswith(':'):
break
+ if not seen_chains:
+ rules_index = 2
+
our_rules = []
bot_rules = []
for rule in rules:
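When iptables-save has never populated a table, _modify_rules now synthesizes a minimal skeleton so nova's rules have a place to land, and rules_index falls back to 2 (right after the '*table' line) when no ':CHAIN' declarations were seen. The bootstrap step in isolation:

    def bootstrap_table(current_lines, table_name):
        """Synthesize an empty iptables-save style table when none
        exists yet (sketch of the fix above)."""
        if not current_lines:
            current_lines = ['#Generated by nova',
                             '*' + table_name,
                             'COMMIT',
                             '#Completed by nova']
        return current_lines

    lines = bootstrap_table([], 'filter')
    assert lines[1] == '*filter' and lines[-2] == 'COMMIT'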
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 897472d08..8ee8f0290 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -47,7 +47,6 @@ import datetime
import itertools
import math
import re
-import socket
import uuid
from eventlet import greenpool
diff --git a/nova/notifications.py b/nova/notifications.py
index 65428d03f..f40fff7f2 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -21,7 +21,6 @@ the system.
import nova.context
from nova import db
-from nova import exception
from nova.image import glance
from nova import network
from nova.network import model as network_model
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 585acbaf8..390276ea3 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -14,7 +14,6 @@
# under the License.
-from nova import availability_zones
from nova import db
from nova.openstack.common import cfg
from nova.scheduler import filters
diff --git a/nova/service.py b/nova/service.py
index c250673f4..2daceba80 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -38,7 +38,6 @@ from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
from nova import servicegroup
from nova import utils
from nova import version
@@ -426,6 +425,7 @@ class Service(object):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
+ self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
@@ -570,6 +570,16 @@ class Service(object):
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
+ def basic_config_check(self):
+ """Perform basic config checks before starting processing."""
+ # Make sure the tempdir exists and is writable
+ try:
+ with utils.tempdir() as tmpdir:
+ pass
+ except Exception as e:
+ LOG.error(_('Temporary directory is invalid: %s'), e)
+ sys.exit(1)
+
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index 686ee728b..18b4b74e5 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -16,7 +16,6 @@
from nova import conductor
from nova import context
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/tests/baremetal/test_nova_baremetal_manage.py b/nova/tests/baremetal/test_nova_baremetal_manage.py
index 4d152a028..c4fdaac6b 100644
--- a/nova/tests/baremetal/test_nova_baremetal_manage.py
+++ b/nova/tests/baremetal/test_nova_baremetal_manage.py
@@ -20,10 +20,6 @@ import imp
import os
import sys
-from nova import context
-from nova import test
-from nova.virt.baremetal import db as bmdb
-
from nova.tests.baremetal.db import base as bm_db_base
TOPDIR = os.path.normpath(os.path.join(
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index dafa9bab7..09f1079bf 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -21,12 +21,10 @@
import os
-import mox
from testtools import matchers
from nova import exception
from nova.openstack.common import cfg
-from nova import test
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
diff --git a/nova/tests/baremetal/test_utils.py b/nova/tests/baremetal/test_utils.py
index 2615a26cb..df5112deb 100644
--- a/nova/tests/baremetal/test_utils.py
+++ b/nova/tests/baremetal/test_utils.py
@@ -18,12 +18,9 @@
"""Tests for baremetal utils."""
-import mox
-
import errno
import os
-from nova import exception
from nova import test
from nova.virt.baremetal import utils
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index da45721ed..1208368c2 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -14,8 +14,6 @@
"""
Tests For Cells Messaging module
"""
-import mox
-
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index dbf620196..ead43adea 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -157,6 +157,10 @@ policy_data = """
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:zones": "",
"compute_extension:availability_zone:list": "",
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 94f297fe9..c665aa1b3 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -15,7 +15,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import shutil
import fixtures
import mox
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
index 2c5c06921..4192fa08f 100644
--- a/nova/tests/test_availability_zones.py
+++ b/nova/tests/test_availability_zones.py
@@ -23,7 +23,6 @@ from nova import availability_zones as az
from nova import context
from nova import db
from nova.openstack.common import cfg
-from nova import service
from nova import test
CONF = cfg.CONF
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
index 0915bf157..527534fd5 100644
--- a/nova/tests/test_context.py
+++ b/nova/tests/test_context.py
@@ -74,3 +74,22 @@ class ContextTestCase(test.TestCase):
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
+
+ def test_service_catalog_default(self):
+ ctxt = context.RequestContext('111', '222')
+ self.assertEquals(ctxt.service_catalog, [])
+
+ def test_service_catalog_cinder_only(self):
+ service_catalog = [
+ {u'type': u'compute', u'name': u'nova'},
+ {u'type': u's3', u'name': u's3'},
+ {u'type': u'image', u'name': u'glance'},
+ {u'type': u'volume', u'name': u'cinder'},
+ {u'type': u'ec2', u'name': u'ec2'},
+ {u'type': u'object-store', u'name': u'swift'},
+ {u'type': u'identity', u'name': u'keystone'}]
+
+ volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
+ ctxt = context.RequestContext('111', '222',
+ service_catalog=service_catalog)
+ self.assertEquals(ctxt.service_catalog, volume_catalog)
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
index c8f310303..95af25ebd 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/test_iptables_network.py
@@ -170,3 +170,22 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
+
+ def test_missing_table(self):
+ current_lines = []
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ table_name='filter')
+
+ for line in ['*filter',
+ 'COMMIT']:
+ self.assertTrue(line in new_lines, "One of iptables key lines"
+ "went missing.")
+
+ self.assertTrue(len(new_lines) > 4, "No iptables rules added")
+
+ self.assertTrue("#Generated by nova" == new_lines[0] and
+ "*filter" == new_lines[1] and
+ "COMMIT" == new_lines[-2] and
+ "#Completed by nova" == new_lines[-1],
+ "iptables rules not generated in the correct order")
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index 3861d7dfa..7ce81cc09 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -16,6 +16,8 @@
from lxml import etree
+from nova import exception
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova import test
from nova import utils
@@ -48,7 +50,8 @@ class LibvirtVifTestCase(test.TestCase):
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
- 'vif_devname': 'tap-xxx-yyy-zzz'
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
}
net_ovs = {
@@ -75,6 +78,15 @@ class LibvirtVifTestCase(test.TestCase):
'ovs_interfaceid': 'aaa-bbb-ccc',
}
+ mapping_none = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'gateway_v6': net_bridge['gateway_v6'],
+ 'ips': [{'ip': '101.168.1.9'}],
+ 'dhcp_server': '191.168.1.1',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ }
+
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
@@ -149,7 +161,7 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=False,
libvirt_type='kvm')
- d = vif.LibvirtBridgeDriver()
+ d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -168,7 +180,7 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
- d = vif.LibvirtBridgeDriver()
+ d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -187,7 +199,7 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
- d = vif.LibvirtBridgeDriver()
+ d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -206,7 +218,7 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='xen')
- d = vif.LibvirtBridgeDriver()
+ d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -221,8 +233,15 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
- def test_bridge_driver(self):
- d = vif.LibvirtBridgeDriver()
+ def test_generic_driver_none(self):
+ d = vif.LibvirtGenericVIFDriver()
+ self.assertRaises(exception.NovaException,
+ self._get_instance_xml,
+ d,
+ self.net_bridge,
+ self.mapping_none)
+
+ def _check_bridge_driver(self, d):
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -237,6 +256,14 @@ class LibvirtVifTestCase(test.TestCase):
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_bridge['mac'])
+ def test_bridge_driver(self):
+ d = vif.LibvirtBridgeDriver()
+ self._check_bridge_driver(d)
+
+ def test_generic_driver_bridge(self):
+ d = vif.LibvirtGenericVIFDriver()
+ self._check_bridge_driver(d)
+
def test_ovs_ethernet_driver(self):
d = vif.LibvirtOpenVswitchDriver()
xml = self._get_instance_xml(d,
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index f0ed0a863..3b67e6439 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -87,6 +87,16 @@ def _have_mysql():
return present.lower() in ('', 'true')
+def get_table(engine, name):
+ """Returns an sqlalchemy table dynamically from db.
+
+ Needed because the models don't work for us in migrations
+ as models will be far out of sync with the current data."""
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+ return sqlalchemy.Table(name, metadata, autoload=True)
+
+
class TestMigrations(test.TestCase):
"""Test sqlalchemy-migrate migrations."""
@@ -227,19 +237,11 @@ class TestMigrations(test.TestCase):
self.engines["mysqlcitest"] = engine
self.test_databases["mysqlcitest"] = connect_string
- # Test that we end in an innodb
- self._check_mysql_innodb(engine)
- # Test IP transition
- self._check_mysql_migration_149(engine)
-
- def _check_mysql_innodb(self, engine):
# build a fully populated mysql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
- uri = _get_connect_string("mysql", database="information_schema")
- connection = sqlalchemy.create_engine(uri).connect()
-
+ connection = engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
@@ -253,92 +255,8 @@ class TestMigrations(test.TestCase):
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
-
- def test_migration_149_postgres(self):
- """Test updating a table with IPAddress columns."""
- if not _is_backend_avail('postgres'):
- self.skipTest("postgres not available")
-
- connect_string = _get_connect_string("postgres")
- engine = sqlalchemy.create_engine(connect_string)
-
- self.engines["postgrescitest"] = engine
- self.test_databases["postgrescitest"] = connect_string
-
- self._reset_databases()
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
-
- connection = engine.connect()
-
- self._migrate_up(engine, 148)
- IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1")
- connection.execute("INSERT INTO provider_fw_rules "
- " (protocol, from_port, to_port, cidr)"
- "VALUES ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s')" % IPS)
- self.assertEqual('character varying',
- connection.execute(
- "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_catalog='openstack_citest' "
- "AND column_name='cidr'").scalar())
-
- self._migrate_up(engine, 149)
- self.assertEqual(IPS,
- tuple(tup[0] for tup in connection.execute(
- "SELECT cidr from provider_fw_rules").fetchall()))
- self.assertEqual('inet',
- connection.execute(
- "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_catalog='openstack_citest' "
- "AND column_name='cidr'").scalar())
connection.close()
- def _check_mysql_migration_149(self, engine):
- """Test updating a table with IPAddress columns."""
- self._reset_databases()
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
-
- uri = _get_connect_string("mysql", database="openstack_citest")
- connection = sqlalchemy.create_engine(uri).connect()
-
- self._migrate_up(engine, 148)
-
- IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1")
- connection.execute("INSERT INTO provider_fw_rules "
- " (protocol, from_port, to_port, cidr)"
- "VALUES ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s')" % IPS)
- self.assertEqual('varchar(255)',
- connection.execute(
- "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_schema='openstack_citest' "
- "AND column_name='cidr'").scalar())
-
- connection.close()
-
- self._migrate_up(engine, 149)
-
- connection = sqlalchemy.create_engine(uri).connect()
-
- self.assertEqual(IPS,
- tuple(tup[0] for tup in connection.execute(
- "SELECT cidr from provider_fw_rules").fetchall()))
- self.assertEqual('varchar(39)',
- connection.execute(
- "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_schema='openstack_citest' "
- "AND column_name='cidr'").scalar())
-
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
@@ -360,7 +278,7 @@ class TestMigrations(test.TestCase):
for version in xrange(migration.INIT_VERSION + 2,
TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
- self._migrate_up(engine, version)
+ self._migrate_up(engine, version, with_data=True)
if snake_walk:
self._migrate_down(engine, version)
self._migrate_up(engine, version)
@@ -385,7 +303,19 @@ class TestMigrations(test.TestCase):
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
- def _migrate_up(self, engine, version):
+ def _migrate_up(self, engine, version, with_data=False):
+ """migrate up to a new version of the db.
+
+ We allow for data insertion and post checks at every
+ migration version with special _prerun_### and
+ _check_### functions in the main test.
+ """
+ if with_data:
+ data = None
+ prerun = getattr(self, "_prerun_%d" % version, None)
+ if prerun:
+ data = prerun(engine)
+
migration_api.upgrade(engine,
TestMigrations.REPOSITORY,
version)
@@ -393,168 +323,132 @@ class TestMigrations(test.TestCase):
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
- def test_migration_146(self):
- name = 'name'
- az = 'custom_az'
-
- def _145_check():
- agg = aggregates.select(aggregates.c.id == 1).execute().first()
- self.assertEqual(name, agg.name)
- self.assertEqual(az, agg.availability_zone)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 145)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- aggregates = sqlalchemy.Table('aggregates', metadata,
- autoload=True)
-
- aggregates.insert().values(id=1, availability_zone=az,
- aggregate_name=1, name=name).execute()
-
- _145_check()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
-
- aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
- metadata, autoload=True)
- metadata = aggregate_metadata.select(aggregate_metadata.c.
- aggregate_id == 1).execute().first()
- self.assertEqual(az, metadata['value'])
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
- _145_check()
-
- def test_migration_147(self):
+ if with_data:
+ check = getattr(self, "_check_%d" % version, None)
+ if check:
+ check(engine, data)
+
+ # migration 146, availability zone transition
+ def _prerun_146(self, engine):
+ data = {
+ 'id': 1,
+ 'availability_zone': 'custom_az',
+ 'aggregate_name': 1,
+ 'name': 'name',
+ }
+
+ aggregates = get_table(engine, 'aggregates')
+ aggregates.insert().values(data).execute()
+ return data
+
+ def _check_146(self, engine, data):
+ aggregate_md = get_table(engine, 'aggregate_metadata')
+ md = aggregate_md.select(
+ aggregate_md.c.aggregate_id == 1).execute().first()
+ self.assertEqual(data['availability_zone'], md['value'])
+
+ # migration 147, availability zone transition for services
+ def _prerun_147(self, engine):
az = 'test_zone'
host1 = 'compute-host1'
host2 = 'compute-host2'
-
- def _146_check():
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(az, service.availability_zone)
- self.assertEqual(host1, service.host)
- service = services.select(services.c.id == 2).execute().first()
- self.assertNotEqual(az, service.availability_zone)
- service = services.select(services.c.id == 3).execute().first()
- self.assertEqual(az, service.availability_zone)
- self.assertEqual(host2, service.host)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
-
- #populate service table
- services = sqlalchemy.Table('services', metadata,
- autoload=True)
- services.insert().values(id=1, host=host1,
- binary='nova-compute', topic='compute', report_count=0,
- availability_zone=az).execute()
- services.insert().values(id=2, host='sched-host',
- binary='nova-scheduler', topic='scheduler', report_count=0,
- availability_zone='ignore_me').execute()
- services.insert().values(id=3, host=host2,
- binary='nova-compute', topic='compute', report_count=0,
- availability_zone=az).execute()
-
- _146_check()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147)
-
- # check aggregate metadata
- aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
- metadata, autoload=True)
- aggregate_hosts = sqlalchemy.Table('aggregate_hosts',
- metadata, autoload=True)
- metadata = aggregate_metadata.select(aggregate_metadata.c.
- aggregate_id == 1).execute().first()
- self.assertEqual(az, metadata['value'])
- self.assertEqual(aggregate_hosts.select(
- aggregate_hosts.c.aggregate_id == 1).execute().
- first().host, host1)
- blank = [h for h in aggregate_hosts.select(
- aggregate_hosts.c.aggregate_id == 2).execute()]
- self.assertEqual(blank, [])
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)
-
- _146_check()
-
- def test_migration_152(self):
+ # start at id == 2 because we already inserted one
+ data = [
+ {'id': 1, 'host': host1,
+ 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': az},
+ {'id': 2, 'host': 'sched-host',
+ 'binary': 'nova-scheduler', 'topic': 'scheduler',
+ 'report_count': 0, 'availability_zone': 'ignore_me'},
+ {'id': 3, 'host': host2,
+ 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': az},
+ ]
+
+ services = get_table(engine, 'services')
+ engine.execute(services.insert(), data)
+ return data
+
+ def _check_147(self, engine, data):
+ aggregate_md = get_table(engine, 'aggregate_metadata')
+ aggregate_hosts = get_table(engine, 'aggregate_hosts')
+ # NOTE(sdague): hard coded to id == 2, because we added to
+ # aggregate_metadata previously
+ for item in data:
+ md = aggregate_md.select(
+ aggregate_md.c.aggregate_id == 2).execute().first()
+ if item['binary'] == "nova-compute":
+ self.assertEqual(item['availability_zone'], md['value'])
+
+ host = aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 2
+ ).execute().first()
+ self.assertEqual(host['host'], data[0]['host'])
+
+ # NOTE(sdague): id 3 is just non-existent
+ host = aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 3
+ ).execute().first()
+ self.assertEqual(host, None)
+
+ # migration 149, changes IPAddr storage format
+ def _prerun_149(self, engine):
+ provider_fw_rules = get_table(engine, 'provider_fw_rules')
+ data = [
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "127.0.0.1"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "255.255.255.255"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "2001:db8::1:2"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "::1"}
+ ]
+ engine.execute(provider_fw_rules.insert(), data)
+ return data
+
+ def _check_149(self, engine, data):
+ provider_fw_rules = get_table(engine, 'provider_fw_rules')
+ result = provider_fw_rules.select().execute()
+
+ iplist = map(lambda x: x['cidr'], data)
+
+ for row in result:
+ self.assertIn(row['cidr'], iplist)
+
+ # migration 152 - convert deleted from boolean to int
+ def _prerun_152(self, engine):
host1 = 'compute-host1'
host2 = 'compute-host2'
-
- def _151_check(services, volumes):
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(False, service.deleted)
- service = services.select(services.c.id == 2).execute().first()
- self.assertEqual(True, service.deleted)
-
- volume = volumes.select(volumes.c.id == "first").execute().first()
- self.assertEqual(False, volume.deleted)
- volume = volumes.select(volumes.c.id == "second").execute().first()
- self.assertEqual(True, volume.deleted)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 151)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
-
- # NOTE(boris-42): It is enough to test one table with type of `id`
- # column Integer and one with type String.
- services = sqlalchemy.Table('services', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
-
- engine.execute(
- services.insert(),
- [
- {'id': 1, 'host': host1, 'binary': 'nova-compute',
- 'report_count': 0, 'topic': 'compute', 'deleted': False},
- {'id': 2, 'host': host1, 'binary': 'nova-compute',
- 'report_count': 0, 'topic': 'compute', 'deleted': True}
- ]
- )
-
- engine.execute(
- volumes.insert(),
- [
- {'id': 'first', 'host': host1, 'deleted': False},
- {'id': 'second', 'host': host2, 'deleted': True}
- ]
- )
-
- _151_check(services, volumes)
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 152)
- # NOTE(boris-42): One more time get from DB info about tables.
- metadata2 = sqlalchemy.schema.MetaData()
- metadata2.bind = engine
-
- services = sqlalchemy.Table('services', metadata2, autoload=True)
-
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(0, service.deleted)
- service = services.select(services.c.id == 2).execute().first()
- self.assertEqual(service.id, service.deleted)
-
- volumes = sqlalchemy.Table('volumes', metadata2, autoload=True)
- volume = volumes.select(volumes.c.id == "first").execute().first()
- self.assertEqual("", volume.deleted)
- volume = volumes.select(volumes.c.id == "second").execute().first()
- self.assertEqual(volume.id, volume.deleted)
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 151)
- # NOTE(boris-42): One more time get from DB info about tables.
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- services = sqlalchemy.Table('services', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
-
- _151_check(services, volumes)
+ # NOTE(sdague): start at #4 because services data already in table
+ # from 147
+ services_data = [
+ {'id': 4, 'host': host1, 'binary': 'nova-compute',
+ 'report_count': 0, 'topic': 'compute', 'deleted': False},
+ {'id': 5, 'host': host1, 'binary': 'nova-compute',
+ 'report_count': 0, 'topic': 'compute', 'deleted': True}
+ ]
+ volumes_data = [
+ {'id': 'first', 'host': host1, 'deleted': False},
+ {'id': 'second', 'host': host2, 'deleted': True}
+ ]
+
+ services = get_table(engine, 'services')
+ engine.execute(services.insert(), services_data)
+
+ volumes = get_table(engine, 'volumes')
+ engine.execute(volumes.insert(), volumes_data)
+ return dict(services=services_data, volumes=volumes_data)
+
+ def _check_152(self, engine, data):
+ services = get_table(engine, 'services')
+ service = services.select(services.c.id == 4).execute().first()
+ self.assertEqual(0, service.deleted)
+ service = services.select(services.c.id == 5).execute().first()
+ self.assertEqual(service.id, service.deleted)
+
+ volumes = get_table(engine, 'volumes')
+ volume = volumes.select(volumes.c.id == "first").execute().first()
+ self.assertEqual("", volume.deleted)
+ volume = volumes.select(volumes.c.id == "second").execute().first()
+ self.assertEqual(volume.id, volume.deleted)
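The rewrite above replaces one hand-rolled test per migration with data hooks discovered by name: _walk_versions calls _migrate_up(engine, version, with_data=True), which looks up _prerun_NNN to seed rows before the upgrade and _check_NNN to verify them afterwards. The dispatch idiom in isolation:

    class MigrationWalker(object):
        """Sketch of the _prerun_/_check_ hook discovery used above."""

        def _prerun_147(self, engine):
            return {'seeded': True}

        def _check_147(self, engine, data):
            assert data['seeded']

        def migrate_up(self, engine, version, with_data=False):
            data = None
            if with_data:
                prerun = getattr(self, '_prerun_%d' % version, None)
                if prerun:
                    data = prerun(engine)
            # ... the real schema upgrade would run here ...
            if with_data:
                check = getattr(self, '_check_%d' % version, None)
                if check:
                    check(engine, data)

    MigrationWalker().migrate_up(engine=None, version=147, with_data=True)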
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
index 3c63f6d4a..621e86b3a 100644
--- a/nova/tests/test_periodic_tasks.py
+++ b/nova/tests/test_periodic_tasks.py
@@ -17,7 +17,6 @@
import time
-import fixtures
from testtools import matchers
from nova import manager
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 6ea2d0ef7..e8e7c329a 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -119,8 +119,6 @@ class _FakeDriverBackendTestCase(object):
def _teardown_fakelibvirt(self):
# Restore libvirt
- import nova.virt.libvirt.driver
- import nova.virt.libvirt.firewall
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 8db4c80ad..34f03a555 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -41,7 +42,9 @@ class VMwareAPIVMTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
- vmwareapi_host_password='test_pass')
+ vmwareapi_host_password='test_pass',
+ vnc_enabled=False,
+ use_linked_clone=False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -211,7 +214,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.instance, self.network_info, 'SOFT')
@@ -221,7 +224,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
@@ -234,7 +237,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
@@ -251,6 +254,43 @@ class VMwareAPIVMTestCase(test.TestCase):
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
+ def test_power_on(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.conn.power_on(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_power_on_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
+ self.instance)
+
+ def test_power_off(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_power_off_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
+ self.instance)
+
+ def test_power_off_suspended(self):
+ self._create_vm()
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.assertRaises(exception.InstancePowerOffFailure,
+ self.conn.power_off, self.instance)
+
def test_get_info(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
@@ -282,3 +322,48 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_get_console_output(self):
pass
+
+
+class VMwareAPIHostTestCase(test.TestCase):
+ """Unit tests for Vmware API host calls."""
+
+ def setUp(self):
+ super(VMwareAPIHostTestCase, self).setUp()
+ self.flags(vmwareapi_host_ip='test_url',
+ vmwareapi_host_username='test_username',
+ vmwareapi_host_password='test_pass')
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self.conn = driver.VMwareESXDriver(False)
+
+ def tearDown(self):
+ super(VMwareAPIHostTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+
+ def test_host_state(self):
+ stats = self.conn.get_host_stats()
+ self.assertEquals(stats['vcpus'], 16)
+ self.assertEquals(stats['disk_total'], 1024)
+ self.assertEquals(stats['disk_available'], 500)
+ self.assertEquals(stats['disk_used'], 1024 - 500)
+ self.assertEquals(stats['host_memory_total'], 1024)
+ self.assertEquals(stats['host_memory_free'], 1024 - 500)
+
+ def _test_host_action(self, method, action, expected=None):
+ result = method('host', action)
+ self.assertEqual(result, expected)
+
+ def test_host_reboot(self):
+ self._test_host_action(self.conn.host_power_action, 'reboot')
+
+ def test_host_shutdown(self):
+ self._test_host_action(self.conn.host_power_action, 'shutdown')
+
+ def test_host_startup(self):
+ self._test_host_action(self.conn.host_power_action, 'startup')
+
+ def test_host_maintenance_on(self):
+ self._test_host_action(self.conn.host_maintenance_mode, True)
+
+ def test_host_maintenance_off(self):
+ self._test_host_action(self.conn.host_maintenance_mode, False)
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
index 275088af0..633e6f835 100644
--- a/nova/tests/virt/xenapi/test_vm_utils.py
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -19,10 +19,8 @@
import contextlib
import fixtures
import mox
-import uuid
from nova import test
-from nova.tests.xenapi import stubs
from nova import utils
from nova.virt.xenapi import vm_utils
diff --git a/nova/utils.py b/nova/utils.py
index 97091e42c..52d4868c9 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -76,6 +76,9 @@ utils_opts = [
default="/etc/nova/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
+ cfg.StrOpt('tempdir',
+ default=None,
+ help='Explicitly specify the temporary working directory'),
]
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
@@ -1147,6 +1150,7 @@ def temporary_chown(path, owner_uid=None):
@contextlib.contextmanager
def tempdir(**kwargs):
+ tempfile.tempdir = CONF.tempdir
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
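Assigning tempfile.tempdir redirects every subsequent mkdtemp/mkstemp in the process, which is what makes the context manager honor CONF.tempdir even for callers that pass no arguments; the narrower alternative is passing dir= per call. An equivalent sketch that scopes the override to the one call:

    import contextlib
    import shutil
    import tempfile

    @contextlib.contextmanager
    def tempdir(base=None, **kwargs):
        """Like nova.utils.tempdir, but passing dir= instead of mutating
        the tempfile module global (sketch; base ~ CONF.tempdir)."""
        kwargs.setdefault('dir', base)
        tmpdir = tempfile.mkdtemp(**kwargs)
        try:
            yield tmpdir
        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)

    with tempdir() as d:
        print(d)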
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 11af99d41..5a6f58655 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -27,11 +27,9 @@ from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
-from nova import utils
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
-from nova.virt.disk import api as disk
pxe_opts = [
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 46b63d1c6..c865e4b3a 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -140,7 +140,7 @@ libvirt_opts = [
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
- default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
+ default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d90a5e295..45c299830 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -28,7 +28,7 @@ from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
-from nova.virt import netutils
+
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
@@ -72,19 +72,22 @@ class LibvirtBaseVIFDriver(object):
return conf
+ def plug(self, instance, vif):
+ pass
+
+ def unplug(self, instance, vif):
+ pass
-class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
- """VIF driver for Linux bridge."""
+
+class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
+ """Generic VIF driver for libvirt networking."""
def get_bridge_name(self, network):
return network['bridge']
- def get_config(self, instance, network, mapping):
+ def get_config_bridge(self, instance, network, mapping):
"""Get VIF configurations for bridge type."""
-
- mac_id = mapping['mac'].replace(':', '')
-
- conf = super(LibvirtBridgeDriver,
+ conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
mapping)
@@ -93,6 +96,7 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
conf, self.get_bridge_name(network),
self.get_vif_devname(mapping))
+ mac_id = mapping['mac'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
primary_addr = mapping['ips'][0]['ip']
dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
@@ -112,8 +116,29 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
return conf
- def plug(self, instance, vif):
+ def get_config(self, instance, network, mapping):
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ return self.get_config_bridge(instance, network, mapping)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
+ def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
@@ -135,9 +160,71 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
self.get_bridge_name(network),
iface)
- def unplug(self, instance, vif):
+ def plug(self, instance, vif):
+ network, mapping = vif
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ self.plug_bridge(instance, vif)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
+ def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
- pass
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ def unplug(self, instance, vif):
+ network, mapping = vif
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ self.unplug_bridge(instance, vif)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
+
+class LibvirtBridgeDriver(LibvirtGenericVIFDriver):
+ """Deprecated in favour of LibvirtGenericVIFDriver.
+ Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ To be removed in Hxxxx."""
+
+ def __init__(self):
+ LOG.deprecated(
+ _("LibvirtBridgeDriver is deprecated and "
+ "will be removed in the Hxxxx release. Please "
+ "update the 'libvirt_vif_driver' config parameter "
+ "to use the LibvirtGenericVIFDriver class instead"))
+
+ def get_config(self, instance, network, mapping):
+ return self.get_config_bridge(instance, network, mapping)
+
+ def plug(self, instance, vif):
+ self.plug_bridge(instance, vif)
+
+ def unplug(self, instance, vif):
+ self.unplug_bridge(instance, vif)
class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
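get_config, plug and unplug now each repeat the same vif_type switch; as more port-binding types migrate into the generic driver, a dispatch table is one way to keep the three methods from growing parallel if/elif ladders. A hedged sketch of that factoring (illustrative only, not the code merged above):

    VIF_TYPE_BRIDGE = 'bridge'

    class GenericVIFDriver(object):
        """Toy table-driven vif_type dispatch."""

        def get_config_bridge(self, instance, network, mapping):
            return {'type': 'bridge', 'bridge': network['bridge']}

        def get_config(self, instance, network, mapping):
            handlers = {VIF_TYPE_BRIDGE: self.get_config_bridge}
            vif_type = mapping.get('vif_type')
            if vif_type is None:
                raise ValueError('vif_type parameter must be present')
            if vif_type not in handlers:
                raise ValueError('Unexpected vif_type=%s' % vif_type)
            return handlers[vif_type](instance, network, mapping)

    driver = GenericVIFDriver()
    conf = driver.get_config({'name': 'i-1'}, {'bridge': 'br100'},
                             {'vif_type': VIF_TYPE_BRIDGE})
    assert conf['bridge'] == 'br100'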
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index fb3a0210c..76caca1b9 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -18,16 +18,11 @@ import hashlib
import os
import re
-from eventlet import greenthread
-
-from nova import utils
-
from nova.image import glance
-
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-
+from nova import utils
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index ccba3cf73..0ce313535 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -14,19 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import time
-from nova.compute import task_states
-from nova.compute import vm_states
-
-from nova import context as nova_context
-
from nova.image import glance
-
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-
from nova.virt import driver
from nova.virt.powervm import operator
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index d5a7e5875..67822f2c9 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -20,16 +21,19 @@ A connection to the VMware ESX platform.
**Related Flags**
-:vmwareapi_host_ip: IPAddress of VMware ESX server.
-:vmwareapi_host_username: Username for connection to VMware ESX Server.
-:vmwareapi_host_password: Password for connection to VMware ESX Server.
-:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
- remote tasks
- (default: 1.0).
-:vmwareapi_api_retry_count: The API retry count in case of failure such as
- network failures (socket errors etc.)
- (default: 10).
-
+:vmwareapi_host_ip: IP address of VMware ESX server.
+:vmwareapi_host_username: Username for connection to VMware ESX Server.
+:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks
+ (default: 5.0).
+:vmwareapi_api_retry_count: The API retry count in case of failure such as
+ network failures (socket errors etc.)
+ (default: 10).
+:vnc_port: VNC starting port (default: 5900)
+:vnc_port_total: Total number of VNC ports (default: 10000)
+:vnc_password: VNC password
+:use_linked_clone: Whether to use linked clone (default: True)
"""
import time
@@ -38,10 +42,12 @@ from eventlet import event
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
@@ -76,6 +82,18 @@ vmwareapi_opts = [
'socket error, etc. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver.'),
+ cfg.IntOpt('vnc_port',
+ default=5900,
+ help='VNC starting port'),
+ cfg.IntOpt('vnc_port_total',
+ default=10000,
+ help='Total number of VNC ports'),
+ cfg.StrOpt('vnc_password',
+ default=None,
+ help='VNC password'),
+ cfg.BoolOpt('use_linked_clone',
+ default=True,
+ help='Whether to use linked clone'),
]
CONF = cfg.CONF
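
The four new options sit alongside the existing vmwareapi_* flags; a minimal nova.conf sketch exercising them (values illustrative, and the [DEFAULT] group is an assumption based on the surrounding options):

    [DEFAULT]
    vnc_port=5900
    vnc_port_total=10000
    vnc_password=not-a-real-secret
    use_linked_clone=False
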
@@ -100,20 +118,31 @@ class VMwareESXDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
- host_ip = CONF.vmwareapi_host_ip
+ self._host_ip = CONF.vmwareapi_host_ip
host_username = CONF.vmwareapi_host_username
host_password = CONF.vmwareapi_host_password
api_retry_count = CONF.vmwareapi_api_retry_count
- if not host_ip or host_username is None or host_password is None:
+ if not self._host_ip or host_username is None or host_password is None:
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
"compute_driver=vmwareapi.VMwareESXDriver"))
- self._session = VMwareAPISession(host_ip, host_username, host_password,
- api_retry_count, scheme=scheme)
+ self._session = VMwareAPISession(self._host_ip,
+ host_username, host_password,
+ api_retry_count, scheme=scheme)
self._volumeops = volumeops.VMwareVolumeOps(self._session)
- self._vmops = vmops.VMwareVMOps(self._session)
+ self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
+ self._volumeops)
+ self._host = host.Host(self._session)
+ self._host_state = None
+
+ @property
+ def host_state(self):
+ if not self._host_state:
+ self._host_state = host.HostState(self._session,
+ self._host_ip)
+ return self._host_state
def init_host(self, host):
"""Do the initialization that needs to be done."""
@@ -130,7 +159,8 @@ class VMwareESXDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
- self._vmops.spawn(context, instance, image_meta, network_info)
+ self._vmops.spawn(context, instance, image_meta, network_info,
+ block_device_info)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
@@ -162,6 +192,61 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
+ def rescue(self, context, instance, network_info, image_meta,
+ rescue_password):
+ """Rescue the specified instance."""
+ self._vmops.rescue(context, instance, network_info, image_meta)
+
+ def unrescue(self, instance, network_info):
+ """Unrescue the specified instance."""
+ self._vmops.unrescue(instance)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ self._vmops.power_on(instance)
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ return self._vmops.migrate_disk_and_power_off(context, instance,
+ dest, instance_type)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ self._vmops.confirm_migration(migration, instance, network_info)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ """Finish reverting a resize, powering back on the instance."""
+ self._vmops.finish_revert_migration(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
+ """Completes a resize, turning on the migrated instance."""
+ self._vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False,
+ migrate_data=None):
+ """Live migration of an instance to another host."""
+ self._vmops.live_migration(context, instance_ref, dest,
+ post_method, recover_method,
+ block_migration)
+
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances."""
+ self._vmops.poll_rebooting_instances(timeout, instances)
+
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
@@ -174,10 +259,18 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
+ def get_vnc_console(self, instance):
+ """Return link to instance's VNC console."""
+ return self._vmops.get_vnc_console(instance)
+
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
+ def get_host_ip_addr(self):
+ """Retrieves the IP address of the ESX host."""
+ return self._host_ip
+
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
@@ -197,8 +290,53 @@ class VMwareESXDriver(driver.ComputeDriver):
'password': CONF.vmwareapi_host_password}
def get_available_resource(self, nodename):
- """This method is supported only by libvirt."""
- return
+ """Retrieve resource info.
+
+ This method is called when nova-compute launches, and
+ as part of a periodic task.
+
+ :returns: dictionary describing resources
+
+ """
+ host_stats = self.get_host_stats(refresh=True)
+
+ # Updating host information
+ dic = {'vcpus': host_stats["vcpus"],
+ 'memory_mb': host_stats['host_memory_total'],
+ 'local_gb': host_stats['disk_total'],
+ 'vcpus_used': 0,
+ 'memory_mb_used': host_stats['host_memory_total'] -
+ host_stats['host_memory_free'],
+ 'local_gb_used': host_stats['disk_used'],
+ 'hypervisor_type': host_stats['hypervisor_type'],
+ 'hypervisor_version': host_stats['hypervisor_version'],
+ 'hypervisor_hostname': host_stats['hypervisor_hostname'],
+ 'cpu_info': jsonutils.dumps(host_stats['cpu_info'])}
+
+ return dic
+
+ def update_host_status(self):
+ """Update the status info of the host, and return those values
+ to the calling program."""
+ return self.host_state.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first."""
+ return self.host_state.get_host_stats(refresh=refresh)
+
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ return self._host.host_power_action(host, action)
+
+ def host_maintenance_mode(self, host, mode):
+ """Start/Stop host maintenance window. On start, it triggers
+ guest VMs evacuation."""
+ return self._host.host_maintenance_mode(host, mode)
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._host.set_host_enabled(host, enabled)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 7fb014075..692e5f253 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -255,6 +255,8 @@ class Datastore(ManagedObject):
super(Datastore, self).__init__("Datastore")
self.set("summary.type", "VMFS")
self.set("summary.name", "fake-ds")
+ self.set("summary.capacity", 1024 * 1024 * 1024)
+ self.set("summary.freeSpace", 500 * 1024 * 1024)
class HostNetworkSystem(ManagedObject):
@@ -285,6 +287,29 @@ class HostSystem(ManagedObject):
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
+ summary = DataObject()
+ hardware = DataObject()
+ hardware.numCpuCores = 8
+ hardware.numCpuPkgs = 2
+ hardware.numCpuThreads = 16
+ hardware.vendor = "Intel"
+ hardware.cpuModel = "Intel(R) Xeon(R)"
+ hardware.memorySize = 1024 * 1024 * 1024
+ summary.hardware = hardware
+
+ quickstats = DataObject()
+ quickstats.overallMemoryUsage = 500
+ summary.quickStats = quickstats
+
+ product = DataObject()
+ product.name = "VMware ESXi"
+ product.version = "5.0.0"
+ config = DataObject()
+ config.product = product
+ summary.config = config
+
+ self.set("summary", summary)
+
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
@@ -599,6 +624,11 @@ class FakeVim(object):
"""Fakes a return."""
return
+ def _just_return_task(self, method):
+ """Fakes a task return."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
@@ -627,7 +657,7 @@ class FakeVim(object):
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
- raise exception.NotFound(_(" No Virtual Machine has been "
+ raise exception.NotFound(_("No Virtual Machine has been "
"registered yet"))
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound(_("Virtual Machine with ref %s is not "
@@ -722,6 +752,9 @@ class FakeVim(object):
elif attr_name == "DeleteVirtualDisk_Task":
return lambda *args, **kwargs: self._delete_disk(attr_name,
*args, **kwargs)
+ elif attr_name == "Destroy_Task":
+ return lambda *args, **kwargs: self._unregister_vm(attr_name,
+ *args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
@@ -739,3 +772,15 @@ class FakeVim(object):
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
+ elif attr_name == "RebootHost_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "ShutdownHost_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "PowerDownHostToStandBy_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "PowerUpHostFromStandBy_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "EnterMaintenanceMode_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "ExitMaintenanceMode_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
new file mode 100644
index 000000000..09b8f1fe3
--- /dev/null
+++ b/nova/virt/vmwareapi/host.py
@@ -0,0 +1,140 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for host-related functions (start, reboot, etc).
+"""
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+LOG = logging.getLogger(__name__)
+
+
+class Host(object):
+ """
+    Implements host-related operations.
+ """
+ def __init__(self, session):
+ self._session = session
+
+ def host_power_action(self, host, action):
+ """Reboots or shuts down the host."""
+ host_mor = self._session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ LOG.debug(_("%(action)s %(host)s") % locals())
+ if action == "reboot":
+ host_task = self._session._call_method(
+ self._session._get_vim(),
+ "RebootHost_Task", host_mor,
+ force=False)
+ elif action == "shutdown":
+ host_task = self._session._call_method(
+ self._session._get_vim(),
+ "ShutdownHost_Task", host_mor,
+ force=False)
+ elif action == "startup":
+ host_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerUpHostFromStandBy_Task", host_mor,
+ timeoutSec=60)
+ self._session._wait_for_task(host, host_task)
+
+ def host_maintenance_mode(self, host, mode):
+ """Start/Stop host maintenance window. On start, it triggers
+ guest VMs evacuation."""
+ host_mor = self._session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s") % locals())
+ if mode:
+ host_task = self._session._call_method(
+ self._session._get_vim(),
+ "EnterMaintenanceMode_Task",
+ host_mor, timeout=0,
+ evacuatePoweredOffVms=True)
+ else:
+ host_task = self._session._call_method(
+ self._session._get_vim(),
+ "ExitMaintenanceMode_Task",
+ host_mor, timeout=0)
+ self._session._wait_for_task(host, host_task)
+
+ def set_host_enabled(self, _host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
+
+class HostState(object):
+ """Manages information about the ESX host this compute
+ node is running on.
+ """
+ def __init__(self, session, host_name):
+ super(HostState, self).__init__()
+ self._session = session
+ self._host_name = host_name
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Update the current state of the host.
+ """
+ host_mor = self._session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ summary = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ host_mor,
+ "HostSystem",
+ "summary")
+
+ if summary is None:
+ return
+
+ try:
+ ds = vm_util.get_datastore_ref_and_name(self._session)
+ except exception.DatastoreNotFound:
+ ds = (None, None, 0, 0)
+
+ data = {}
+ data["vcpus"] = summary.hardware.numCpuThreads
+ data["cpu_info"] = \
+ {"vendor": summary.hardware.vendor,
+ "model": summary.hardware.cpuModel,
+ "topology": {"cores": summary.hardware.numCpuCores,
+ "sockets": summary.hardware.numCpuPkgs,
+ "threads": summary.hardware.numCpuThreads}
+ }
+ data["disk_total"] = ds[2] / (1024 * 1024)
+ data["disk_available"] = ds[3] / (1024 * 1024)
+ data["disk_used"] = data["disk_total"] - data["disk_available"]
+ data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
+ data["host_memory_free"] = data["host_memory_total"] - \
+ summary.quickStats.overallMemoryUsage
+ data["hypervisor_type"] = summary.config.product.name
+ data["hypervisor_version"] = summary.config.product.version
+ data["hypervisor_hostname"] = self._host_name
+
+ self._stats = data
+ return data
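
A sketch of how a caller consumes HostState, mirroring what the driver does with the stats (an established session and reachable host are assumed):

    # `session` is assumed to be a live VMwareAPISession.
    host_state = HostState(session, '10.0.0.1')
    stats = host_state.get_host_stats(refresh=True)
    memory_mb_used = stats['host_memory_total'] - stats['host_memory_free']
    print('%(hypervisor_type)s %(hypervisor_version)s' % stats)
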
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index 39ea8e2e8..5dcdc6fdb 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -140,7 +140,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
self.conn.getresponse()
except Exception, excep:
LOG.debug(_("Exception during HTTP connection close in "
- "VMwareHTTpWrite. Exception is %s") % excep)
+ "VMwareHTTPWrite. Exception is %s") % excep)
super(VMwareHTTPWriteFile, self).close()
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index c5b524186..5684e6aa6 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -45,7 +45,7 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
if not network_util.check_if_vlan_interface_exists(session,
- vlan_interface):
+ vlan_interface):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 381c47193..af481b566 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,6 +20,7 @@ The VMware API VM utility module to build SOAP object specs.
"""
import copy
+from nova import exception
from nova.virt.vmwareapi import vim_util
@@ -360,6 +361,27 @@ def delete_virtual_disk_spec(client_factory, device):
return virtual_device_config
+def clone_vm_spec(client_factory, location,
+ power_on=False, snapshot=None, template=False):
+ """Builds the VM clone spec."""
+ clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
+ clone_spec.location = location
+ clone_spec.powerOn = power_on
+ clone_spec.snapshot = snapshot
+ clone_spec.template = template
+ return clone_spec
+
+
+def relocate_vm_spec(client_factory, datastore=None, host=None,
+ disk_move_type="moveAllDiskBackingsAndAllowSharing"):
+ """Builds the VM relocation spec."""
+ rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
+ rel_spec.datastore = datastore
+ rel_spec.diskMoveType = disk_move_type
+ rel_spec.host = host
+ return rel_spec
+
+
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -423,6 +445,31 @@ def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
return vswitch_port_group_spec
+def get_vnc_config_spec(client_factory, port, password):
+ """Builds the vnc config spec."""
+ virtual_machine_config_spec = client_factory.create(
+ 'ns0:VirtualMachineConfigSpec')
+
+ opt_enabled = client_factory.create('ns0:OptionValue')
+ opt_enabled.key = "RemoteDisplay.vnc.enabled"
+ opt_enabled.value = "true"
+ opt_port = client_factory.create('ns0:OptionValue')
+ opt_port.key = "RemoteDisplay.vnc.port"
+ opt_port.value = port
+ opt_pass = client_factory.create('ns0:OptionValue')
+ opt_pass.key = "RemoteDisplay.vnc.password"
+ opt_pass.value = password
+ virtual_machine_config_spec.extraConfig = [opt_enabled, opt_port, opt_pass]
+ return virtual_machine_config_spec
+
+
+def search_datastore_spec(client_factory, file_name):
+ """Builds the datastore search spec."""
+ search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
+ search_spec.matchPattern = [file_name]
+ return search_spec
+
+
def get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
@@ -431,3 +478,31 @@ def get_vm_ref_from_name(session, vm_name):
if vm.propSet[0].val == vm_name:
return vm.obj
return None
+
+
+def get_datastore_ref_and_name(session):
+ """Get the datastore list and choose the first local storage."""
+ data_stores = session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace"])
+ for elem in data_stores:
+ ds_name = None
+ ds_type = None
+ ds_cap = None
+ ds_free = None
+ for prop in elem.propSet:
+ if prop.name == "summary.type":
+ ds_type = prop.val
+ elif prop.name == "summary.name":
+ ds_name = prop.val
+ elif prop.name == "summary.capacity":
+ ds_cap = prop.val
+ elif prop.name == "summary.freeSpace":
+ ds_free = prop.val
+        # Take the first VMFS or NFS datastore found
+        if ds_type == "VMFS" or ds_type == "NFS":
+            data_store_name = ds_name
+            return elem.obj, data_store_name, ds_cap, ds_free
+
+    raise exception.DatastoreNotFound()
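
The two new spec builders are designed to compose: a relocate spec pins the datastore/host pair, and the clone spec wraps it. A sketch, assuming client_factory, ds_ref and host_ref are already resolved, as in migrate_disk_and_power_off below:

    rel_spec = relocate_vm_spec(client_factory, datastore=ds_ref, host=host_ref)
    clone_spec = clone_vm_spec(client_factory, rel_spec,
                               power_on=False, snapshot=None, template=False)
    # clone_spec is then handed to CloneVM_Task.
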
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 187fc449d..75f85454b 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,12 +27,16 @@ import urllib
import urllib2
import uuid
+from nova import block_device
+from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import task_states
+from nova import context as nova_context
from nova import exception
from nova.openstack.common import cfg
-from nova.openstack.common import importutils
+from nova.openstack.common import excutils
from nova.openstack.common import log as logging
+from nova.virt import driver
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
@@ -52,21 +56,33 @@ vmware_group = cfg.OptGroup(name='vmware',
CONF = cfg.CONF
CONF.register_group(vmware_group)
CONF.register_opts(vmware_vif_opts, vmware_group)
+CONF.import_opt('vnc_enabled', 'nova.vnc')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
- 'suspended': power_state.PAUSED}
+ 'suspended': power_state.SUSPENDED}
+VMWARE_PREFIX = 'vmware'
+
+
+RESIZE_TOTAL_STEPS = 4
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
- def __init__(self, session):
+ def __init__(self, session, virtapi, volumeops):
"""Initializer."""
+ self.compute_api = compute.API()
self._session = session
+ self._virtapi = virtapi
+ self._volumeops = volumeops
+ self._instance_path_base = VMWARE_PREFIX + CONF.base_dir_name
+ self._default_root_device = 'vda'
+ self._rescue_suffix = '-rescue'
+ self._poll_rescue_last_ran = None
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@@ -83,13 +99,14 @@ class VMwareVMOps(object):
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
- # Ignoring the oprhaned or inaccessible VMs
+ # Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, context, instance, image_meta, network_info):
+ def spawn(self, context, instance, image_meta, network_info,
+ block_device_info=None):
"""
Creates a VM instance.
@@ -97,44 +114,28 @@ class VMwareVMOps(object):
1. Create a VM with no disk and the specifics in the instance object
like RAM size.
- 2. Create a dummy vmdk of the size of the disk file that is to be
- uploaded. This is required just to create the metadata file.
- 3. Delete the -flat.vmdk file created in the above step and retain
- the metadata .vmdk file.
- 4. Upload the disk file.
- 5. Attach the disk to the VM by reconfiguring the same.
- 6. Power on the VM.
+ 2. For flat disk
+ 2.1. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ 2.2. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ 2.3. Upload the disk file.
+ 3. For sparse disk
+ 3.1. Upload the disk file to a -sparse.vmdk file.
+ 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk.
+ 3.3. Delete the -sparse.vmdk file.
+ 4. Attach the disk to the VM by reconfiguring the same.
+ 5. Power on the VM.
"""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref:
- raise exception.InstanceExists(name=instance.name)
+ raise exception.InstanceExists(name=instance['name'])
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
-
- def _get_datastore_ref():
- """Get the datastore list and choose the first local storage."""
- data_stores = self._session._call_method(vim_util, "get_objects",
- "Datastore", ["summary.type", "summary.name"])
- for elem in data_stores:
- ds_name = None
- ds_type = None
- for prop in elem.propSet:
- if prop.name == "summary.type":
- ds_type = prop.val
- elif prop.name == "summary.name":
- ds_name = prop.val
- # Local storage identifier
- if ds_type == "VMFS":
- data_store_name = ds_name
- return data_store_name
-
- if data_store_name is None:
- msg = _("Couldn't get a local Datastore reference")
- LOG.error(msg, instance=instance)
- raise exception.NovaException(msg)
-
- data_store_name = _get_datastore_ref()
+ ds = vm_util.get_datastore_ref_and_name(self._session)
+ data_store_ref = ds[0]
+ data_store_name = ds[1]
def _get_image_properties():
"""
@@ -142,31 +143,21 @@ class VMwareVMOps(object):
repository.
"""
_image_info = vmware_images.get_vmdk_size_and_properties(context,
- instance.image_ref,
- instance)
+ instance['image_ref'],
+ instance)
image_size, image_properties = _image_info
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
"lsiLogic")
- return vmdk_file_size_in_kb, os_type, adapter_type
-
- vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
+ disk_type = image_properties.get("vmware_disktype",
+ "preallocated")
+ return vmdk_file_size_in_kb, os_type, adapter_type, disk_type
- def _get_vmfolder_and_res_pool_mors():
- """Get the Vm folder ref from the datacenter."""
- dc_objs = self._session._call_method(vim_util, "get_objects",
- "Datacenter", ["vmFolder"])
- # There is only one default datacenter in a standalone ESX host
- vm_folder_mor = dc_objs[0].propSet[0].val
+ (vmdk_file_size_in_kb, os_type, adapter_type,
+ disk_type) = _get_image_properties()
- # Get the resource pool. Taking the first resource pool coming our
- # way. Assuming that is the default resource pool.
- res_pool_mor = self._session._call_method(vim_util, "get_objects",
- "ResourcePool")[0].obj
- return vm_folder_mor, res_pool_mor
-
- vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+ vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
def _check_if_network_bridge_exists(network_name):
network_ref = network_util.get_network_with_the_name(
@@ -177,6 +168,8 @@ class VMwareVMOps(object):
def _get_vif_infos():
vif_infos = []
+ if network_info is None:
+ return vif_infos
for (network, mapping) in network_info:
mac_address = mapping['mac']
network_name = network['bridge'] or \
@@ -201,33 +194,29 @@ class VMwareVMOps(object):
def _execute_create_vm():
"""Create VM on ESX host."""
- LOG.debug(_("Creating VM on the ESX host"), instance=instance)
+ LOG.debug(_("Creating VM on the ESX host"), instance=instance)
# Create the VM on the ESX host
vm_create_task = self._session._call_method(
self._session._get_vim(),
- "CreateVM_Task", vm_folder_mor,
- config=config_spec, pool=res_pool_mor)
+ "CreateVM_Task", vm_folder_ref,
+ config=config_spec, pool=res_pool_ref)
self._session._wait_for_task(instance['uuid'], vm_create_task)
- LOG.debug(_("Created VM on the ESX host"), instance=instance)
+ LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
- # Naming the VM files in correspondence with the VM instance name
- # The flat vmdk file name
- flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
- instance.name)
- # The vmdk meta-data file
- uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
- flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
- flat_uploaded_vmdk_name)
- uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
- uploaded_vmdk_name)
+ # Set the vnc configuration of the instance, vnc port starts from 5900
+ if CONF.vnc_enabled:
+ vnc_port = self._get_vnc_port(vm_ref)
+ vnc_pass = CONF.vnc_password or ''
+ self._set_vnc_config(client_factory, instance, vnc_port, vnc_pass)
def _create_virtual_disk():
"""Create a virtual disk of the size of flat vmdk file."""
@@ -238,103 +227,186 @@ class VMwareVMOps(object):
# Here we assume thick provisioning and lsiLogic for the adapter
# type
LOG.debug(_("Creating Virtual Disk of size "
- "%(vmdk_file_size_in_kb)s KB and adapter type "
- "%(adapter_type)s on the ESX host local store"
- " %(data_store_name)s") %
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store "
+ "%(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"adapter_type": adapter_type,
"data_store_name": data_store_name},
instance=instance)
vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
- vmdk_file_size_in_kb, adapter_type)
+ vmdk_file_size_in_kb, adapter_type,
+ disk_type)
vmdk_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVirtualDisk_Task",
service_content.virtualDiskManager,
name=uploaded_vmdk_path,
- datacenter=self._get_datacenter_name_and_ref()[0],
+ datacenter=dc_ref,
spec=vmdk_create_spec)
self._session._wait_for_task(instance['uuid'], vmdk_create_task)
LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
- " KB on the ESX host local store "
- "%(data_store_name)s") %
+ " KB and type %(disk_type)s on "
+ "the ESX host local store %(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "disk_type": disk_type,
"data_store_name": data_store_name},
instance=instance)
- _create_virtual_disk()
-
- def _delete_disk_file():
- LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
+ def _delete_disk_file(vmdk_path):
+ LOG.debug(_("Deleting the file %(vmdk_path)s "
"on the ESX host local"
"store %(data_store_name)s") %
- {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ {"vmdk_path": vmdk_path,
"data_store_name": data_store_name},
instance=instance)
- # Delete the -flat.vmdk file created. .vmdk file is retained.
+ # Delete the vmdk file.
vmdk_delete_task = self._session._call_method(
self._session._get_vim(),
"DeleteDatastoreFile_Task",
service_content.fileManager,
- name=flat_uploaded_vmdk_path)
+ name=vmdk_path,
+ datacenter=dc_ref)
self._session._wait_for_task(instance['uuid'], vmdk_delete_task)
- LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
+ LOG.debug(_("Deleted the file %(vmdk_path)s on the "
"ESX host local store %(data_store_name)s") %
- {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ {"vmdk_path": vmdk_path,
"data_store_name": data_store_name},
instance=instance)
- _delete_disk_file()
-
- cookies = self._session._get_vim().client.options.transport.cookiejar
-
def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- {'image_ref': instance.image_ref,
+ {'image_ref': instance['image_ref'],
'data_store_name': data_store_name},
instance=instance)
- # Upload the -flat.vmdk file whose meta-data file we just created
- # above
+ # For flat disk, upload the -flat.vmdk file whose meta-data file
+ # we just created above
+ # For sparse disk, upload the -sparse.vmdk file to be copied into
+ # a flat vmdk
+ upload_vmdk_name = sparse_uploaded_vmdk_name \
+ if disk_type == "sparse" else flat_uploaded_vmdk_name
vmware_images.fetch_image(
context,
- instance.image_ref,
+ instance['image_ref'],
instance,
host=self._session._host_ip,
- data_center_name=self._get_datacenter_name_and_ref()[1],
+ data_center_name=self._get_datacenter_ref_and_name()[1],
datastore_name=data_store_name,
cookies=cookies,
- file_path=flat_uploaded_vmdk_name)
- LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
- "data store %(data_store_name)s") %
- {'image_ref': instance.image_ref,
+ file_path=upload_vmdk_name)
+ LOG.debug(_("Downloaded image file data %(image_ref)s to "
+ "%(upload_vmdk_name)s on the ESX data store "
+ "%(data_store_name)s") %
+ {'image_ref': instance['image_ref'],
+ 'upload_vmdk_name': upload_vmdk_name,
'data_store_name': data_store_name},
instance=instance)
- _fetch_image_on_esx_datastore()
-
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
- def _attach_vmdk_to_the_vm():
- """
- Attach the vmdk uploaded to the VM. VM reconfigure is done
- to do so.
- """
- vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
- client_factory,
- adapter_type, "preallocated",
- uploaded_vmdk_path, vmdk_file_size_in_kb)
- LOG.debug(_("Reconfiguring VM instance to attach the image disk"),
- instance=instance)
- reconfig_task = self._session._call_method(
- self._session._get_vim(),
- "ReconfigVM_Task", vm_ref,
- spec=vmdk_attach_config_spec)
- self._session._wait_for_task(instance['uuid'], reconfig_task)
- LOG.debug(_("Reconfigured VM instance to attach the image disk"),
+ def _copy_virtual_disk():
+ """Copy a sparse virtual disk to a thin virtual disk."""
+ # Copy a sparse virtual disk to a thin virtual disk. This is also
+ # done to generate the meta-data file whose specifics
+ # depend on the size of the disk, thin/thick provisioning and the
+ # storage adapter type.
+ LOG.debug(_("Copying Virtual Disk of size "
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store "
+ "%(data_store_name)s to disk type %(disk_type)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "adapter_type": adapter_type,
+ "data_store_name": data_store_name,
+ "disk_type": disk_type},
instance=instance)
+ vmdk_copy_spec = vm_util.get_vmdk_create_spec(client_factory,
+ vmdk_file_size_in_kb, adapter_type,
+ disk_type)
+ vmdk_copy_task = self._session._call_method(
+ self._session._get_vim(),
+ "CopyVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ sourceName=sparse_uploaded_vmdk_path,
+ sourceDatacenter=self._get_datacenter_ref_and_name()[0],
+ destName=uploaded_vmdk_path,
+ destSpec=vmdk_copy_spec)
+ self._session._wait_for_task(instance['uuid'], vmdk_copy_task)
+ LOG.debug(_("Copied Virtual Disk of size %(vmdk_file_size_in_kb)s"
+ " KB and type %(disk_type)s on "
+ "the ESX host local store %(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "disk_type": disk_type,
+ "data_store_name": data_store_name},
+ instance=instance)
+
+ ebs_root = self._volume_in_mapping(self._default_root_device,
+ block_device_info)
- _attach_vmdk_to_the_vm()
+ if not ebs_root:
+ linked_clone = CONF.use_linked_clone
+ if linked_clone:
+ upload_folder = self._instance_path_base
+ upload_name = instance['image_ref']
+ else:
+ upload_folder = instance['name']
+ upload_name = instance['name']
+
+ # The vmdk meta-data file
+ uploaded_vmdk_name = "%s/%s.vmdk" % (upload_folder, upload_name)
+ uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ uploaded_vmdk_name)
+
+ if not (linked_clone and self._check_if_folder_file_exists(
+ data_store_ref, data_store_name,
+ upload_folder, upload_name + ".vmdk")):
+
+ # Naming the VM files in correspondence with the VM instance
+ # The flat vmdk file name
+ flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (
+ upload_folder, upload_name)
+ # The sparse vmdk file name for sparse disk image
+ sparse_uploaded_vmdk_name = "%s/%s-sparse.vmdk" % (
+ upload_folder, upload_name)
+
+ flat_uploaded_vmdk_path = vm_util.build_datastore_path(
+ data_store_name,
+ flat_uploaded_vmdk_name)
+ sparse_uploaded_vmdk_path = vm_util.build_datastore_path(
+ data_store_name,
+ sparse_uploaded_vmdk_name)
+ dc_ref = self._get_datacenter_ref_and_name()[0]
+
+ if disk_type != "sparse":
+ # Create a flat virtual disk and retain the metadata file.
+ _create_virtual_disk()
+ _delete_disk_file(flat_uploaded_vmdk_path)
+
+ cookies = \
+ self._session._get_vim().client.options.transport.cookiejar
+ _fetch_image_on_esx_datastore()
+
+ if disk_type == "sparse":
+ # Copy the sparse virtual disk to a thin virtual disk.
+ disk_type = "thin"
+ _copy_virtual_disk()
+ _delete_disk_file(sparse_uploaded_vmdk_path)
+ else:
+ # linked clone base disk exists
+ if disk_type == "sparse":
+ disk_type = "thin"
+
+ # Attach the vmdk uploaded to the VM.
+ self._volumeops.attach_disk_to_vm(
+ vm_ref, instance,
+ adapter_type, disk_type, uploaded_vmdk_path,
+ vmdk_file_size_in_kb, linked_clone)
+ else:
+ # Attach the root disk to the VM.
+ root_disk = driver.block_device_info_get_mapping(
+ block_device_info)[0]
+ connection_info = root_disk['connection_info']
+ self._volumeops.attach_volume(connection_info, instance['name'],
+ self._default_root_device)
def _power_on_vm():
"""Power on the VM."""
@@ -362,9 +434,9 @@ class VMwareVMOps(object):
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -378,7 +450,7 @@ class VMwareVMOps(object):
disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices)
datastore_name = vm_util.split_datastore_path(
- vmdk_file_path_before_snapshot)[0]
+ vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
@@ -395,7 +467,7 @@ class VMwareVMOps(object):
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
- name="%s-snapshot" % instance.name,
+ name="%s-snapshot" % instance['name'],
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
@@ -437,13 +509,14 @@ class VMwareVMOps(object):
random_name = str(uuid.uuid4())
dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
"vmware-tmp/%s.vmdk" % random_name)
- dc_ref = self._get_datacenter_name_and_ref()[0]
+ dc_ref = self._get_datacenter_ref_and_name()[0]
def _copy_vmdk_content():
# Copy the contents of the disk ( or disks, if there were snapshots
# done earlier) to a temporary vmdk file.
copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
- adapter_type)
+ adapter_type,
+ disk_type)
LOG.debug(_('Copying disk data before snapshot of the VM'),
instance=instance)
copy_disk_task = self._session._call_method(
@@ -476,7 +549,7 @@ class VMwareVMOps(object):
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
- data_center_name=self._get_datacenter_name_and_ref()[1],
+ data_center_name=self._get_datacenter_ref_and_name()[1],
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
@@ -509,9 +582,9 @@ class VMwareVMOps(object):
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.plug_vifs(instance, network_info)
@@ -552,6 +625,38 @@ class VMwareVMOps(object):
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
+ def _delete(self, instance, network_info):
+ """
+ Destroy a VM instance. Steps followed are:
+ 1. Power off the VM, if it is in poweredOn state.
+ 2. Destroy the VM.
+ """
+ try:
+ vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
+ if vm_ref is None:
+ LOG.debug(_("instance not present"), instance=instance)
+ return
+
+ self.power_off(instance)
+
+ try:
+ LOG.debug(_("Destroying the VM"), instance=instance)
+ destroy_task = self._session._call_method(
+ self._session._get_vim(),
+ "Destroy_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], destroy_task)
+ LOG.debug(_("Destroyed the VM"), instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:delete, got this exception"
+ " while destroying the VM: %s") % str(excep),
+ instance=instance)
+
+ if network_info:
+ self.unplug_vifs(instance, network_info)
+ except Exception, exc:
+ LOG.exception(exc, instance=instance)
+
def destroy(self, instance, network_info, destroy_disks=True):
"""
Destroy a VM instance. Steps followed are:
@@ -560,7 +665,8 @@ class VMwareVMOps(object):
3. Delete the contents of the folder holding the VM related data.
"""
try:
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
return
@@ -592,14 +698,15 @@ class VMwareVMOps(object):
try:
LOG.debug(_("Unregistering the VM"), instance=instance)
self._session._call_method(self._session._get_vim(),
- "UnregisterVM", vm_ref)
+ "UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep),
instance=instance)
- self.unplug_vifs(instance, network_info)
+ if network_info:
+ self.unplug_vifs(instance, network_info)
# Delete the folder holding the VM related content on
# the datastore.
@@ -617,7 +724,8 @@ class VMwareVMOps(object):
vim,
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
- name=dir_ds_compliant_path)
+ name=dir_ds_compliant_path,
+ datacenter=self._get_datacenter_ref_and_name()[0])
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
@@ -642,9 +750,9 @@ class VMwareVMOps(object):
def suspend(self, instance):
"""Suspend the specified instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -658,17 +766,17 @@ class VMwareVMOps(object):
LOG.debug(_("Suspended the VM"), instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- reason = _("instance is powered off and can not be suspended.")
+ reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
-
- LOG.debug(_("VM was already in suspended state. So returning "
- "without doing anything"), instance=instance)
+ else:
+ LOG.debug(_("VM was already in suspended state. So returning "
+ "without doing anything"), instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -684,9 +792,263 @@ class VMwareVMOps(object):
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
+ def rescue(self, context, instance, network_info, image_meta):
+ """Rescue the specified instance.
+
+ - shutdown the instance VM.
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue).
+
+ """
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.power_off(instance)
+ instance['name'] = instance['name'] + self._rescue_suffix
+ self.spawn(context, instance, image_meta, network_info)
+
+ # Attach vmdk to the rescue VM
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_path, controller_key, adapter_type, disk_type, unit_number \
+ = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
+ # Figure out the correct unit number
+ unit_number = unit_number + 1
+ rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
+ self._volumeops.attach_disk_to_vm(
+ rescue_vm_ref, instance,
+ adapter_type, disk_type, vmdk_path,
+ controller_key=controller_key,
+ unit_number=unit_number)
+
+ def unrescue(self, instance):
+ """Unrescue the specified instance."""
+ instance_orig_name = instance['name']
+ instance['name'] = instance['name'] + self._rescue_suffix
+ self.destroy(instance, None)
+ instance['name'] = instance_orig_name
+ self.power_on(instance)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ # Only PoweredOn VMs can be powered off.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Powering off the VM"), instance=instance)
+ poweroff_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], poweroff_task)
+ LOG.debug(_("Powered off the VM"), instance=instance)
+ # Raise Exception if VM is suspended
+ elif pwr_state == "suspended":
+ reason = _("instance is suspended and cannot be powered off.")
+ raise exception.InstancePowerOffFailure(reason=reason)
+ else:
+ LOG.debug(_("VM was already in powered off state. So returning "
+ "without doing anything"), instance=instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ if pwr_state == "poweredOn":
+ LOG.debug(_("VM was already in powered on state. So returning "
+ "without doing anything"), instance=instance)
+ # Only PoweredOff and Suspended VMs can be powered on.
+ else:
+ LOG.debug(_("Powering on the VM"), instance=instance)
+ poweron_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], poweron_task)
+ LOG.debug(_("Powered on the VM"), instance=instance)
+
+ def _get_orig_vm_name_label(self, instance):
+ return instance['name'] + '-orig'
+
+ def _update_instance_progress(self, context, instance, step, total_steps):
+ """Update instance progress percent to reflect current step number
+ """
+ # Divide the action's workflow into discrete steps and "bump" the
+ # instance's progress field as each step is completed.
+ #
+ # For a first cut this should be fine, however, for large VM images,
+ # the clone disk step begins to dominate the equation. A
+ # better approximation would use the percentage of the VM image that
+ # has been streamed to the destination host.
+ progress = round(float(step) / total_steps * 100)
+ instance_uuid = instance['uuid']
+ LOG.debug(_("Updating instance '%(instance_uuid)s' progress to"
+ " %(progress)d") % locals(), instance=instance)
+ self._virtapi.instance_update(context, instance_uuid,
+ {'progress': progress})
+
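
With RESIZE_TOTAL_STEPS = 4 the progress field advances in 25-point jumps; a worked instance of the formula above:

    progress = round(float(3) / 4 * 100)  # step 3 of 4 -> 75.0
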
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ # 0. Zero out the progress to begin
+ self._update_instance_progress(context, instance,
+ step=0,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['name'])
+ host_ref = self._get_host_ref_from_name(dest)
+ if host_ref is None:
+ raise exception.HostNotFound(host=dest)
+
+ # 1. Power off the instance
+ self.power_off(instance)
+ self._update_instance_progress(context, instance,
+ step=1,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ # 2. Rename the original VM with suffix '-orig'
+ name_label = self._get_orig_vm_name_label(instance)
+ LOG.debug(_("Renaming the VM to %s") % name_label,
+ instance=instance)
+ rename_task = self._session._call_method(
+ self._session._get_vim(),
+ "Rename_Task", vm_ref, newName=name_label)
+ self._session._wait_for_task(instance['uuid'], rename_task)
+ LOG.debug(_("Renamed the VM to %s") % name_label,
+ instance=instance)
+ self._update_instance_progress(context, instance,
+ step=2,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ # Get the clone vm spec
+ ds_ref = vm_util.get_datastore_ref_and_name(self._session)[0]
+ client_factory = self._session._get_vim().client.factory
+ rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref)
+ clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec)
+ vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+
+ # 3. Clone VM on ESX host
+ LOG.debug(_("Cloning VM to host %s") % dest, instance=instance)
+ vm_clone_task = self._session._call_method(
+ self._session._get_vim(),
+ "CloneVM_Task", vm_ref,
+ folder=vm_folder_ref,
+ name=instance['name'],
+ spec=clone_spec)
+ self._session._wait_for_task(instance['uuid'], vm_clone_task)
+ LOG.debug(_("Cloned VM to host %s") % dest, instance=instance)
+ self._update_instance_progress(context, instance,
+ step=3,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ instance_name = self._get_orig_vm_name_label(instance)
+ # Destroy the original VM.
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ LOG.debug(_("instance not present"), instance=instance)
+ return
+
+ try:
+ LOG.debug(_("Destroying the VM"), instance=instance)
+ destroy_task = self._session._call_method(
+ self._session._get_vim(),
+ "Destroy_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], destroy_task)
+ LOG.debug(_("Destroyed the VM"), instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
+ "exception while destroying the VM: %s") % str(excep))
+
+ if network_info:
+ self.unplug_vifs(instance, network_info)
+
+ def finish_revert_migration(self, instance):
+ """Finish reverting a resize, powering back on the instance."""
+ # The original vm was suffixed with '-orig'; find it using
+ # the old suffix, remove the suffix, then power it back on.
+ name_label = self._get_orig_vm_name_label(instance)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, name_label)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=name_label)
+
+ LOG.debug(_("Renaming the VM from %s") % name_label,
+ instance=instance)
+ rename_task = self._session._call_method(
+ self._session._get_vim(),
+ "Rename_Task", vm_ref, newName=instance['name'])
+ self._session._wait_for_task(instance['uuid'], rename_task)
+ LOG.debug(_("Renamed the VM from %s") % name_label,
+ instance=instance)
+ self.power_on(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False):
+ """Completes a resize, turning on the migrated instance."""
+ # 4. Start VM
+ self.power_on(instance)
+ self._update_instance_progress(context, instance,
+ step=4,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False):
+ """Spawning live_migration operation for distributing high-load."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_ref.name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_ref.name)
+ host_ref = self._get_host_ref_from_name(dest)
+ if host_ref is None:
+ raise exception.HostNotFound(host=dest)
+
+ LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref)
+ try:
+ vm_migrate_task = self._session._call_method(
+ self._session._get_vim(),
+ "MigrateVM_Task", vm_ref,
+ host=host_ref,
+ priority="defaultPriority")
+ self._session._wait_for_task(instance_ref['uuid'], vm_migrate_task)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ recover_method(context, instance_ref, dest, block_migration)
+ post_method(context, instance_ref, dest, block_migration)
+ LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref)
+
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances."""
+ ctxt = nova_context.get_admin_context()
+
+ instances_info = dict(instance_count=len(instances),
+ timeout=timeout)
+
+ if instances_info["instance_count"] > 0:
+ LOG.info(_("Found %(instance_count)d hung reboots "
+ "older than %(timeout)d seconds") % instances_info)
+
+ for instance in instances:
+ LOG.info(_("Automatically hard rebooting %d") % instance['uuid'])
+ self.compute_api.reboot(ctxt, instance, "HARD")
+
def get_info(self, instance):
"""Return data about the VM instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance['name'])
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['name'])
@@ -722,9 +1084,9 @@ class VMwareVMOps(object):
def get_console_output(self, instance):
"""Return snapshot of console."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -741,6 +1103,24 @@ class VMwareVMOps(object):
else:
return ""
+ def get_vnc_console(self, instance):
+ """Return connection info for a vnc console."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ return {'host': CONF.vmwareapi_host_ip,
+ 'port': self._get_vnc_port(vm_ref),
+ 'internal_access_path': None}
+
+ @staticmethod
+ def _get_vnc_port(vm_ref):
+ """Return VNC port for an VM."""
+ vm_id = int(vm_ref.value.replace('vm-', ''))
+ port = CONF.vnc_port + vm_id % CONF.vnc_port_total
+
+ return port
+
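
_get_vnc_port folds the managed object ID into a fixed window of ports; a worked example with the defaults (vnc_port=5900, vnc_port_total=10000):

    vm_id = int('vm-123'.replace('vm-', ''))  # 123
    port = 5900 + vm_id % 10000               # 6023
    # Note: morefs 10000 apart (vm-123 and vm-10123) share a port.
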
def _set_machine_id(self, client_factory, instance, network_info):
"""
Set the machine id of the VM for guest tools to pick up and reconfigure
@@ -786,12 +1166,56 @@ class VMwareVMOps(object):
"with ip - %(ip_addr)s") % {'ip_addr': ip_v4['ip']},
instance=instance)
- def _get_datacenter_name_and_ref(self):
+ def _set_vnc_config(self, client_factory, instance, port, password):
+ """
+ Set the vnc configuration of the VM.
+ """
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ vnc_config_spec = vm_util.get_vnc_config_spec(
+ client_factory, port, password)
+
+ LOG.debug(_("Reconfiguring VM instance to enable vnc on "
+ "port - %(port)s") % {'port': port},
+ instance=instance)
+ reconfig_task = self._session._call_method(self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vnc_config_spec)
+ self._session._wait_for_task(instance['uuid'], reconfig_task)
+ LOG.debug(_("Reconfigured VM instance to enable vnc on "
+ "port - %(port)s") % {'port': port},
+ instance=instance)
+
+ def _get_datacenter_ref_and_name(self):
"""Get the datacenter name and the reference."""
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
return dc_obj[0].obj, dc_obj[0].propSet[0].val
+ def _get_host_ref_from_name(self, host_name):
+ """Get reference to the host with the name specified."""
+ host_objs = self._session._call_method(vim_util, "get_objects",
+ "HostSystem", ["name"])
+ for host in host_objs:
+ if host.propSet[0].val == host_name:
+ return host.obj
+ return None
+
+ def _get_vmfolder_and_res_pool_refs(self):
+ """Get the Vm folder ref from the datacenter."""
+ dc_objs = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["vmFolder"])
+ # There is only one default datacenter in a standalone ESX host
+ vm_folder_ref = dc_objs[0].propSet[0].val
+
+ # Get the resource pool. Taking the first resource pool coming our
+ # way. Assuming that is the default resource pool.
+ res_pool_ref = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ return vm_folder_ref, res_pool_ref
+
def _path_exists(self, ds_browser, ds_path):
"""Check if the path exists on the datastore."""
search_task = self._session._call_method(self._session._get_vim(),
@@ -812,6 +1236,32 @@ class VMwareVMOps(object):
return False
return True
+ def _path_file_exists(self, ds_browser, ds_path, file_name):
+ """Check if the path and file exists on the datastore."""
+ client_factory = self._session._get_vim().client.factory
+ search_spec = vm_util.search_datastore_spec(client_factory, file_name)
+ search_task = self._session._call_method(self._session._get_vim(),
+ "SearchDatastore_Task",
+ ds_browser,
+ datastorePath=ds_path,
+ searchSpec=search_spec)
+ # Wait until the task state is no longer 'queued' or 'running'.
+ # An 'error' state means that the path does not exist.
+ while True:
+ task_info = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ search_task, "Task", "info")
+ if task_info.state in ['queued', 'running']:
+ time.sleep(2)
+ continue
+ break
+ if task_info.state == "error":
+ return False, False
+
+ file_exists = (getattr(task_info.result, 'file', False) and
+ task_info.result.file[0].path == file_name)
+ return True, file_exists
+
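[Editor's note: a generic, runnable mirror of the polling contract above; the helper name poll_task is illustrative, not part of the driver:]

    import time

    def poll_task(read_state, interval=2.0):
        """Block until the task leaves 'queued'/'running'; return final state."""
        while True:
            state = read_state()
            if state not in ('queued', 'running'):
                return state
            time.sleep(interval)

    # A terminal 'error' state maps to (False, False): the path does not exist.
    print(poll_task(lambda: 'success'))  # -> 'success'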
def _mkdir(self, ds_path):
"""
Creates a directory at the path specified. If it is just "NAME",
@@ -824,14 +1274,23 @@ class VMwareVMOps(object):
name=ds_path, createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
- def _get_vm_ref_from_the_name(self, vm_name):
- """Get reference to the VM with the name specified."""
- vms = self._session._call_method(vim_util, "get_objects",
- "VirtualMachine", ["name"])
- for vm in vms:
- if vm.propSet[0].val == vm_name:
- return vm.obj
- return None
+ def _check_if_folder_file_exists(self, ds_ref, ds_name,
+ folder_name, file_name):
+ ds_browser = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ ds_ref,
+ "Datastore",
+ "browser")
+ # Check whether the folder and the file exist on the datastore;
+ # create the folder if it is missing.
+ folder_path = vm_util.build_datastore_path(ds_name, folder_name)
+ folder_exists, file_exists = self._path_file_exists(ds_browser,
+ folder_path,
+ file_name)
+ if not folder_exists:
+ self._mkdir(vm_util.build_datastore_path(ds_name, folder_name))
+
+ return file_exists
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
@@ -872,3 +1331,21 @@ class VMwareVMOps(object):
interfaces.append(device.key)
return interfaces
+
+ @staticmethod
+ def _volume_in_mapping(mount_device, block_device_info):
+ block_device_list = [block_device.strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(
+ block_device.strip_dev(swap['device_name']))
+ block_device_list += [block_device.strip_dev(ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_dev(mount_device) in block_device_list
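[Editor's note: a runnable sketch of the membership test above; strip_dev is mirrored locally here on the assumption that nova.block_device.strip_dev drops a leading '/dev/':]

    def strip_dev(device_name):
        # local mirror of nova.block_device.strip_dev
        return device_name[5:] if device_name.startswith('/dev/') else device_name

    block_device_list = [strip_dev(v) for v in ('/dev/sdb', '/dev/sdc')]
    print(strip_dev('/dev/sdb') in block_device_list)  # True
    print(strip_dev('/dev/sda') in block_device_list)  # False

Stripping the '/dev/' prefix on both sides makes the comparison insensitive to whether callers pass 'sdb' or '/dev/sdb' as the mount device.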
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 7c4480ea0..e8510b36e 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -17,7 +18,6 @@
"""
Utility functions for Image transfer.
"""
-import StringIO
from nova import exception
from nova.image import glance
@@ -56,7 +56,7 @@ def start_transfer(context, read_file_handle, data_size,
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
# In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
- # to be sure of the status of the image on glnace changing to active.
+ # to be sure of the status of the image on glance changing to active.
# The GlanceWriteThread handles the same for us.
elif image_service and image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
@@ -93,9 +93,8 @@ def fetch_image(context, image, instance, **kwargs):
(image_service, image_id) = glance.get_remote_image_service(context, image)
metadata = image_service.show(context, image_id)
file_size = int(metadata['size'])
- f = StringIO.StringIO()
- image_service.download(context, image_id, f)
- read_file_handle = read_write_util.GlanceFileRead(f)
+ read_iter = image_service.download(context, image_id)
+ read_file_handle = read_write_util.GlanceFileRead(read_iter)
write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
@@ -122,10 +121,9 @@ def upload_image(context, image, instance, **kwargs):
file_size = read_file_handle.get_size()
(image_service, image_id) = glance.get_remote_image_service(context, image)
# The properties and other fields that we need to set for the image.
- image_metadata = {"is_public": True,
- "disk_format": "vmdk",
+ image_metadata = {"disk_format": "vmdk",
"container_format": "bare",
- "type": "vmdk",
+ "size": file_size,
"properties": {"vmware_adaptertype":
kwargs.get("adapter_type"),
"vmware_ostype": kwargs.get("os_type"),
diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py
index 9d556cd26..2af3381a4 100644
--- a/nova/virt/vmwareapi/volume_util.py
+++ b/nova/virt/vmwareapi/volume_util.py
@@ -22,7 +22,6 @@ and storage repositories
import re
import string
-from nova import exception
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 5ec389f80..922d2135b 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -18,7 +18,6 @@
Management class for Storage-related functions (attach, detach, etc).
"""
-from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -110,7 +109,8 @@ class VMwareVolumeOps(object):
iqn = volume_util.get_host_iqn(self._session)
return {
'ip': CONF.vmwareapi_host_ip,
- 'initiator': iqn
+ 'initiator': iqn,
+ 'host': CONF.vmwareapi_host_ip
}
def attach_volume(self, connection_info, instance, mountpoint):
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index daca69854..05918f83d 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -63,8 +63,10 @@ def cinderclient(context):
# FIXME: the cinderclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
+ # Only the needed parts of the service catalog are passed in; see
+ # nova/context.py.
compat_catalog = {
- 'access': {'serviceCatalog': context.service_catalog or {}}
+ 'access': {'serviceCatalog': context.service_catalog or []}
}
sc = service_catalog.ServiceCatalog(compat_catalog)
if CONF.cinder_endpoint_template:
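[Editor's note: a minimal sketch of the compatibility wrapper above. A service catalog is a list of services, so an empty context catalog should default to [] rather than {}, which is the one-character bug this hunk fixes:]

    service_catalog = None  # e.g. a context created without a catalog
    compat_catalog = {'access': {'serviceCatalog': service_catalog or []}}
    print(compat_catalog)   # {'access': {'serviceCatalog': []}}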
diff --git a/run_tests.sh b/run_tests.sh
index 11bc8b518..5bb2842b2 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -142,6 +142,7 @@ function run_pep8 {
echo "Running pep8"
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
+ ${wrapper} bash tools/unused_imports.sh
# NOTE(sdague): as of grizzly-2 these are passing; leaving the comment
# here in case we need to break it out when we get more of our hacking
# checks working again.
diff --git a/tools/flakes.py b/tools/flakes.py
index 4b93abc21..f805fd156 100644
--- a/tools/flakes.py
+++ b/tools/flakes.py
@@ -8,7 +8,7 @@ import __builtin__
import os
import sys
-from pyflakes.scripts.pyflakes import main
+from pyflakes.scripts import pyflakes
if __name__ == "__main__":
names = os.environ.get('PYFLAKES_BUILTINS', '_')
@@ -19,4 +19,4 @@ if __name__ == "__main__":
del names, os, __builtin__
- sys.exit(main())
+ sys.exit(pyflakes.main())
diff --git a/tools/unused_imports.sh b/tools/unused_imports.sh
new file mode 100755
index 000000000..0e0294517
--- /dev/null
+++ b/tools/unused_imports.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# snakefood's sfood-checker detects even more unused imports.
+# The leading '!' inverts the pipeline's exit status, so this check
+# fails whenever pyflakes reports anything "imported but unused".
+! pyflakes nova/ | grep "imported but unused"
diff --git a/tox.ini b/tox.ini
index e98f30151..67fe1eea0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,13 +15,16 @@ sitepackages = True
downloadcache = ~/cache/pip
[testenv:pep8]
-deps=pep8==1.3.3
+deps=
+ pep8==1.3.3
+ pyflakes
commands =
python tools/hacking.py --doctest
python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--filename=nova* bin
+ bash tools/unused_imports.sh
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}