-rwxr-xr-x  bin/nova-api | 6
-rwxr-xr-x  bin/nova-api-ec2 | 2
-rwxr-xr-x  bin/nova-dhcpbridge | 5
-rwxr-xr-x  bin/nova-novncproxy | 5
-rwxr-xr-x  bin/nova-spicehtml5proxy | 5
-rw-r--r--  contrib/boto_v6/ec2/connection.py | 4
-rw-r--r--  contrib/boto_v6/ec2/instance.py | 18
-rw-r--r--  doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json | 10
-rw-r--r--  doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml | 5
-rw-r--r--  nova/api/openstack/compute/contrib/coverage_ext.py | 4
-rw-r--r--  nova/compute/api.py | 16
-rw-r--r--  nova/compute/manager.py | 6
-rw-r--r--  nova/compute/utils.py | 10
-rw-r--r--  nova/db/sqlalchemy/api.py | 6
-rw-r--r--  nova/db/sqlalchemy/models.py | 42
-rw-r--r--  nova/db/sqlalchemy/session.py | 12
-rw-r--r--  nova/notifications.py | 8
-rw-r--r--  nova/service.py | 5
-rw-r--r--  nova/servicegroup/__init__.py | 4
-rw-r--r--  nova/servicegroup/api.py | 4
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_availability_zone.py | 35
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_coverage_ext.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_services.py | 82
-rw-r--r--  nova/tests/baremetal/test_pxe.py | 29
-rw-r--r--  nova/tests/compute/test_compute.py | 76
-rw-r--r--  nova/tests/compute/test_compute_utils.py | 14
-rw-r--r--  nova/tests/conf_fixture.py | 4
-rw-r--r--  nova/tests/fakelibvirt.py | 6
-rw-r--r--  nova/tests/integrated/api_samples/README.rst | 18
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl | 10
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl | 4
-rw-r--r--  nova/tests/integrated/test_api_samples.py | 36
-rw-r--r--  nova/tests/integrated/test_multiprocess_api.py | 2
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 5
-rw-r--r--  nova/tests/test_api.py | 6
-rw-r--r--  nova/tests/test_metadata.py | 36
-rw-r--r--  nova/tests/test_periodic_tasks.py | 18
-rw-r--r--  nova/tests/test_utils.py | 11
-rw-r--r--  nova/tests/test_virt_drivers.py | 6
-rw-r--r--  nova/tests/test_wsgi.py | 21
-rw-r--r--  nova/utils.py | 7
-rw-r--r--  nova/virt/baremetal/__init__.py | 4
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/api.py | 71
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/migration.py | 15
-rw-r--r--  nova/virt/baremetal/fake.py | 4
-rw-r--r--  nova/virt/baremetal/ipmi.py | 14
-rw-r--r--  nova/virt/baremetal/pxe.py | 3
-rw-r--r--  nova/virt/hyperv/vif.py | 8
-rw-r--r--  nova/virt/libvirt/__init__.py | 4
-rw-r--r--  nova/virt/libvirt/driver.py | 12
-rw-r--r--  nova/virt/powervm/__init__.py | 4
-rw-r--r--  nova/virt/powervm/operator.py | 38
-rw-r--r--  nova/virt/vmwareapi/__init__.py | 4
-rw-r--r--  nova/virt/vmwareapi/driver.py | 26
-rw-r--r--  nova/virt/vmwareapi/fake.py | 25
-rw-r--r--  nova/virt/vmwareapi/io_util.py | 9
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 201
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 18
-rw-r--r--  nova/virt/vmwareapi/volume_util.py | 178
-rw-r--r--  nova/virt/vmwareapi/volumeops.py | 183
-rw-r--r--  nova/virt/xenapi/__init__.py | 4
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 11
-rw-r--r--  nova/volume/cinder.py | 4
-rw-r--r--  nova/wsgi.py | 25
-rwxr-xr-x  run_tests.sh | 14
-rw-r--r--  smoketests/base.py | 6
-rwxr-xr-x  tools/hacking.py | 62
-rw-r--r--  tools/test-requires | 2
68 files changed, 1125 insertions, 433 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 16cf33cc5..d957f3e58 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -54,6 +54,10 @@ if __name__ == '__main__':
launcher = service.ProcessLauncher()
for api in CONF.enabled_apis:
should_use_ssl = api in CONF.enabled_ssl_apis
- server = service.WSGIService(api, use_ssl=should_use_ssl)
+ if api == 'ec2':
+ server = service.WSGIService(api, use_ssl=should_use_ssl,
+ max_url_len=16384)
+ else:
+ server = service.WSGIService(api, use_ssl=should_use_ssl)
launcher.launch_server(server, workers=server.workers or 1)
launcher.wait()
diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2
index c7b08845d..d1b3d45ea 100755
--- a/bin/nova-api-ec2
+++ b/bin/nova-api-ec2
@@ -41,6 +41,6 @@ if __name__ == '__main__':
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
- server = service.WSGIService('ec2')
+ server = service.WSGIService('ec2', max_url_len=16384)
service.serve(server, workers=server.workers)
service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index ee7bf2da9..0438ee6ff 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -96,10 +96,15 @@ def init_leases(network_id):
def add_action_parsers(subparsers):
parser = subparsers.add_parser('init')
+ # NOTE(cfb): dnsmasq always passes mac, and ip. hostname
+ # is passed if known. We don't care about
+ # hostname, but argparse will complain if we
+ # do not accept it.
for action in ['add', 'del', 'old']:
parser = subparsers.add_parser(action)
parser.add_argument('mac')
parser.add_argument('ip')
+ parser.add_argument('hostname', nargs='?', default='')
parser.set_defaults(func=globals()[action + '_lease'])
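
The note above describes dnsmasq's calling convention for its lease script: mac and ip are always supplied, hostname only when known. A standalone argparse sketch of that convention (hypothetical prog name; the real parsers are built in add_action_parsers):

    import argparse

    # Accept "add <mac> <ip> [hostname]" the way dnsmasq calls the helper.
    parser = argparse.ArgumentParser(prog='lease-helper')
    parser.add_argument('action', choices=['add', 'del', 'old'])
    parser.add_argument('mac')
    parser.add_argument('ip')
    # hostname may be absent, so make it optional with an empty default
    parser.add_argument('hostname', nargs='?', default='')

    print(parser.parse_args(['add', '02:16:3e:00:00:01', '192.0.2.10']))
    print(parser.parse_args(['add', '02:16:3e:00:00:01', '192.0.2.10', 'guest']))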
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 477510b99..657f97b48 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -25,7 +25,7 @@ import os
import sys
from nova import config
-from nova.console import websocketproxy as ws
+from nova.console import websocketproxy
from nova.openstack.common import cfg
@@ -77,7 +77,8 @@ if __name__ == '__main__':
sys.exit(-1)
# Create and start the NovaWebSockets proxy
- server = ws.NovaWebSocketProxy(listen_host=CONF.novncproxy_host,
+ server = websocketproxy.NovaWebSocketProxy(
+ listen_host=CONF.novncproxy_host,
listen_port=CONF.novncproxy_port,
source_is_ipv6=CONF.source_is_ipv6,
verbose=CONF.verbose,
diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy
index 089ff9d71..17f8cb7c2 100755
--- a/bin/nova-spicehtml5proxy
+++ b/bin/nova-spicehtml5proxy
@@ -25,7 +25,7 @@ import os
import sys
from nova import config
-from nova.console import websocketproxy as ws
+from nova.console import websocketproxy
from nova.openstack.common import cfg
@@ -77,7 +77,8 @@ if __name__ == '__main__':
sys.exit(-1)
# Create and start the NovaWebSockets proxy
- server = ws.NovaWebSocketProxy(listen_host=CONF.spicehtml5proxy_host,
+ server = websocketproxy.NovaWebSocketProxy(
+ listen_host=CONF.spicehtml5proxy_host,
listen_port=CONF.spicehtml5proxy_port,
source_is_ipv6=CONF.source_is_ipv6,
verbose=CONF.verbose,
diff --git a/contrib/boto_v6/ec2/connection.py b/contrib/boto_v6/ec2/connection.py
index 940608ffd..4cec65ad8 100644
--- a/contrib/boto_v6/ec2/connection.py
+++ b/contrib/boto_v6/ec2/connection.py
@@ -6,7 +6,7 @@ Created on 2010/12/20
import base64
import boto
import boto.ec2
-from boto.ec2.securitygroup import SecurityGroup
+import boto.ec2.securitygroup as securitygroup
from boto_v6.ec2.instance import ReservationV6
@@ -114,7 +114,7 @@ class EC2ConnectionV6(boto.ec2.EC2Connection):
if security_groups:
l = []
for group in security_groups:
- if isinstance(group, SecurityGroup):
+ if isinstance(group, securitygroup.SecurityGroup):
l.append(group.name)
else:
l.append(group)
diff --git a/contrib/boto_v6/ec2/instance.py b/contrib/boto_v6/ec2/instance.py
index 74adccc00..6f088c67e 100644
--- a/contrib/boto_v6/ec2/instance.py
+++ b/contrib/boto_v6/ec2/instance.py
@@ -3,31 +3,29 @@ Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
-from boto.ec2.instance import Group
-from boto.ec2.instance import Instance
-from boto.ec2.instance import Reservation
-from boto.resultset import ResultSet
+from boto.ec2 import instance
+from boto import resultset
-class ReservationV6(Reservation):
+class ReservationV6(instance.Reservation):
def startElement(self, name, attrs, connection):
if name == 'instancesSet':
- self.instances = ResultSet([('item', InstanceV6)])
+ self.instances = resultset.ResultSet([('item', InstanceV6)])
return self.instances
elif name == 'groupSet':
- self.groups = ResultSet([('item', Group)])
+ self.groups = resultset.ResultSet([('item', instance.Group)])
return self.groups
else:
return None
-class InstanceV6(Instance):
+class InstanceV6(instance.Instance):
def __init__(self, connection=None):
- Instance.__init__(self, connection)
+ instance.Instance.__init__(self, connection)
self.dns_name_v6 = None
def endElement(self, name, value, connection):
- Instance.endElement(self, name, value, connection)
+ instance.Instance.endElement(self, name, value, connection)
if name == 'dnsNameV6':
self.dns_name_v6 = value
diff --git a/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json b/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json
new file mode 100644
index 000000000..7b6482987
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json
@@ -0,0 +1,10 @@
+{
+ "floating_ip_pools": [
+ {
+ "name": "pool1"
+ },
+ {
+ "name": "pool2"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml b/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml
new file mode 100644
index 000000000..ca09fbf95
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_pools>
+ <floating_ip_pool name="pool1"/>
+ <floating_ip_pool name="pool2"/>
+</floating_ip_pools>
\ No newline at end of file
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index 4b7d4e57f..bc4d0f0f9 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -23,7 +23,7 @@ import sys
import telnetlib
import tempfile
-from coverage import coverage
+import coverage
from webob import exc
from nova.api.openstack import extensions
@@ -47,7 +47,7 @@ class CoverageController(object):
def __init__(self):
self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
data_out = os.path.join(self.data_path, '.nova-coverage')
- self.coverInst = coverage(data_file=data_out)
+ self.coverInst = coverage.coverage(data_file=data_out)
self.compute_api = compute_api.API()
self.network_api = network_api.API()
self.conductor_api = conductor_api.API()
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 765aeeef5..06ce2e07e 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1357,9 +1357,6 @@ class API(base.Base):
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- instance = self.update(context, instance,
- task_state=task_states.IMAGE_BACKUP,
- expected_task_state=None)
if image_id:
# The image entry has already been created, so just pull the
# metadata.
@@ -1368,6 +1365,11 @@ class API(base.Base):
image_meta = self._create_image(context, instance, name,
'backup', backup_type=backup_type,
rotation=rotation, extra_properties=extra_properties)
+
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_BACKUP,
+ expected_task_state=None)
+
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='backup',
backup_type=backup_type, rotation=rotation)
@@ -1386,9 +1388,6 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
- instance = self.update(context, instance,
- task_state=task_states.IMAGE_SNAPSHOT,
- expected_task_state=None)
if image_id:
# The image entry has already been created, so just pull the
# metadata.
@@ -1396,6 +1395,11 @@ class API(base.Base):
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
+
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_SNAPSHOT,
+ expected_task_state=None)
+
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='snapshot')
return image_meta
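
Reordering update() below the image-service calls means task_state is only set to IMAGE_BACKUP or IMAGE_SNAPSHOT once the image record exists; if the image service raises, the instance keeps task_state None, which is what the new compute tests below assert. A minimal sketch of the resulting ordering, with hypothetical callables standing in for the real API:

    def snapshot_sketch(create_image, set_task_state, request_snapshot):
        # Step 1: talk to the image service first; this may raise.
        image_meta = create_image()
        # Step 2: only after success, mark the instance as snapshotting.
        set_task_state('image_snapshot')
        # Step 3: hand off to the compute service.
        request_snapshot(image_meta['id'])
        return image_meta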
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d1cffea7d..275611cdf 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1132,8 +1132,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
- system_meta = compute_utils.metadata_to_dict(
- instance['system_metadata'])
+ system_meta = utils.metadata_to_dict(instance['system_metadata'])
self.conductor_api.instance_destroy(context, instance)
# ensure block device mappings are not leaked
@@ -1675,8 +1674,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_rescue_image_ref(self, context, instance):
"""Determine what image should be used to boot the rescue VM."""
- system_meta = compute_utils.metadata_to_dict(
- instance['system_metadata'])
+ system_meta = utils.metadata_to_dict(instance['system_metadata'])
rescue_image_ref = system_meta.get('image_base_image_ref')
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 2b1286e16..1874e886f 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -37,13 +37,6 @@ CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
-def metadata_to_dict(metadata):
- result = {}
- for item in metadata:
- result[item['key']] = item['value']
- return result
-
-
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
@@ -159,7 +152,8 @@ def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data)
if system_metadata is None:
- system_metadata = metadata_to_dict(instance_ref['system_metadata'])
+ system_metadata = utils.metadata_to_dict(
+ instance_ref['system_metadata'])
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d6f093f5b..a6f585eef 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -35,7 +35,7 @@ from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import func
from nova import block_device
-from nova.common.sqlalchemyutils import paginate_query
+from nova.common import sqlalchemyutils
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
@@ -1645,7 +1645,8 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
- query_prefix = paginate_query(query_prefix, models.Instance, limit,
+ query_prefix = sqlalchemyutils.paginate_query(query_prefix,
+ models.Instance, limit,
[sort_key, 'created_at', 'id'],
marker=marker,
sort_dir=sort_dir)
@@ -1714,6 +1715,7 @@ def instance_get_active_by_window_joined(context, begin, end=None,
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
+ options(joinedload('system_metadata')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 14c651020..05452f2ad 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -27,7 +27,7 @@ from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from nova.db.sqlalchemy.session import get_session
-from nova.db.sqlalchemy.types import IPAddress
+from nova.db.sqlalchemy import types
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
@@ -221,7 +221,7 @@ class Instance(BASE, NovaBase):
return base_name
def _extra_keys(self):
- return ['name']
+ return ['name', 'system_metadata']
user_id = Column(String(255))
project_id = Column(String(255))
@@ -291,8 +291,8 @@ class Instance(BASE, NovaBase):
# User editable field meant to represent what ip should be used
# to connect to the instance
- access_ip_v4 = Column(IPAddress())
- access_ip_v6 = Column(IPAddress())
+ access_ip_v4 = Column(types.IPAddress())
+ access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
@@ -595,7 +595,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
- cidr = Column(IPAddress())
+ cidr = Column(types.IPAddress())
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
@@ -615,7 +615,7 @@ class ProviderFirewallRule(BASE, NovaBase):
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
- cidr = Column(IPAddress())
+ cidr = Column(types.IPAddress())
class KeyPair(BASE, NovaBase):
@@ -665,25 +665,25 @@ class Network(BASE, NovaBase):
label = Column(String(255))
injected = Column(Boolean, default=False)
- cidr = Column(IPAddress(), unique=True)
- cidr_v6 = Column(IPAddress(), unique=True)
+ cidr = Column(types.IPAddress(), unique=True)
+ cidr_v6 = Column(types.IPAddress(), unique=True)
multi_host = Column(Boolean, default=False)
- gateway_v6 = Column(IPAddress())
- netmask_v6 = Column(IPAddress())
- netmask = Column(IPAddress())
+ gateway_v6 = Column(types.IPAddress())
+ netmask_v6 = Column(types.IPAddress())
+ netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
- gateway = Column(IPAddress())
- broadcast = Column(IPAddress())
- dns1 = Column(IPAddress())
- dns2 = Column(IPAddress())
+ gateway = Column(types.IPAddress())
+ broadcast = Column(types.IPAddress())
+ dns1 = Column(types.IPAddress())
+ dns2 = Column(types.IPAddress())
vlan = Column(Integer)
- vpn_public_address = Column(IPAddress())
+ vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
- vpn_private_address = Column(IPAddress())
- dhcp_start = Column(IPAddress())
+ vpn_private_address = Column(types.IPAddress())
+ dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
@@ -708,7 +708,7 @@ class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
- address = Column(IPAddress())
+ address = Column(types.IPAddress())
network_id = Column(Integer, nullable=True)
virtual_interface_id = Column(Integer, nullable=True)
instance_uuid = Column(String(36), nullable=True)
@@ -725,7 +725,7 @@ class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
- address = Column(IPAddress())
+ address = Column(types.IPAddress())
fixed_ip_id = Column(Integer, nullable=True)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
@@ -748,7 +748,7 @@ class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
id = Column(Integer, primary_key=True)
- address = Column(IPAddress())
+ address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index cfabc7085..eb5d8016f 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -236,9 +236,7 @@ import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-from nova.exception import DBDuplicateEntry
-from nova.exception import DBError
-from nova.exception import InvalidUnicodeParameter
+import nova.exception
from nova.openstack.common import cfg
import nova.openstack.common.log as logging
from nova.openstack.common import timeutils
@@ -362,7 +360,7 @@ def raise_if_duplicate_entry_error(integrity_error, engine_name):
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
- raise DBDuplicateEntry(columns, integrity_error)
+ raise nova.exception.DBDuplicateEntry(columns, integrity_error)
def wrap_db_error(f):
@@ -370,7 +368,7 @@ def wrap_db_error(f):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
- raise InvalidUnicodeParameter()
+ raise nova.exception.InvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
@@ -381,10 +379,10 @@ def wrap_db_error(f):
# means we should get names of columns, which values violate
# unique constraint, from error message.
raise_if_duplicate_entry_error(e, get_engine().name)
- raise DBError(e)
+ raise nova.exception.DBError(e)
except Exception, e:
LOG.exception(_('DB exception wrapped.'))
- raise DBError(e)
+ raise nova.exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
diff --git a/nova/notifications.py b/nova/notifications.py
index f399ac55d..65428d03f 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -283,12 +283,8 @@ def info_from_instance(context, instance_ref, network_info,
instance_type_name = instance_ref.get('instance_type', {}).get('name', '')
if system_metadata is None:
- try:
- system_metadata = db.instance_system_metadata_get(
- context, instance_ref['uuid'])
-
- except exception.NotFound:
- system_metadata = {}
+ system_metadata = utils.metadata_to_dict(
+ instance_ref['system_metadata'])
instance_info = dict(
# Owner properties
diff --git a/nova/service.py b/nova/service.py
index 87857f93d..c250673f4 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -574,7 +574,7 @@ class Service(object):
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
- def __init__(self, name, loader=None, use_ssl=False):
+ def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
@@ -594,7 +594,8 @@ class WSGIService(object):
self.app,
host=self.host,
port=self.port,
- use_ssl=self.use_ssl)
+ use_ssl=self.use_ssl,
+ max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
diff --git a/nova/servicegroup/__init__.py b/nova/servicegroup/__init__.py
index 318d30ff7..a804c62fa 100644
--- a/nova/servicegroup/__init__.py
+++ b/nova/servicegroup/__init__.py
@@ -19,4 +19,6 @@ The membership service for Nova. Different implementations can be plugged
according to the Nova configuration.
"""
-from nova.servicegroup.api import API
+from nova.servicegroup import api
+
+API = api.API
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 358b7dcbc..0fb30cdf5 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -23,7 +23,7 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
-from random import choice
+import random
LOG = logging.getLogger(__name__)
@@ -144,4 +144,4 @@ class ServiceGroupDriver(object):
length = len(members)
if length == 0:
return None
- return choice(members)
+ return random.choice(members)
diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
index 8abe7f388..fb9a36ba9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from datetime import datetime
+import datetime
from lxml import etree
import webob
@@ -40,29 +40,29 @@ def fake_service_get_all(context, disabled=None):
if disabled:
return [__fake_service("nova-compute", "zone-2",
- datetime(2012, 11, 14, 9, 53, 25, 0),
- datetime(2012, 12, 26, 14, 45, 25, 0),
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
- datetime(2012, 11, 14, 9, 57, 3, 0),
- datetime(2012, 12, 26, 14, 45, 25, 0),
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
- datetime(2012, 11, 16, 7, 25, 46, 0),
- datetime(2012, 12, 26, 14, 45, 24, 0),
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
return [__fake_service("nova-compute", "zone-1",
- datetime(2012, 11, 14, 9, 53, 25, 0),
- datetime(2012, 12, 26, 14, 45, 25, 0),
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
- datetime(2012, 11, 14, 9, 57, 03, 0),
- datetime(2012, 12, 26, 14, 45, 25, 0),
+ datetime.datetime(2012, 11, 14, 9, 57, 03, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
- datetime(2012, 11, 16, 7, 25, 46, 0),
- datetime(2012, 12, 26, 14, 45, 24, 0),
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
@@ -218,18 +218,21 @@ class AvailabilityZoneSerializerTest(test.TestCase):
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at':
- datetime(2012, 12, 26, 14, 45, 25)}}}},
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at':
- datetime(2012, 12, 26, 14, 45, 25)}},
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}},
'fake_host-2': {
'nova-network': {'active': True,
'available': False,
'updated_at':
- datetime(2012, 12, 26, 14, 45, 24)}}}},
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 24)}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
diff --git a/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py b/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py
index 39a883049..66a8a8f82 100644
--- a/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py
+++ b/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py
@@ -16,7 +16,7 @@
import telnetlib
-from coverage import coverage
+import coverage
import webob
from nova.api.openstack.compute.contrib import coverage_ext
@@ -48,8 +48,8 @@ class CoverageExtensionTest(test.TestCase):
super(CoverageExtensionTest, self).setUp()
self.stubs.Set(telnetlib.Telnet, 'write', fake_telnet)
self.stubs.Set(telnetlib.Telnet, 'expect', fake_telnet)
- self.stubs.Set(coverage, 'report', fake_report)
- self.stubs.Set(coverage, 'xml_report', fake_xml_report)
+ self.stubs.Set(coverage.coverage, 'report', fake_report)
+ self.stubs.Set(coverage.coverage, 'xml_report', fake_xml_report)
self.admin_context = context.RequestContext('fakeadmin_0',
'fake',
is_admin=True)
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index 3a6e5db7c..aba1b92c1 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -14,7 +14,8 @@
# under the License.
-from datetime import datetime
+import datetime
+
from nova.api.openstack.compute.contrib import services
from nova import context
from nova import db
@@ -24,35 +25,36 @@ from nova import test
from nova.tests.api.openstack import fakes
-fake_services_list = [{'binary': 'nova-scheduler',
- 'host': 'host1',
- 'id': 1,
- 'disabled': True,
- 'topic': 'scheduler',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2),
- 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
- {'binary': 'nova-compute',
- 'host': 'host1',
- 'id': 2,
- 'disabled': True,
- 'topic': 'compute',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5),
- 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
- {'binary': 'nova-scheduler',
- 'host': 'host2',
- 'id': 3,
- 'disabled': False,
- 'topic': 'scheduler',
- 'updated_at': datetime(2012, 9, 19, 6, 55, 34),
- 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
- {'binary': 'nova-compute',
- 'host': 'host2',
- 'id': 4,
- 'disabled': True,
- 'topic': 'compute',
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38),
- 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
- ]
+fake_services_list = [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'disabled': True,
+ 'topic': 'scheduler',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'disabled': True,
+ 'topic': 'compute',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'disabled': False,
+ 'topic': 'scheduler',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'disabled': True,
+ 'topic': 'compute',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)},
+ ]
class FakeRequest(object):
@@ -103,7 +105,7 @@ def fake_service_update(context, service_id, values):
def fake_utcnow():
- return datetime(2012, 10, 29, 13, 42, 11)
+ return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTest(test.TestCase):
@@ -130,19 +132,19 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'nova-scheduler',
'host': 'host1', 'zone': 'internal',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1', 'zone': 'nova',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler', 'host': 'host2',
'zone': 'internal',
'status': 'enabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute', 'host': 'host2',
'zone': 'nova',
'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
@@ -152,11 +154,11 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
'zone': 'internal',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
@@ -166,11 +168,11 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-compute', 'host': 'host2',
'zone': 'nova',
'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
@@ -180,7 +182,7 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_enable(self):
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index 73ef8caa3..7e44581e8 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -22,10 +22,7 @@
import os
import mox
-from testtools.matchers import Contains
-from testtools.matchers import MatchesAll
-from testtools.matchers import Not
-from testtools.matchers import StartsWith
+from testtools import matchers
from nova import exception
from nova.openstack.common import cfg
@@ -120,26 +117,26 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
'ari_path': 'ggg',
}
config = pxe.build_pxe_config(**args)
- self.assertThat(config, StartsWith('default deploy'))
+ self.assertThat(config, matchers.StartsWith('default deploy'))
# deploy bits are in the deploy section
start = config.index('label deploy')
end = config.index('label boot')
- self.assertThat(config[start:end], MatchesAll(
- Contains('kernel ddd'),
- Contains('initrd=eee'),
- Contains('deployment_id=aaa'),
- Contains('deployment_key=bbb'),
- Contains('iscsi_target_iqn=ccc'),
- Not(Contains('kernel fff')),
+ self.assertThat(config[start:end], matchers.MatchesAll(
+ matchers.Contains('kernel ddd'),
+ matchers.Contains('initrd=eee'),
+ matchers.Contains('deployment_id=aaa'),
+ matchers.Contains('deployment_key=bbb'),
+ matchers.Contains('iscsi_target_iqn=ccc'),
+ matchers.Not(matchers.Contains('kernel fff')),
))
# boot bits are in the boot section
start = config.index('label boot')
- self.assertThat(config[start:], MatchesAll(
- Contains('kernel fff'),
- Contains('initrd=ggg'),
- Not(Contains('kernel ddd')),
+ self.assertThat(config[start:], matchers.MatchesAll(
+ matchers.Contains('kernel fff'),
+ matchers.Contains('initrd=ggg'),
+ matchers.Not(matchers.Contains('kernel ddd')),
))
def test_build_network_config(self):
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index b8212848c..dc381d800 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -58,7 +58,7 @@ import nova.policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
-from nova.tests.db.fakes import FakeModel
+from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova.tests import matchers
@@ -4688,6 +4688,60 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(properties['d'], 'd')
self.assertFalse('spam' in properties)
+ def _do_test_snapshot_image_service_fails(self, method, image_id):
+ # Ensure task_state remains at None if image service fails.
+ def fake_fails(*args, **kwargs):
+ raise test.TestingException()
+
+ restore = getattr(fake_image._FakeImageService, method)
+ self.stubs.Set(fake_image._FakeImageService, method, fake_fails)
+
+ instance = self._create_fake_instance()
+ self.assertRaises(test.TestingException,
+ self.compute_api.snapshot,
+ self.context,
+ instance,
+ 'no_image_snapshot',
+ image_id=image_id)
+
+ self.stubs.Set(fake_image._FakeImageService, method, restore)
+ db_instance = db.instance_get_all(self.context)[0]
+ self.assertIsNone(db_instance['task_state'])
+
+ def test_snapshot_image_creation_fails(self):
+ self._do_test_snapshot_image_service_fails('create', None)
+
+ def test_snapshot_image_show_fails(self):
+ self._do_test_snapshot_image_service_fails('show', 'image')
+
+ def _do_test_backup_image_service_fails(self, method, image_id):
+ # Ensure task_state remains at None if image service fails.
+ def fake_fails(*args, **kwargs):
+ raise test.TestingException()
+
+ restore = getattr(fake_image._FakeImageService, method)
+ self.stubs.Set(fake_image._FakeImageService, method, fake_fails)
+
+ instance = self._create_fake_instance()
+ self.assertRaises(test.TestingException,
+ self.compute_api.backup,
+ self.context,
+ instance,
+ 'no_image_backup',
+ 'DAILY',
+ 0,
+ image_id=image_id)
+
+ self.stubs.Set(fake_image._FakeImageService, method, restore)
+ db_instance = db.instance_get_all(self.context)[0]
+ self.assertIsNone(db_instance['task_state'])
+
+ def test_backup_image_creation_fails(self):
+ self._do_test_backup_image_service_fails('create', None)
+
+ def test_backup_image_show_fails(self):
+ self._do_test_backup_image_service_fails('show', 'image')
+
def test_backup(self):
# Can't backup an instance which is already being backed up.
instance = self._create_fake_instance()
@@ -5287,7 +5341,7 @@ class ComputeAPITestCase(BaseTestCase):
_context = context.get_admin_context()
instance = self._create_fake_instance({'metadata': {'key1': 'value1'}})
- instance = dict(instance)
+ instance = dict(instance.iteritems())
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1'})
@@ -5879,11 +5933,11 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
- mock_rule = FakeModel({'parent_group_id': 1})
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': [instance]})
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
@@ -5908,11 +5962,11 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
- mock_rule = FakeModel({'parent_group_id': 1})
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': [instance]})
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
@@ -5935,11 +5989,11 @@ class ComputeAPITestCase(BaseTestCase):
def test_secgroup_refresh_none(self):
def rule_get(*args, **kwargs):
- mock_rule = FakeModel({'parent_group_id': 1})
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': []})
+ mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(
@@ -5957,7 +6011,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': [instance]})
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
@@ -5978,7 +6032,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': [instance]})
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
@@ -5997,7 +6051,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_secrule_refresh_none(self):
def group_get(*args, **kwargs):
- mock_group = FakeModel({'instances': []})
+ mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 6e7227d4c..4372039e0 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -359,6 +359,9 @@ class UsageInfoTestCase(test.TestCase):
extra_usage_info = {'image_name': 'fake_name'}
db.instance_system_metadata_update(self.context, instance['uuid'],
sys_metadata, False)
+ # NOTE(russellb) Make sure our instance has the latest system_metadata
+ # in it.
+ instance = db.instance_get(self.context, instance_id)
compute_utils.notify_about_instance_usage(self.context, instance,
'create.start', extra_usage_info=extra_usage_info)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
@@ -382,14 +385,3 @@ class UsageInfoTestCase(test.TestCase):
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
-
-
-class MetadataToDictTestCase(test.TestCase):
- def test_metadata_to_dict(self):
- self.assertEqual(compute_utils.metadata_to_dict(
- [{'key': 'foo1', 'value': 'bar'},
- {'key': 'foo2', 'value': 'baz'}]),
- {'foo1': 'bar', 'foo2': 'baz'})
-
- def test_metadata_to_dict_empty(self):
- self.assertEqual(compute_utils.metadata_to_dict([]), {})
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
index 9155a3f68..2f4d0ebb1 100644
--- a/nova/tests/conf_fixture.py
+++ b/nova/tests/conf_fixture.py
@@ -22,7 +22,7 @@ from nova import config
from nova import ipv6
from nova.openstack.common import cfg
from nova import paths
-from nova.tests.utils import cleanup_dns_managers
+from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
@@ -70,5 +70,5 @@ class ConfFixture(fixtures.Fixture):
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
- self.addCleanup(cleanup_dns_managers)
+ self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index a573b7d1c..259d192cb 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -179,6 +179,7 @@ class Domain(object):
self._def = self._parse_definition(xml)
self._has_saved_state = False
self._snapshots = {}
+ self._id = self._connection._id_counter
def _parse_definition(self, xml):
try:
@@ -299,6 +300,9 @@ class Domain(object):
self._state = VIR_DOMAIN_SHUTOFF
self._connection._mark_not_running(self)
+ def ID(self):
+ return self._id
+
def name(self):
return self._def['name']
@@ -517,6 +521,8 @@ class Connection(object):
if dom._transient:
self._undefine(dom)
+ dom._id = -1
+
for (k, v) in self._running_vms.iteritems():
if v == dom:
del self._running_vms[k]
diff --git a/nova/tests/integrated/api_samples/README.rst b/nova/tests/integrated/api_samples/README.rst
index 065df1d32..b2ad71d4c 100644
--- a/nova/tests/integrated/api_samples/README.rst
+++ b/nova/tests/integrated/api_samples/README.rst
@@ -1,11 +1,21 @@
Api Samples
===========
-Samples in this directory are automatically generated from the api samples
-integration tests. To regenerate the samples, simply set GENERATE_SAMPLES
-in the environment before running the tests. For example:
+This part of the tree contains templates for API samples. The
+documentation in doc/api_samples is completely autogenerated from the
+tests in this directory.
+
+To add a new api sample, add tests for the common passing and failing
+cases in this directory for your extension, and modify test_samples.py
+for your tests. There should be both JSON and XML tests included.
+
+Then run the following command:
GENERATE_SAMPLES=True tox -epy27 nova.tests.integrated
+which will create the files in doc/api_samples.
+
If new tests are added or the .tpl files are changed due to bug fixes, the
-samples should be regenerated so they are in sync with the templates.
+samples must be regenerated so they are in sync with the templates, as
+there is an additional test which reloads the documentation and
+ensures that it's in sync.
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
new file mode 100644
index 000000000..607109d70
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "floating_ip_pools": [
+ {
+ "name": "%(pool1)s"
+ },
+ {
+ "name": "%(pool2)s"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
new file mode 100644
index 000000000..ae4b3a4bb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<floating_ip_pools>
+ <floating_ip_pool name="%(pool1)s"/>
+ <floating_ip_pool name="%(pool2)s"/>
+</floating_ip_pools>
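
The .tpl files use Python %-style placeholders that the sample tests fill from a substitution dict (see the subs passed to _verify_response in FloatingIPPoolsSampleJsonTests below). A minimal sketch of that substitution, independent of the test harness:

    template = '''{
        "floating_ip_pools": [
            {"name": "%(pool1)s"},
            {"name": "%(pool2)s"}
        ]
    }'''
    print(template % {'pool1': 'pool1', 'pool2': 'pool2'})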
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 887ca206f..4cadbf9e5 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -22,13 +22,13 @@ import re
import urllib
import uuid as uuid_lib
-from coverage import coverage
+import coverage
from lxml import etree
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.cloudpipe.pipelib import CloudPipe
+from nova.cloudpipe import pipelib
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -379,7 +379,6 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-flavor-access')
do_not_approve_additions.append('os-flavor-extra-specs')
do_not_approve_additions.append('os-floating-ip-dns')
- do_not_approve_additions.append('os-floating-ip-pools')
do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-instance_usage_audit_log')
@@ -761,7 +760,7 @@ class CoverageExtJsonTests(ApiSampleTestBase):
self.stubs.Set(coverage_ext.CoverageController, '_check_coverage',
_fake_check_coverage)
- self.stubs.Set(coverage, 'xml_report', _fake_xml_report)
+ self.stubs.Set(coverage.coverage, 'xml_report', _fake_xml_report)
def test_start_coverage(self):
# Start coverage data collection.
@@ -1512,7 +1511,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
- self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data)
+ self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
self.stubs.Set(network_manager.NetworkManager, "get_network",
network_api_get)
@@ -2662,3 +2661,30 @@ class BareMetalNodesJsonTest(ApiSampleTestBase, bm_db_base.BMDBTestCase):
class BareMetalNodesXmlTest(BareMetalNodesJsonTest):
ctype = 'xml'
+
+
+class FloatingIPPoolsSampleJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_pools."
+ "Floating_ip_pools")
+
+ def test_list_floatingippools(self):
+ pool_list = ["pool1", "pool2"]
+
+ def fake_get_floating_ip_pools(self, context):
+ return [{'name': pool_list[0]},
+ {'name': pool_list[1]}]
+
+ self.stubs.Set(network_api.API, "get_floating_ip_pools",
+ fake_get_floating_ip_pools)
+ response = self._do_get('os-floating-ip-pools')
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'pool1': pool_list[0],
+ 'pool2': pool_list[1]
+ }
+ return self._verify_response('floatingippools-list-resp',
+ subs, response)
+
+
+class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
+ ctype = "xml"
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index 5a82e0033..b2361b13c 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -150,7 +150,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
workers = self._get_workers()
LOG.info('workers: %r' % workers)
- self.assertFalse(workers, 'No OS processes left.')
+ self.assertFalse(workers, 'OS processes left %r' % workers)
def test_terminate_sigkill(self):
self._terminate_with_signal(signal.SIGKILL)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index f8b9f9296..6fcd19d92 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -25,7 +25,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
-from nova.scheduler.filters.trusted_filter import AttestationService
+from nova.scheduler.filters import trusted_filter
from nova import servicegroup
from nova import test
from nova.tests.scheduler import fakes
@@ -242,7 +242,8 @@ class HostFiltersTestCase(test.TestCase):
self.oat_data = ''
self.oat_attested = False
self.stubs = stubout.StubOutForTesting()
- self.stubs.Set(AttestationService, '_request', self.fake_oat_request)
+ self.stubs.Set(trusted_filter.AttestationService, '_request',
+ self.fake_oat_request)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fb2e76e45..11c16d6dd 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -26,9 +26,9 @@ from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
try:
- from boto.connection import HTTPResponse
+ import boto.connection as httplib
except ImportError:
- from httplib import HTTPResponse
+ import httplib
import fixtures
import webob
@@ -79,7 +79,7 @@ class FakeHttplibConnection(object):
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
self.sock = FakeHttplibSocket(resp)
- self.http_response = HTTPResponse(self.sock)
+ self.http_response = httplib.HTTPResponse(self.sock)
# NOTE(vish): boto is accessing private variables for some reason
self._HTTPConnection__response = self.http_response
self.http_response.begin()
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index f15d71633..c2f0b5a11 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -19,7 +19,7 @@
"""Tests for metadata service."""
import base64
-from copy import copy
+import copy
import json
import re
@@ -120,14 +120,14 @@ class MetadataTestCase(test.TestCase):
spectacular=True)
def test_user_data(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
inst['user_data'] = base64.b64encode("happy")
md = fake_InstanceMetadata(self.stubs, inst)
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
def test_no_user_data(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
del inst['user_data']
md = fake_InstanceMetadata(self.stubs, inst)
obj = object()
@@ -136,7 +136,7 @@ class MetadataTestCase(test.TestCase):
obj)
def test_security_groups(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
sgroups = [{'name': 'default'}, {'name': 'other'}]
expected = ['default', 'other']
@@ -145,7 +145,7 @@ class MetadataTestCase(test.TestCase):
self.assertEqual(data['meta-data']['security-groups'], expected)
def test_local_hostname_fqdn(self):
- md = fake_InstanceMetadata(self.stubs, copy(self.instance))
+ md = fake_InstanceMetadata(self.stubs, copy.copy(self.instance))
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
@@ -195,7 +195,7 @@ class MetadataTestCase(test.TestCase):
expected)
def test_pubkey(self):
- md = fake_InstanceMetadata(self.stubs, copy(self.instance))
+ md = fake_InstanceMetadata(self.stubs, copy.copy(self.instance))
pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
self.assertEqual(base.ec2_md_print(pubkey_ent),
@@ -204,7 +204,7 @@ class MetadataTestCase(test.TestCase):
self.instance['key_data'])
def test_image_type_ramdisk(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
inst['ramdisk_id'] = 'ari-853667c0'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/latest/meta-data/ramdisk-id")
@@ -213,7 +213,7 @@ class MetadataTestCase(test.TestCase):
self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
def test_image_type_kernel(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
inst['kernel_id'] = 'aki-c2e26ff2'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/2009-04-04/meta-data/kernel-id")
@@ -229,7 +229,7 @@ class MetadataTestCase(test.TestCase):
md.lookup, "/2009-04-04/meta-data/kernel-id")
def test_check_version(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
md = fake_InstanceMetadata(self.stubs, inst)
self.assertTrue(md._check_version('1.0', '2009-04-04'))
@@ -250,7 +250,7 @@ class OpenStackMetadataTestCase(test.TestCase):
def test_top_level_listing(self):
# request for /openstack/<version>/ should show metadata.json
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
listing = mdinst.lookup("/openstack/")
@@ -267,14 +267,14 @@ class OpenStackMetadataTestCase(test.TestCase):
def test_version_content_listing(self):
# request for /openstack/<version>/ should show metadata.json
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
listing = mdinst.lookup("/openstack/2012-08-10")
self.assertTrue("meta_data.json" in listing)
def test_metadata_json(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
content = [
('/etc/my.conf', "content of my.conf"),
('/root/hello', "content of /root/hello"),
@@ -309,7 +309,7 @@ class OpenStackMetadataTestCase(test.TestCase):
def test_extra_md(self):
# make sure extra_md makes it through to metadata
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
extra = {'foo': 'bar', 'mylist': [1, 2, 3],
'mydict': {"one": 1, "two": 2}}
mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
@@ -322,14 +322,14 @@ class OpenStackMetadataTestCase(test.TestCase):
def test_password(self):
# make sure extra_md makes it through to metadata
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
result = mdinst.lookup("/openstack/latest/password")
self.assertEqual(result, password.handle_password)
def test_userdata(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
@@ -348,7 +348,7 @@ class OpenStackMetadataTestCase(test.TestCase):
mdinst.lookup, "/openstack/2012-08-10/user_data")
def test_random_seed(self):
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2013-04-04 has the 'random' field
@@ -364,7 +364,7 @@ class OpenStackMetadataTestCase(test.TestCase):
def test_no_dashes_in_metadata(self):
# top level entries in meta_data should not contain '-' in their name
- inst = copy(self.instance)
+ inst = copy.copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
@@ -522,7 +522,7 @@ class MetadataPasswordTestCase(test.TestCase):
super(MetadataPasswordTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
- self.instance = copy(INSTANCES[0])
+ self.instance = copy.copy(INSTANCES[0])
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
index 39669967f..3c63f6d4a 100644
--- a/nova/tests/test_periodic_tasks.py
+++ b/nova/tests/test_periodic_tasks.py
@@ -15,9 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
import fixtures
-import time
+from testtools import matchers
from nova import manager
from nova import test
@@ -44,10 +45,11 @@ class ManagerMetaTestCase(test.TestCase):
return 'baz'
m = Manager()
- self.assertEqual(2, len(m._periodic_tasks))
+ self.assertThat(m._periodic_tasks, matchers.HasLength(2))
self.assertEqual(None, m._periodic_spacing['foo'])
self.assertEqual(4, m._periodic_spacing['bar'])
- self.assertFalse('baz' in m._periodic_spacing)
+ self.assertThat(
+ m._periodic_spacing, matchers.Not(matchers.Contains('baz')))
class Manager(test.TestCase):
@@ -60,7 +62,7 @@ class Manager(test.TestCase):
return 'bar'
m = Manager()
- self.assertEqual(1, len(m._periodic_tasks))
+ self.assertThat(m._periodic_tasks, matchers.HasLength(1))
self.assertEqual(200, m._periodic_spacing['bar'])
# Now a single pass of the periodic tasks
@@ -87,8 +89,8 @@ class Manager(test.TestCase):
m.periodic_tasks(None)
time.sleep(0.1)
idle = m.periodic_tasks(None)
- self.assertTrue(idle > 9.7)
- self.assertTrue(idle < 9.9)
+ self.assertThat(idle, matchers.GreaterThan(9.7))
+ self.assertThat(idle, matchers.LessThan(9.9))
def test_periodic_tasks_disabled(self):
class Manager(manager.Manager):
@@ -109,7 +111,7 @@ class Manager(test.TestCase):
return 'bar'
m = Manager()
- self.assertEqual(1, len(m._periodic_tasks))
+ self.assertThat(m._periodic_tasks, matchers.HasLength(1))
def test_external_running_elsewhere(self):
self.flags(run_external_periodic_tasks=False)
@@ -120,4 +122,4 @@ class Manager(test.TestCase):
return 'bar'
m = Manager()
- self.assertEqual(0, len(m._periodic_tasks))
+ self.assertEqual([], m._periodic_tasks)
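The assertions above switch to testtools matchers, which report the actual collection on failure instead of a bare length or boolean. A minimal sketch of the matcher style, assuming testtools is available; the values are illustrative:

from testtools import TestCase, matchers


class MatcherStyleExample(TestCase):
    def test_matcher_style(self):
        tasks = ['foo', 'bar']
        # HasLength replaces assertEqual(2, len(tasks)).
        self.assertThat(tasks, matchers.HasLength(2))
        # Not(Contains(...)) replaces assertFalse('baz' in tasks).
        self.assertThat(tasks, matchers.Not(matchers.Contains('baz')))
        # GreaterThan/LessThan replace assertTrue(x > y) style checks.
        self.assertThat(9.8, matchers.GreaterThan(9.7))
        self.assertThat(9.8, matchers.LessThan(9.9))


if __name__ == '__main__':
    import unittest
    unittest.main()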
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 9eab72c5b..84d56cadf 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -778,3 +778,14 @@ class IntLikeTestCase(test.TestCase):
self.assertFalse(
utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64"))
self.assertFalse(utils.is_int_like("a1"))
+
+
+class MetadataToDictTestCase(test.TestCase):
+ def test_metadata_to_dict(self):
+ self.assertEqual(utils.metadata_to_dict(
+ [{'key': 'foo1', 'value': 'bar'},
+ {'key': 'foo2', 'value': 'baz'}]),
+ {'foo1': 'bar', 'foo2': 'baz'})
+
+ def test_metadata_to_dict_empty(self):
+ self.assertEqual(utils.metadata_to_dict([]), {})
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 9747ecccd..6ea2d0ef7 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -20,7 +20,7 @@ import netaddr
import sys
import traceback
-from nova.compute.manager import ComputeManager
+from nova.compute import manager
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -159,7 +159,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
# NOTE(sdague) the try block is to make it easier to debug a
# failure by knowing which driver broke
try:
- cm = ComputeManager()
+ cm = manager.ComputeManager()
except Exception as e:
self.fail("Couldn't load driver %s - %s" % (cls, e))
@@ -173,7 +173,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
raise test.TestingException()
self.stubs.Set(sys, 'exit', _fake_exit)
- self.assertRaises(test.TestingException, ComputeManager)
+ self.assertRaises(test.TestingException, manager.ComputeManager)
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
index b04bc3e03..cd64688a2 100644
--- a/nova/tests/test_wsgi.py
+++ b/nova/tests/test_wsgi.py
@@ -22,6 +22,8 @@ import os.path
import tempfile
import eventlet
+import httplib2
+import paste
import nova.exception
from nova import test
@@ -108,6 +110,25 @@ class TestWSGIServer(test.TestCase):
server.stop()
server.wait()
+ def test_uri_length_limit(self):
+ server = nova.wsgi.Server("test_uri_length_limit", None,
+ host="127.0.0.1", max_url_len=16384)
+ server.start()
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
+ resp, _ = httplib2.Http().request(uri)
+ eventlet.sleep(0)
+ self.assertNotEqual(resp.status,
+ paste.httpexceptions.HTTPRequestURITooLong.code)
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
+ resp, _ = httplib2.Http().request(uri)
+ eventlet.sleep(0)
+ self.assertEqual(resp.status,
+ paste.httpexceptions.HTTPRequestURITooLong.code)
+ server.stop()
+ server.wait()
+
class TestWSGIServerWithSSL(test.TestCase):
"""WSGI server with SSL tests."""
diff --git a/nova/utils.py b/nova/utils.py
index f9e08fd80..97091e42c 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -1261,3 +1261,10 @@ def last_bytes(file_like_object, num):
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
+
+
+def metadata_to_dict(metadata):
+ result = {}
+ for item in metadata:
+ result[item['key']] = item['value']
+ return result
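The new helper flattens the list-of-rows representation used for instance metadata and system_metadata into a plain dict. A quick usage sketch, assuming the patched tree; the rows are illustrative:

from nova import utils

rows = [{'key': 'image_cache_in_nova', 'value': 'true'},
        {'key': 'image_bittorrent', 'value': 'false'}]

# -> {'image_cache_in_nova': 'true', 'image_bittorrent': 'false'}
print(utils.metadata_to_dict(rows))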
diff --git a/nova/virt/baremetal/__init__.py b/nova/virt/baremetal/__init__.py
index e3ecef821..9c8318660 100644
--- a/nova/virt/baremetal/__init__.py
+++ b/nova/virt/baremetal/__init__.py
@@ -12,4 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from nova.virt.baremetal.driver import BareMetalDriver
+from nova.virt.baremetal import driver
+
+BareMetalDriver = driver.BareMetalDriver
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index e2240053c..34bcd1229 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -23,14 +23,13 @@
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
-from nova.db.sqlalchemy.api import is_user_context
-from nova.db.sqlalchemy.api import require_admin_context
+from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.virt.baremetal.db.sqlalchemy import models
-from nova.virt.baremetal.db.sqlalchemy.session import get_session
+from nova.virt.baremetal.db.sqlalchemy import session as db_session
LOG = logging.getLogger(__name__)
@@ -44,7 +43,7 @@ def model_query(context, *args, **kwargs):
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
- session = kwargs.get('session') or get_session()
+ session = kwargs.get('session') or db_session.get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
@@ -60,7 +59,7 @@ def model_query(context, *args, **kwargs):
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
- if project_only and is_user_context(context):
+ if project_only and sqlalchemy_api.is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
@@ -68,7 +67,7 @@ def model_query(context, *args, **kwargs):
def _save(ref, session=None):
if not session:
- session = get_session()
+ session = db_session.get_session()
# We must not call ref.save() with session=None, otherwise NovaBase
# uses nova-db's session, which cannot access bm-db.
ref.save(session=session)
@@ -81,7 +80,7 @@ def _build_node_order_by(query):
return query
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_get_all(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
if service_host:
@@ -89,7 +88,7 @@ def bm_node_get_all(context, service_host=None):
return query.all()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_find_free(context, service_host=None,
cpus=None, memory_mb=None, local_gb=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
@@ -106,7 +105,7 @@ def bm_node_find_free(context, service_host=None,
return query.first()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_get(context, bm_node_id):
# bm_node_id may be passed as a string. Convert to INT to improve DB perf.
bm_node_id = int(bm_node_id)
@@ -120,7 +119,7 @@ def bm_node_get(context, bm_node_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_get_by_instance_uuid(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
@@ -135,7 +134,7 @@ def bm_node_get_by_instance_uuid(context, instance_uuid):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_create(context, values):
bm_node_ref = models.BareMetalNode()
bm_node_ref.update(values)
@@ -143,14 +142,14 @@ def bm_node_create(context, values):
return bm_node_ref
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_update(context, bm_node_id, values):
model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update(values)
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_set_uuid_safe(context, bm_node_id, values):
"""Associate an instance to a node safely
@@ -164,7 +163,7 @@ def bm_node_set_uuid_safe(context, bm_node_id, values):
raise exception.NovaException(_(
"instance_uuid must be supplied to bm_node_set_uuid_safe"))
- session = get_session()
+ session = db_session.get_session()
with session.begin():
query = model_query(context, models.BareMetalNode,
session=session, read_deleted="no").\
@@ -181,7 +180,7 @@ def bm_node_set_uuid_safe(context, bm_node_id, values):
return ref
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_node_destroy(context, bm_node_id):
model_query(context, models.BareMetalNode).\
filter_by(id=bm_node_id).\
@@ -190,13 +189,13 @@ def bm_node_destroy(context, bm_node_id):
'updated_at': literal_column('updated_at')})
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_get_all(context):
query = model_query(context, models.BareMetalPxeIp, read_deleted="no")
return query.all()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_create(context, address, server_address):
ref = models.BareMetalPxeIp()
ref.address = address
@@ -205,7 +204,7 @@ def bm_pxe_ip_create(context, address, server_address):
return ref
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_create_direct(context, bm_pxe_ip):
ref = bm_pxe_ip_create(context,
address=bm_pxe_ip['address'],
@@ -213,7 +212,7 @@ def bm_pxe_ip_create_direct(context, bm_pxe_ip):
return ref
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_destroy(context, ip_id):
# Delete physically since it has unique columns
model_query(context, models.BareMetalPxeIp, read_deleted="no").\
@@ -221,7 +220,7 @@ def bm_pxe_ip_destroy(context, ip_id):
delete()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_destroy_by_address(context, address):
# Delete physically since it has unique columns
model_query(context, models.BareMetalPxeIp, read_deleted="no").\
@@ -229,7 +228,7 @@ def bm_pxe_ip_destroy_by_address(context, address):
delete()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_get(context, ip_id):
result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
filter_by(id=ip_id).\
@@ -238,7 +237,7 @@ def bm_pxe_ip_get(context, ip_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
@@ -250,9 +249,9 @@ def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_associate(context, bm_node_id):
- session = get_session()
+ session = db_session.get_session()
with session.begin():
# Check if the node really exists
node_ref = model_query(context, models.BareMetalNode,
@@ -288,14 +287,14 @@ def bm_pxe_ip_associate(context, bm_node_id):
return ip_ref.id
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_pxe_ip_disassociate(context, bm_node_id):
model_query(context, models.BareMetalPxeIp, read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
update({'bm_node_id': None})
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_get(context, if_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
@@ -309,14 +308,14 @@ def bm_interface_get(context, if_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_get_all(context):
query = model_query(context, models.BareMetalInterface,
read_deleted="no")
return query.all()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_destroy(context, if_id):
# Delete physically since it has unique columns
model_query(context, models.BareMetalInterface, read_deleted="no").\
@@ -324,7 +323,7 @@ def bm_interface_destroy(context, if_id):
delete()
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
ref = models.BareMetalInterface()
ref.bm_node_id = bm_node_id
@@ -335,9 +334,9 @@ def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
return ref.id
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
- session = get_session()
+ session = db_session.get_session()
with session.begin():
bm_interface = model_query(context, models.BareMetalInterface,
read_deleted="no", session=session).\
@@ -361,7 +360,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
raise e
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_get_by_vif_uuid(context, vif_uuid):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
@@ -375,7 +374,7 @@ def bm_interface_get_by_vif_uuid(context, vif_uuid):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
@@ -388,7 +387,7 @@ def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
swap_mb):
ref = models.BareMetalDeployment()
@@ -401,7 +400,7 @@ def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
return ref.id
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_deployment_get(context, dep_id):
result = model_query(context, models.BareMetalDeployment,
read_deleted="no").\
@@ -410,7 +409,7 @@ def bm_deployment_get(context, dep_id):
return result
-@require_admin_context
+@sqlalchemy_api.require_admin_context
def bm_deployment_destroy(context, dep_id):
model_query(context, models.BareMetalDeployment).\
filter_by(id=dep_id).\
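The long run of hunks above is mechanical: the decorators and session helpers are now reached through module aliases (`sqlalchemy_api`, `db_session`) rather than directly imported names, with identical behaviour. A minimal sketch of the pattern using a hypothetical query helper:

from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.virt.baremetal.db.sqlalchemy import session as db_session


@sqlalchemy_api.require_admin_context
def bm_example_list(context, session=None):
    """Hypothetical helper: admin-only, bound to the bare-metal DB session."""
    session = session or db_session.get_session()
    return []   # a real helper would run a query against `session`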
diff --git a/nova/virt/baremetal/db/sqlalchemy/migration.py b/nova/virt/baremetal/db/sqlalchemy/migration.py
index 929793e70..cfc26c04c 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migration.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migration.py
@@ -25,7 +25,7 @@ import sqlalchemy
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.baremetal.db import migration
-from nova.virt.baremetal.db.sqlalchemy.session import get_engine
+from nova.virt.baremetal.db.sqlalchemy import session
LOG = logging.getLogger(__name__)
@@ -71,24 +71,25 @@ def db_sync(version=None):
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
- return versioning_api.upgrade(get_engine(), repository, version)
+ return versioning_api.upgrade(session.get_engine(), repository,
+ version)
else:
- return versioning_api.downgrade(get_engine(), repository,
+ return versioning_api.downgrade(session.get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
- return versioning_api.db_version(get_engine(), repository)
+ return versioning_api.db_version(session.get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
- engine = get_engine()
+ engine = session.get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(migration.INIT_VERSION)
- return versioning_api.db_version(get_engine(), repository)
+ return versioning_api.db_version(session.get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
@@ -98,7 +99,7 @@ def db_version():
def db_version_control(version=None):
repository = _find_migrate_repo()
- versioning_api.version_control(get_engine(), repository, version)
+ versioning_api.version_control(session.get_engine(), repository, version)
return version
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
index 7a400af6f..b3f39fdc3 100644
--- a/nova/virt/baremetal/fake.py
+++ b/nova/virt/baremetal/fake.py
@@ -17,7 +17,7 @@
# under the License.
from nova.virt.baremetal import base
-from nova.virt.firewall import NoopFirewallDriver
+from nova.virt import firewall
class FakeDriver(base.NodeDriver):
@@ -52,7 +52,7 @@ class FakePowerManager(base.PowerManager):
super(FakePowerManager, self).__init__(**kwargs)
-class FakeFirewallDriver(NoopFirewallDriver):
+class FakeFirewallDriver(firewall.NoopFirewallDriver):
def __init__(self):
super(FakeFirewallDriver, self).__init__()
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index 393b3657b..5d4a87625 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -25,7 +25,7 @@ import os
import stat
import tempfile
-from nova.exception import InvalidParameterValue
+from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import paths
@@ -104,13 +104,17 @@ class IPMI(base.PowerManager):
self.port = node['terminal_port']
if self.node_id == None:
- raise InvalidParameterValue(_("Node id not supplied to IPMI"))
+ raise exception.InvalidParameterValue(_("Node id not supplied "
+ "to IPMI"))
if self.address == None:
- raise InvalidParameterValue(_("Address not supplied to IPMI"))
+ raise exception.InvalidParameterValue(_("Address not supplied "
+ "to IPMI"))
if self.user == None:
- raise InvalidParameterValue(_("User not supplied to IPMI"))
+ raise exception.InvalidParameterValue(_("User not supplied "
+ "to IPMI"))
if self.password == None:
- raise InvalidParameterValue(_("Password not supplied to IPMI"))
+ raise exception.InvalidParameterValue(_("Password not supplied "
+ "to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 0daac1d46..91de43dd1 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -67,7 +67,8 @@ CHEETAH = None
def _get_cheetah():
global CHEETAH
if CHEETAH is None:
- from Cheetah.Template import Template as CHEETAH
+ from Cheetah import Template
+ CHEETAH = Template.Template
return CHEETAH
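The Cheetah import stays lazy; it is just spelled as a module import plus attribute lookup. A standalone sketch of the lazy-import-and-cache pattern (the dependency name is the only detail borrowed from the hunk):

CHEETAH = None


def _get_cheetah():
    # Import Cheetah only on first use and cache the class, so merely
    # importing this module does not require the templating dependency.
    global CHEETAH
    if CHEETAH is None:
        from Cheetah import Template
        CHEETAH = Template.Template
    return CHEETAH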
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
index a898d3ac2..e01006eaa 100644
--- a/nova/virt/hyperv/vif.py
+++ b/nova/virt/hyperv/vif.py
@@ -15,6 +15,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import abc
import sys
import uuid
@@ -22,11 +23,12 @@ import uuid
if sys.platform == 'win32':
import wmi
-from abc import abstractmethod
+
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
+
hyperv_opts = [
cfg.StrOpt('vswitch_name',
default=None,
@@ -42,11 +44,11 @@ LOG = logging.getLogger(__name__)
class HyperVBaseVIFDriver(object):
- @abstractmethod
+ @abc.abstractmethod
def plug(self, instance, vif):
pass
- @abstractmethod
+ @abc.abstractmethod
def unplug(self, instance, vif):
pass
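The base VIF driver now references the decorator through the abc module. A minimal sketch of the pattern; note that because the class does not use ABCMeta as its metaclass, @abc.abstractmethod here is documentation only and does not block instantiation:

import abc


class BaseVIFDriver(object):
    # Hypothetical base class mirroring the structure above.

    @abc.abstractmethod
    def plug(self, instance, vif):
        pass

    @abc.abstractmethod
    def unplug(self, instance, vif):
        pass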
diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py
index 00f4fd6b0..535d6c729 100644
--- a/nova/virt/libvirt/__init__.py
+++ b/nova/virt/libvirt/__init__.py
@@ -14,4 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.virt.libvirt.driver import LibvirtDriver
+from nova.virt.libvirt import driver
+
+LibvirtDriver = driver.LibvirtDriver
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7439ad40a..4ee42a0ce 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -874,6 +874,7 @@ class LibvirtDriver(driver.ComputeDriver):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
+ old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
@@ -882,8 +883,10 @@ class LibvirtDriver(driver.ComputeDriver):
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
+ dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
+ new_domid = dom.ID()
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
@@ -894,6 +897,10 @@ class LibvirtDriver(driver.ComputeDriver):
instance)
timer.start(interval=0.5).wait()
return True
+ elif old_domid != new_domid:
+ LOG.info(_("Instance may have been rebooted during soft "
+ "reboot, so return now."), instance=instance)
+ return True
greenthread.sleep(1)
return False
@@ -966,11 +973,6 @@ class LibvirtDriver(driver.ComputeDriver):
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
- xml = self._get_existing_domain_xml(instance, network_info,
- block_device_info)
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info)
-
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
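The soft-reboot change tracks the libvirt domain ID across the polling loop: if the ID changes while waiting, the guest has already gone through a reboot cycle and the loop can return early. A simplified sketch of that logic with the libvirt lookups stubbed out; the callables are stand-ins, not the real driver plumbing:

import time


def wait_for_soft_reboot(lookup_domain, wait_seconds, is_shut_down):
    """Sketch: poll a domain; True if it shut down or already rebooted."""
    old_domid = lookup_domain().ID()
    for _ in range(wait_seconds):
        dom = lookup_domain()      # re-look-up: the domain can be recreated
        state = dom.info()[0]
        new_domid = dom.ID()
        if is_shut_down(state):
            return True            # guest powered off; caller recreates it
        if old_domid != new_domid:
            return True            # domain ID changed: reboot already happened
        time.sleep(1)
    return False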
diff --git a/nova/virt/powervm/__init__.py b/nova/virt/powervm/__init__.py
index 83bbcd289..1b63f8310 100644
--- a/nova/virt/powervm/__init__.py
+++ b/nova/virt/powervm/__init__.py
@@ -26,4 +26,6 @@ refer to the IBM Redbook[1] publication.
May 2011. <http://www.redbooks.ibm.com/abstracts/sg247940.html>
"""
-from nova.virt.powervm.driver import PowerVMDriver
+from nova.virt.powervm import driver
+
+PowerVMDriver = driver.PowerVMDriver
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 5a4a2938b..87da30a14 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -372,7 +372,7 @@ class BaseOperator(object):
"""
cmd = self.command.lssyscfg('-r %s --filter "lpar_names=%s"'
% (resource_type, instance_name))
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
if not output:
return None
lpar = LPAR.load_from_conf_data(output[0])
@@ -383,7 +383,8 @@ class BaseOperator(object):
:returns: list -- list with instances names.
"""
- lpar_names = self.run_command(self.command.lssyscfg('-r lpar -F name'))
+ lpar_names = self.run_vios_command(self.command.lssyscfg(
+ '-r lpar -F name'))
if not lpar_names:
return []
return lpar_names
@@ -394,14 +395,15 @@ class BaseOperator(object):
:param lpar: LPAR object
"""
conf_data = lpar.to_string()
- self.run_command(self.command.mksyscfg('-r lpar -i "%s"' % conf_data))
+ self.run_vios_command(self.command.mksyscfg('-r lpar -i "%s"' %
+ conf_data))
def start_lpar(self, instance_name):
"""Start a LPAR instance.
:param instance_name: LPAR instance name
"""
- self.run_command(self.command.chsysstate('-r lpar -o on -n %s'
+ self.run_vios_command(self.command.chsysstate('-r lpar -o on -n %s'
% instance_name))
def stop_lpar(self, instance_name, timeout=30):
@@ -413,7 +415,7 @@ class BaseOperator(object):
"""
cmd = self.command.chsysstate('-r lpar -o shutdown --immed -n %s' %
instance_name)
- self.run_command(cmd)
+ self.run_vios_command(cmd)
# poll instance until stopped or raise exception
lpar_obj = self.get_lpar(instance_name)
@@ -435,7 +437,7 @@ class BaseOperator(object):
:param instance_name: LPAR instance name
"""
- self.run_command(self.command.rmsyscfg('-r lpar -n %s'
+ self.run_vios_command(self.command.rmsyscfg('-r lpar -n %s'
% instance_name))
def get_vhost_by_instance_id(self, instance_id):
@@ -446,7 +448,7 @@ class BaseOperator(object):
"""
instance_hex_id = '%#010x' % int(instance_id)
cmd = self.command.lsmap('-all -field clientid svsa -fmt :')
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
vhosts = dict(item.split(':') for item in list(output))
if instance_hex_id in vhosts:
@@ -463,10 +465,10 @@ class BaseOperator(object):
:returns: id of the virtual ethernet adapter.
"""
cmd = self.command.lsmap('-all -net -field sea -fmt :')
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
sea = output[0]
cmd = self.command.lsdev('-dev %s -attr pvid' % sea)
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
# Returned output looks like this: ['value', '', '1']
if output:
return output[2]
@@ -478,7 +480,7 @@ class BaseOperator(object):
:returns: string -- hostname
"""
- output = self.run_command(self.command.hostname())
+ output = self.run_vios_command(self.command.hostname())
return output[0]
def get_disk_name_by_vhost(self, vhost):
@@ -488,7 +490,7 @@ class BaseOperator(object):
:returns: string -- disk name
"""
cmd = self.command.lsmap('-vadapter %s -field backing -fmt :' % vhost)
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
if output:
return output[0]
@@ -501,7 +503,7 @@ class BaseOperator(object):
:param vhost: the vhost name
"""
cmd = self.command.mkvdev('-vdev %s -vadapter %s') % (disk, vhost)
- self.run_command(cmd)
+ self.run_vios_command(cmd)
def get_memory_info(self):
"""Get memory info.
@@ -510,7 +512,7 @@ class BaseOperator(object):
"""
cmd = self.command.lshwres(
'-r mem --level sys -F configurable_sys_mem,curr_avail_sys_mem')
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
total_mem, avail_mem = output[0].split(',')
return {'total_mem': int(total_mem),
'avail_mem': int(avail_mem)}
@@ -523,7 +525,7 @@ class BaseOperator(object):
cmd = self.command.lshwres(
'-r proc --level sys -F '
'configurable_sys_proc_units,curr_avail_sys_proc_units')
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
total_procs, avail_procs = output[0].split(',')
return {'total_procs': float(total_procs),
'avail_procs': float(avail_procs)}
@@ -533,12 +535,12 @@ class BaseOperator(object):
:returns: tuple - disk info (disk_total, disk_used, disk_avail)
"""
- vgs = self.run_command(self.command.lsvg())
+ vgs = self.run_vios_command(self.command.lsvg())
(disk_total, disk_used, disk_avail) = [0, 0, 0]
for vg in vgs:
cmd = self.command.lsvg('%s -field totalpps usedpps freepps -fmt :'
% vg)
- output = self.run_command(cmd)
+ output = self.run_vios_command(cmd)
# Output example:
# 1271 (10168 megabytes):0 (0 megabytes):1271 (10168 megabytes)
(d_total, d_used, d_avail) = re.findall(r'(\d+) megabytes',
@@ -551,7 +553,7 @@ class BaseOperator(object):
'disk_used': disk_used,
'disk_avail': disk_avail}
- def run_command(self, cmd, check_exit_code=True):
+ def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
:param command: String with the command to run.
@@ -561,7 +563,7 @@ class BaseOperator(object):
check_exit_code=check_exit_code)
return stdout.strip().splitlines()
- def run_command_as_root(self, command, check_exit_code=True):
+ def run_vios_command_as_root(self, command, check_exit_code=True):
"""Run a remote command as root using an active ssh connection.
:param command: List of commands.
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index 66e7d9b02..37d816f8c 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -18,4 +18,6 @@
:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
-from nova.virt.vmwareapi.driver import VMwareESXDriver
+from nova.virt.vmwareapi import driver
+
+VMwareESXDriver = driver.VMwareESXDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 986c4ef28..4000f1f9c 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -45,6 +45,7 @@ from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
+from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
@@ -52,7 +53,7 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMware ESX host.Required if '
+ help='URL for connection to VMware ESX host. Required if '
'compute_driver is vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
@@ -109,9 +110,10 @@ class VMwareESXDriver(driver.ComputeDriver):
"and vmwareapi_host_password to use"
"compute_driver=vmwareapi.VMwareESXDriver"))
- session = VMwareAPISession(host_ip, host_username, host_password,
+ self._session = VMwareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
- self._vmops = vmops.VMwareVMOps(session)
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self._vmops = vmops.VMwareVMOps(self._session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
@@ -172,23 +174,21 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
- def get_volume_connector(self, _instance):
+ def get_volume_connector(self, instance):
"""Return volume connector information."""
- # TODO(vish): When volume attaching is supported, return the
- # proper initiator iqn and host.
- return {
- 'ip': CONF.vmwareapi_host_ip,
- 'initiator': None,
- 'host': None
- }
+ return self._volumeops.get_volume_connector(instance)
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
- pass
+ return self._volumeops.attach_volume(connection_info,
+ instance,
+ mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
- pass
+ return self._volumeops.detach_volume(connection_info,
+ instance,
+ mountpoint)
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 3f5041c22..27e26526f 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -140,16 +141,30 @@ class DataObject(object):
class VirtualDisk(DataObject):
"""
- Virtual Disk class. Does nothing special except setting
- __class__.__name__ to 'VirtualDisk'. Refer place where __class__.__name__
- is used in the code.
+ Virtual Disk class.
"""
- pass
+
+ def __init__(self):
+ super(VirtualDisk, self).__init__()
+ self.key = 0
+ self.unitNumber = 0
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
- pass
+
+ def __init__(self):
+ super(VirtualDiskFlatVer2BackingInfo, self).__init__()
+ self.thinProvisioned = False
+ self.eagerlyScrub = False
+
+
+class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
+ """VirtualDiskRawDiskMappingVer1BackingInfo class."""
+
+ def __init__(self):
+ super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
+ self.lunUuid = ""
class VirtualLsiLogicController(DataObject):
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py
index 999e7a085..6a50c4d6e 100644
--- a/nova/virt/vmwareapi/io_util.py
+++ b/nova/virt/vmwareapi/io_util.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -57,6 +58,14 @@ class ThreadSafePipe(queue.LightQueue):
"""Put a data item in the pipe."""
self.put(data)
+ def seek(self, offset, whence=0):
+ """Set the file's current position at the offset."""
+ pass
+
+ def tell(self):
+ """Get size of the file to be read."""
+ return self.transfer_size
+
def close(self):
"""A place-holder to maintain consistency."""
pass
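Adding seek() and tell() gives the pipe just enough of the file protocol for consumers that probe the size of what they are about to stream. A small sketch with a hypothetical consumer; FakePipe mirrors the two methods added above:

class FakePipe(object):
    # Minimal stand-in mirroring the methods added above.
    def __init__(self, transfer_size):
        self.transfer_size = transfer_size

    def seek(self, offset, whence=0):
        pass                       # position is meaningless for a pipe

    def tell(self):
        return self.transfer_size  # report the total transfer size


def content_length(fileobj):
    # Hypothetical consumer: file-like readers commonly call
    # seek(0, 2) followed by tell() to size their input.
    fileobj.seek(0, 2)
    return fileobj.tell()


print(content_length(FakePipe(1024)))   # 1024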
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index e03b88804..381c47193 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -18,6 +19,9 @@
The VMware API VM utility module to build SOAP object specs.
"""
+import copy
+from nova.virt.vmwareapi import vim_util
+
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
@@ -42,7 +46,7 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
- config_spec.name = instance.name
+ config_spec.name = instance['name']
config_spec.guestId = os_type
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
@@ -57,8 +61,8 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
- config_spec.numCPUs = int(instance.vcpus)
- config_spec.memoryMB = int(instance.memory_mb)
+ config_spec.numCPUs = int(instance['vcpus'])
+ config_spec.memoryMB = int(instance['memory_mb'])
vif_spec_list = []
for vif_info in vif_infos:
@@ -71,9 +75,9 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
return config_spec
-def create_controller_spec(client_factory, key):
+def create_controller_spec(client_factory, key, adapter_type="lsiLogic"):
"""
- Builds a Config Spec for the LSI Logic Controller's addition
+ Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
@@ -81,11 +85,16 @@ def create_controller_spec(client_factory, key):
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
- virtual_lsi = client_factory.create('ns0:VirtualLsiLogicController')
- virtual_lsi.key = key
- virtual_lsi.busNumber = 0
- virtual_lsi.sharedBus = "noSharing"
- virtual_device_config.device = virtual_lsi
+ if adapter_type == "busLogic":
+ virtual_controller = client_factory.create(
+ 'ns0:VirtualBusLogicController')
+ else:
+ virtual_controller = client_factory.create(
+ 'ns0:VirtualLsiLogicController')
+ virtual_controller.key = key
+ virtual_controller.busNumber = 0
+ virtual_controller.sharedBus = "noSharing"
+ virtual_device_config.device = virtual_controller
return virtual_device_config
@@ -142,8 +151,15 @@ def create_network_spec(client_factory, vif_info):
return network_spec
-def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
- adapter_type="lsiLogic"):
+def get_vmdk_attach_config_spec(client_factory,
+ adapter_type="lsiLogic",
+ disk_type="preallocated",
+ file_path=None,
+ disk_size=None,
+ linked_clone=False,
+ controller_key=None,
+ unit_number=None,
+ device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -152,15 +168,19 @@ def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
device_config_spec = []
# For IDE devices, there are these two default controllers created in the
# VM having keys 200 and 201
- if adapter_type == "ide":
- controller_key = 200
- else:
- controller_key = -101
- controller_spec = create_controller_spec(client_factory,
- controller_key)
- device_config_spec.append(controller_spec)
+ if controller_key is None:
+ if adapter_type == "ide":
+ controller_key = 200
+ else:
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory,
+ controller_key,
+ adapter_type)
+ device_config_spec.append(controller_spec)
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
- disksize, controller_key, file_path)
+ controller_key, disk_type, file_path,
+ disk_size, linked_clone,
+ unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
@@ -168,20 +188,45 @@ def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
return config_spec
-def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
+def get_vmdk_detach_config_spec(client_factory, device):
+ """Builds the vmdk detach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ device_config_spec = []
+ virtual_device_config_spec = delete_virtual_disk_spec(client_factory,
+ device)
+
+ device_config_spec.append(virtual_device_config_spec)
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_vmdk_path_and_adapter_type(hardware_devices):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controler_key = None
+ disk_type = None
+ unit_number = 0
adapter_type_dict = {}
for device in hardware_devices:
- if (device.__class__.__name__ == "VirtualDisk" and
- device.backing.__class__.__name__ ==
- "VirtualDiskFlatVer2BackingInfo"):
- vmdk_file_path = device.backing.fileName
- vmdk_controler_key = device.controllerKey
+ if device.__class__.__name__ == "VirtualDisk":
+ if device.backing.__class__.__name__ == \
+ "VirtualDiskFlatVer2BackingInfo":
+ vmdk_file_path = device.backing.fileName
+ vmdk_controler_key = device.controllerKey
+ if getattr(device.backing, 'thinProvisioned', False):
+ disk_type = "thin"
+ else:
+ if getattr(device.backing, 'eagerlyScrub', False):
+ disk_type = "eagerZeroedThick"
+ else:
+ disk_type = "preallocated"
+ if device.unitNumber > unit_number:
+ unit_number = device.unitNumber
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = "lsiLogic"
elif device.__class__.__name__ == "VirtualBusLogicController":
@@ -193,28 +238,59 @@ def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
adapter_type = adapter_type_dict.get(vmdk_controler_key, "")
- return vmdk_file_path, adapter_type
+ return (vmdk_file_path, vmdk_controler_key, adapter_type,
+ disk_type, unit_number)
+
+
+def get_rdm_disk(hardware_devices, uuid):
+ """Gets the RDM disk key."""
+ if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+ hardware_devices = hardware_devices.VirtualDevice
+
+ for device in hardware_devices:
+ if (device.__class__.__name__ == "VirtualDisk" and
+ device.backing.__class__.__name__ ==
+ "VirtualDiskRawDiskMappingVer1BackingInfo" and
+ device.backing.lunUuid == uuid):
+ return device
-def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic"):
+def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic",
+ disk_type="preallocated"):
"""Builds the Virtual Disk copy spec."""
dest_spec = client_factory.create('ns0:VirtualDiskSpec')
dest_spec.adapterType = adapter_type
- dest_spec.diskType = "thick"
+ dest_spec.diskType = disk_type
return dest_spec
-def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"):
+def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic",
+ disk_type="preallocated"):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = adapter_type
- create_vmdk_spec.diskType = "thick"
+ create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
-def create_virtual_disk_spec(client_factory, disksize, controller_key,
- file_path=None):
+def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic",
+ disk_type="rdmp"):
+ """Builds the RDM virtual disk create spec."""
+ create_vmdk_spec = client_factory.create('ns0:DeviceBackedVirtualDiskSpec')
+ create_vmdk_spec.adapterType = adapter_type
+ create_vmdk_spec.diskType = disk_type
+ create_vmdk_spec.device = device
+ return create_vmdk_spec
+
+
+def create_virtual_disk_spec(client_factory, controller_key,
+ disk_type="preallocated",
+ file_path=None,
+ disk_size=None,
+ linked_clone=False,
+ unit_number=None,
+ device_name=None):
"""
Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
@@ -222,26 +298,40 @@ def create_virtual_disk_spec(client_factory, disksize, controller_key,
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
- if file_path is None:
+ if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
- disk_file_backing = client_factory.create(
- 'ns0:VirtualDiskFlatVer2BackingInfo')
- disk_file_backing.diskMode = "persistent"
- disk_file_backing.thinProvisioned = False
- if file_path is not None:
- disk_file_backing.fileName = file_path
+ if disk_type == "rdm" or disk_type == "rdmp":
+ disk_file_backing = client_factory.create(
+ 'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
+ disk_file_backing.compatibilityMode = "virtualMode" \
+ if disk_type == "rdm" else "physicalMode"
+ disk_file_backing.diskMode = "independent_persistent"
+ disk_file_backing.deviceName = device_name or ""
else:
- disk_file_backing.fileName = ""
+ disk_file_backing = client_factory.create(
+ 'ns0:VirtualDiskFlatVer2BackingInfo')
+ disk_file_backing.diskMode = "persistent"
+ if disk_type == "thin":
+ disk_file_backing.thinProvisioned = True
+ else:
+ if disk_type == "eagerZeroedThick":
+ disk_file_backing.eagerlyScrub = True
+ disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
- virtual_disk.backing = disk_file_backing
+ if not linked_clone:
+ virtual_disk.backing = disk_file_backing
+ else:
+ virtual_disk.backing = copy.copy(disk_file_backing)
+ virtual_disk.backing.fileName = ""
+ virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The Server assigns a Key to the device. Here we pass a -ve random key.
@@ -249,14 +339,27 @@ def create_virtual_disk_spec(client_factory, disksize, controller_key,
# want a clash with the key that server might associate with the device
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
- virtual_disk.unitNumber = 0
- virtual_disk.capacityInKB = disksize
+ virtual_disk.unitNumber = unit_number or 0
+ virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
+def delete_virtual_disk_spec(client_factory, device):
+ """
+ Builds spec for the deletion of an already existing Virtual Disk from VM.
+ """
+ virtual_device_config = client_factory.create(
+ 'ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "remove"
+ virtual_device_config.fileOperation = "destroy"
+ virtual_device_config.device = device
+
+ return virtual_device_config
+
+
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -318,3 +421,13 @@ def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
+
+
+def get_vm_ref_from_name(session, vm_name):
+ """Get reference to the VM with the name specified."""
+ vms = session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == vm_name:
+ return vm.obj
+ return None
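get_vmdk_path_and_adapter_type() now also reports the controller key, the highest unit number in use, and a disk type derived from the backing flags. A tiny standalone sketch of just the disk-type classification implied by the hunk above; the Backing class is illustrative:

def classify_disk_type(backing):
    # Sketch of the detection above: thinProvisioned wins, then
    # eagerlyScrub, otherwise the disk is plain preallocated.
    if getattr(backing, 'thinProvisioned', False):
        return "thin"
    if getattr(backing, 'eagerlyScrub', False):
        return "eagerZeroedThick"
    return "preallocated"


class Backing(object):
    thinProvisioned = False
    eagerlyScrub = True


print(classify_disk_type(Backing()))   # eagerZeroedThick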
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 883e751a8..625d6290e 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -309,8 +309,8 @@ class VMwareVMOps(object):
"""
vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
client_factory,
- vmdk_file_size_in_kb, uploaded_vmdk_path,
- adapter_type)
+ adapter_type, "preallocated",
+ uploaded_vmdk_path, vmdk_file_size_in_kb)
LOG.debug(_("Reconfiguring VM instance to attach the image disk"),
instance=instance)
reconfig_task = self._session._call_method(
@@ -361,19 +361,19 @@ class VMwareVMOps(object):
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
- _vmdk_info = vm_util.get_vmdk_file_path_and_adapter_type(
- client_factory, hardware_devices)
- vmdk_file_path_before_snapshot, adapter_type = _vmdk_info
+ (vmdk_file_path_before_snapshot, controller_key, adapter_type,
+ disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type(
+ hardware_devices)
datastore_name = vm_util.split_datastore_path(
vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
- return (vmdk_file_path_before_snapshot, adapter_type,
+ return (vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type)
- (vmdk_file_path_before_snapshot, adapter_type, datastore_name,
- os_type) = _get_vm_and_vmdk_attribs()
+ (vmdk_file_path_before_snapshot, adapter_type, disk_type,
+ datastore_name, os_type) = _get_vm_and_vmdk_attribs()
def _create_vm_snapshot():
# Create a snapshot of the VM
@@ -384,7 +384,7 @@ class VMwareVMOps(object):
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.name,
description="Taking Snapshot of the VM",
- memory=True,
+ memory=False,
quiesce=True)
self._session._wait_for_task(instance['uuid'], snapshot_task)
LOG.debug(_("Created Snapshot of the VM instance"),
diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py
new file mode 100644
index 000000000..9d556cd26
--- /dev/null
+++ b/nova/virt/vmwareapi/volume_util.py
@@ -0,0 +1,178 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import re
+import string
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.vmwareapi import vim_util
+
+LOG = logging.getLogger(__name__)
+
+
+class StorageError(Exception):
+ """To raise errors related to Volume commands."""
+
+ def __init__(self, message=None):
+ super(StorageError, self).__init__(message)
+
+
+def get_host_iqn(session):
+ """
+ Return the host iSCSI IQN.
+ """
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ hbas_ret = session._call_method(vim_util, "get_dynamic_property",
+ host_mor, "HostSystem",
+ "config.storageDevice.hostBusAdapter")
+
+ # Meaning there are no host bus adapters on the host
+ if not hbas_ret:
+ return
+ host_hbas = hbas_ret.HostHostBusAdapter
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ return hba.iScsiName
+
+
+def find_st(session, data):
+ """
+ Return the iSCSI Target given a volume info.
+ """
+ target_portal = data['target_portal']
+ target_iqn = data['target_iqn']
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+
+ lst_properties = ["config.storageDevice.hostBusAdapter",
+ "config.storageDevice.scsiTopology",
+ "config.storageDevice.scsiLun"]
+ props = session._call_method(vim_util, "get_object_properties",
+ None, host_mor, "HostSystem",
+ lst_properties)
+ result = (None, None)
+ hbas_ret = None
+ scsi_topology = None
+ scsi_lun_ret = None
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == "config.storageDevice.hostBusAdapter":
+ hbas_ret = prop.val
+ elif prop.name == "config.storageDevice.scsiTopology":
+ scsi_topology = prop.val
+ elif prop.name == "config.storageDevice.scsiLun":
+ scsi_lun_ret = prop.val
+
+ # Meaning there are no host bus adapters on the host
+ if hbas_ret is None:
+ return result
+ host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return result
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ hba_key = hba.key
+ break
+ else:
+ return result
+
+ if scsi_topology is None:
+ return result
+ host_adapters = scsi_topology.adapter
+ if not host_adapters:
+ return result
+ scsi_lun_key = None
+ for adapter in host_adapters:
+ if adapter.adapter == hba_key:
+ if not getattr(adapter, 'target', None):
+ return result
+ for target in adapter.target:
+ if (getattr(target.transport, 'address', None) and
+ target.transport.address[0] == target_portal and
+ target.transport.iScsiName == target_iqn):
+ if not target.lun:
+ return result
+ for lun in target.lun:
+ if 'host.ScsiDisk' in lun.scsiLun:
+ scsi_lun_key = lun.scsiLun
+ break
+ break
+ break
+
+ if scsi_lun_key is None:
+ return result
+
+ if scsi_lun_ret is None:
+ return result
+ host_scsi_luns = scsi_lun_ret.ScsiLun
+ if not host_scsi_luns:
+ return result
+ for scsi_lun in host_scsi_luns:
+ if scsi_lun.key == scsi_lun_key:
+ return (scsi_lun.deviceName, scsi_lun.uuid)
+
+ return result
+
+
+def rescan_iscsi_hba(session):
+ """
+ Rescan the iSCSI HBA to discover iSCSI targets.
+ """
+ # There is only one default storage system in a standalone ESX host
+ storage_system_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["configManager.storageSystem"])[0].propSet[0].val
+ hbas_ret = session._call_method(vim_util,
+ "get_dynamic_property",
+ storage_system_mor,
+ "HostStorageSystem",
+ "storageDeviceInfo.hostBusAdapter")
+ # Meaning there are no host bus adapters on the host
+ if hbas_ret is None:
+ return
+ host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ hba_device = hba.device
+ break
+ else:
+ return
+
+ LOG.debug(_("Rescanning HBA %s") % hba_device)
+ session._call_method(session._get_vim(), "RescanHba", storage_system_mor,
+ hbaDevice=hba_device)
+ LOG.debug(_("Rescanned HBA %s ") % hba_device)
+
+
+def mountpoint_to_number(mountpoint):
+ """Translate a mountpoint like /dev/sdc into a numeric."""
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hsv]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+ return string.atoi(mountpoint, 10)
+ else:
+ LOG.warn(_("Mountpoint cannot be translated: %s") % mountpoint)
+ return -1
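mountpoint_to_number() maps a guest device path to the unit number used when picking a SCSI slot. For illustration, the same logic restated standalone (with int() substituted for the Python 2-only string.atoi()) and a few sample values:

import re


def mountpoint_to_number(mountpoint):
    # Same mapping as the helper above, restated for illustration.
    if mountpoint.startswith('/dev/'):
        mountpoint = mountpoint[5:]
    if re.match('^[hsv]d[a-p]$', mountpoint):
        return ord(mountpoint[2:3]) - ord('a')
    elif re.match('^[0-9]+$', mountpoint):
        return int(mountpoint)
    return -1


print(mountpoint_to_number('/dev/sdc'))   # 2
print(mountpoint_to_number('/dev/vda'))   # 0
print(mountpoint_to_number('3'))          # 3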
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
new file mode 100644
index 000000000..5ec389f80
--- /dev/null
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -0,0 +1,183 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import volume_util
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class VMwareVolumeOps(object):
+ """
+ Management class for Volume-related tasks
+ """
+
+ def __init__(self, session):
+ self._session = session
+
+ def attach_disk_to_vm(self, vm_ref, instance_name,
+ adapter_type, disk_type, vmdk_path=None,
+ disk_size=None, linked_clone=False,
+ controller_key=None, unit_number=None,
+ device_name=None):
+ """
+ Attach disk to VM by reconfiguration.
+ """
+ client_factory = self._session._get_vim().client.factory
+ vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
+ client_factory, adapter_type, disk_type,
+ vmdk_path, disk_size, linked_clone,
+ controller_key, unit_number, device_name)
+
+ LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
+ "disk %(vmdk_path)s or device %(device_name)s with type "
+ "%(disk_type)s") % locals())
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_attach_config_spec)
+ self._session._wait_for_task(instance_name, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
+ "disk %(vmdk_path)s or device %(device_name)s with type "
+ "%(disk_type)s") % locals())
+
+ def detach_disk_from_vm(self, vm_ref, instance_name, device):
+ """
+ Detach disk from VM by reconfiguration.
+ """
+ client_factory = self._session._get_vim().client.factory
+ vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(
+ client_factory, device)
+ disk_key = device.key
+ LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach "
+ "disk %(disk_key)s") % locals())
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_detach_config_spec)
+ self._session._wait_for_task(instance_name, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach "
+ "disk %(disk_key)s") % locals())
+
+ def discover_st(self, data):
+ """Discover iSCSI targets."""
+ target_portal = data['target_portal']
+ target_iqn = data['target_iqn']
+ LOG.debug(_("Discovering iSCSI target %(target_iqn)s from "
+ "%(target_portal)s.") % locals())
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name:
+ LOG.debug(_("Storage target found. No need to discover"))
+ return (device_name, uuid)
+ # Rescan iSCSI HBA
+ volume_util.rescan_iscsi_hba(self._session)
+ # Find iSCSI Target again
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name:
+ LOG.debug(_("Discovered iSCSI target %(target_iqn)s from "
+ "%(target_portal)s.") % locals())
+ else:
+ LOG.debug(_("Unable to discovered iSCSI target %(target_iqn)s "
+ "from %(target_portal)s.") % locals())
+ return (device_name, uuid)
+
+ def get_volume_connector(self, instance):
+ """Return volume connector information."""
+ iqn = volume_util.get_host_iqn(self._session)
+ return {
+ 'ip': CONF.vmwareapi_host_ip,
+ 'initiator': iqn
+ }
+
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach volume storage to VM instance."""
+ instance_name = instance['name']
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+ # Attach Volume to VM
+ LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s, "
+ "%(mountpoint)s") % locals())
+ driver_type = connection_info['driver_volume_type']
+ if driver_type not in ['iscsi']:
+ raise exception.VolumeDriverNotFound(driver_type=driver_type)
+ data = connection_info['data']
+ mount_unit = volume_util.mountpoint_to_number(mountpoint)
+
+ # Discover iSCSI Target
+ device_name, uuid = self.discover_st(data)
+ if device_name is None:
+ raise volume_util.StorageError(_("Unable to find iSCSI Target"))
+
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \
+ = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
+ # Figure out the correct unit number
+ if unit_number < mount_unit:
+ unit_number = mount_unit
+ else:
+ unit_number = unit_number + 1
+ self.attach_disk_to_vm(vm_ref, instance_name,
+ adapter_type, disk_type="rdmp",
+ controller_key=controller_key,
+ unit_number=unit_number,
+ device_name=device_name)
+ LOG.info(_("Mountpoint %(mountpoint)s attached to "
+ "instance %(instance_name)s") % locals())
+
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach volume storage to VM instance."""
+ instance_name = instance['name']
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+ # Detach Volume from VM
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
+ driver_type = connection_info['driver_volume_type']
+ if driver_type not in ['iscsi']:
+ raise exception.VolumeDriverNotFound(driver_type=driver_type)
+ data = connection_info['data']
+
+ # Discover iSCSI Target
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name is None:
+ raise volume_util.StorageError(_("Unable to find iSCSI Target"))
+
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ device = vm_util.get_rdm_disk(hardware_devices, uuid)
+ if device is None:
+ raise volume_util.StorageError(_("Unable to find volume"))
+ self.detach_disk_from_vm(vm_ref, instance_name, device)
+ LOG.info(_("Mountpoint %(mountpoint)s detached from "
+ "instance %(instance_name)s") % locals())
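
The attach and detach paths above accept only the 'iscsi' driver_volume_type and read target_portal/target_iqn out of connection_info['data']. A minimal usage sketch follows; the volumeops object (an already constructed VMware volume-ops instance holding a live vSphere session) and the concrete values are assumptions for illustration and are not part of this hunk.

    # Illustration only: the connection_info shape consumed by
    # attach_volume()/detach_volume() above.
    connection_info = {
        'driver_volume_type': 'iscsi',  # anything else raises VolumeDriverNotFound
        'data': {
            'target_portal': '192.168.0.10:3260',
            'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        },
    }
    instance = {'name': 'instance-00000001'}
    # volume_util.mountpoint_to_number() turns the guest device name
    # ('/dev/sdb') into a unit number on the virtual SCSI controller.
    volumeops.attach_volume(connection_info, instance, '/dev/sdb')
    volumeops.detach_volume(connection_info, instance, '/dev/sdb')
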
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
index 6a56a918c..3853f09f2 100644
--- a/nova/virt/xenapi/__init__.py
+++ b/nova/virt/xenapi/__init__.py
@@ -18,4 +18,6 @@
:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
==================================================================
"""
-from nova.virt.xenapi.driver import XenAPIDriver
+from nova.virt.xenapi import driver
+
+XenAPIDriver = driver.XenAPIDriver
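
The re-export keeps the long-standing public name importable while following the module-only import rule (N302) checked by tools/hacking.py later in this change. A quick sanity check, assuming nova is on the import path:

    # Both spellings resolve to the same class after this change.
    from nova.virt.xenapi import XenAPIDriver
    from nova.virt.xenapi import driver
    assert XenAPIDriver is driver.XenAPIDriver
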
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 52a5f37b2..582a9320a 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -196,13 +196,6 @@ class ImageType(object):
}.get(image_type_id)
-def _system_metadata_to_dict(system_metadata):
- result = {}
- for item in system_metadata:
- result[item['key']] = item['value']
- return result
-
-
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False):
"""Create a VM record. Returns new VM reference.
@@ -994,7 +987,7 @@ def _create_image(context, session, instance, name_label, image_id,
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
- sys_meta = _system_metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
try:
cache = utils.bool_from_str(sys_meta['image_cache_in_nova'])
except KeyError:
@@ -1087,7 +1080,7 @@ def _image_uses_bittorrent(context, instance):
if xenapi_torrent_images == 'all':
bittorrent = True
elif xenapi_torrent_images == 'some':
- sys_meta = _system_metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
try:
bittorrent = utils.bool_from_str(sys_meta['image_bittorrent'])
except KeyError:
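
The private _system_metadata_to_dict helper removed above is replaced by a shared utils.metadata_to_dict. Its behaviour is assumed to match the deleted code: flatten the list of key/value rows stored in instance['system_metadata'] into a plain dict.

    # Sketch of the shared helper, mirroring the deleted private function.
    def metadata_to_dict(metadata):
        result = {}
        for item in metadata:
            result[item['key']] = item['value']
        return result

    rows = [{'key': 'image_cache_in_nova', 'value': 'True'}]
    assert metadata_to_dict(rows) == {'image_cache_in_nova': 'True'}
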
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 3e1ccc66b..daca69854 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -20,7 +20,7 @@
Handles all requests relating to volumes + cinder.
"""
-from copy import deepcopy
+import copy
import sys
from cinderclient import exceptions as cinder_exception
@@ -139,7 +139,7 @@ def _untranslate_volume_summary_view(context, vol):
d['volume_metadata'].append(item)
if hasattr(vol, 'volume_image_metadata'):
- d['volume_image_metadata'] = deepcopy(vol.volume_image_metadata)
+ d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 0a7570b6c..651dbc4f6 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -75,7 +75,7 @@ class Server(object):
def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
protocol=eventlet.wsgi.HttpProtocol, backlog=128,
- use_ssl=False):
+ use_ssl=False, max_url_len=None):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
@@ -84,6 +84,7 @@ class Server(object):
    :param port: Port number on which to serve the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:param backlog: Maximum number of queued connections.
+ :param max_url_len: Maximum length of permitted URLs.
:returns: None
:raises: nova.exception.InvalidInput
"""
@@ -95,6 +96,7 @@ class Server(object):
self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
self._wsgi_logger = logging.WritableLogger(self._logger)
self._use_ssl = use_ssl
+ self._max_url_len = max_url_len
if backlog < 1:
raise exception.InvalidInput(
@@ -177,13 +179,20 @@ class Server(object):
":%(port)s with SSL support") % self.__dict__)
raise
- self._server = eventlet.spawn(eventlet.wsgi.server,
- self._socket,
- self.app,
- protocol=self._protocol,
- custom_pool=self._pool,
- log=self._wsgi_logger,
- log_format=CONF.wsgi_log_format)
+ wsgi_kwargs = {
+ 'func': eventlet.wsgi.server,
+ 'sock': self._socket,
+ 'site': self.app,
+ 'protocol': self._protocol,
+ 'custom_pool': self._pool,
+ 'log': self._wsgi_logger,
+ 'log_format': CONF.wsgi_log_format
+ }
+
+ if self._max_url_len:
+ wsgi_kwargs['url_length_limit'] = self._max_url_len
+
+ self._server = eventlet.spawn(**wsgi_kwargs)
def stop(self):
"""Stop this server.
diff --git a/run_tests.sh b/run_tests.sh
index aea564b78..11bc8b518 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -99,6 +99,13 @@ function run_tests {
copy_subunit_log
+ if [ $coverage -eq 1 ]; then
+ echo "Generating coverage report in covhtml/"
+ # Don't compute coverage for common code, which is tested elsewhere
+ ${wrapper} coverage combine
+ ${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
+ fi
+
return $RESULT
}
@@ -201,10 +208,3 @@ if [ -z "$testrargs" ]; then
run_pep8
fi
fi
-
-if [ $coverage -eq 1 ]; then
- echo "Generating coverage report in covhtml/"
- # Don't compute coverage for common code, which is tested elsewhere
- ${wrapper} coverage combine
- ${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
-fi
diff --git a/smoketests/base.py b/smoketests/base.py
index f6cec3168..7c7d19838 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -17,7 +17,7 @@
# under the License.
import boto
-from boto.ec2.regioninfo import RegionInfo
+from boto.ec2 import regioninfo
import commands
import httplib
import os
@@ -123,7 +123,7 @@ class SmokeTestCase(unittest.TestCase):
return boto_v6.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
- region=RegionInfo(None,
+ region=regioninfo.RegionInfo(None,
'nova',
parts['ip']),
port=parts['port'],
@@ -133,7 +133,7 @@ class SmokeTestCase(unittest.TestCase):
return boto.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
- region=RegionInfo(None,
+ region=regioninfo.RegionInfo(None,
'nova',
parts['ip']),
port=parts['port'],
diff --git a/tools/hacking.py b/tools/hacking.py
index 56f6694bd..801a87899 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -18,7 +18,7 @@
"""nova HACKING file compliance testing
-built on top of pep8.py
+Built on top of pep8.py
"""
import inspect
@@ -49,6 +49,8 @@ START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
+_missingImport = set([])
+
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
@@ -103,7 +105,7 @@ def import_normalize(line):
return line
-def nova_todo_format(physical_line):
+def nova_todo_format(physical_line, tokens):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
@@ -111,14 +113,13 @@ def nova_todo_format(physical_line):
Okay: #TODO(sdague)
N101: #TODO fail
+ N101: #TODO (jogo) fail
"""
# TODO(sdague): TODO check shouldn't fail inside of space
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure it's a comment
- # TODO(sdague): should be smarter on this test
- this_test = physical_line.find('N101: #TODO fail')
- if pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1:
+ if (pos != pos1 and pos2 >= 0 and pos2 < pos and len(tokens) == 0):
return pos, "N101: Use TODO(NAME)"
@@ -165,8 +166,6 @@ def nova_one_import_per_line(logical_line):
not is_import_exception(parts[1])):
yield pos, "N301: one import per line"
-_missingImport = set([])
-
def nova_import_module_only(logical_line):
r"""Check for import module only.
@@ -175,20 +174,23 @@ def nova_import_module_only(logical_line):
Do not import objects, only modules
Okay: from os import path
- N302 from os.path import mkdir as mkdir2
- N303 import bubba
- N304 import blueblue
+ Okay: import os.path
+ N302: from os.path import dirname as dirname2
+ N303 from os.path import *
+ N304 import flakes
"""
# N302 import only modules
# N303 Invalid Import
# N304 Relative Import
# TODO(sdague) actually get these tests working
- def importModuleCheck(mod, parent=None, added=False):
- """Import Module helper function.
+ # TODO(jogo) simplify this code
+ def import_module_check(mod, parent=None, added=False):
+        """Check for relative, non-module, and invalid imports.
If can't find module on first try, recursively check for relative
- imports
+ imports.
+        When parsing 'from x import y', x is the parent.
"""
current_path = os.path.dirname(pep8.current_file)
try:
@@ -196,8 +198,6 @@ def nova_import_module_only(logical_line):
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
- if is_import_exception(parent):
- return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
@@ -209,7 +209,7 @@ def nova_import_module_only(logical_line):
sys.path.pop()
added = False
return logical_line.find(mod), ("N304: No "
- "relative imports. '%s' is a relative import"
+ "relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("N302: import only "
"modules. '%s' does not import a module"
@@ -219,7 +219,7 @@ def nova_import_module_only(logical_line):
if not added:
added = True
sys.path.append(current_path)
- return importModuleCheck(mod, parent, added)
+ return import_module_check(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
@@ -234,23 +234,27 @@ def nova_import_module_only(logical_line):
except AttributeError:
# Invalid import
+ if "import *" in logical_line:
+            # TODO(jogo): handle "from x import *" by checking all
+            # objects in x
+ return
return logical_line.find(mod), ("N303: Invalid import, "
- "AttributeError raised")
+ "%s" % mod)
- # convert "from x import y" to " import x.y"
- # convert "from x import y as z" to " import x.y"
- import_normalize(logical_line)
split_line = logical_line.split()
-
- if (logical_line.startswith("import ") and "," not in logical_line and
- (len(split_line) == 2 or
- (len(split_line) == 4 and split_line[2] == "as"))):
- mod = split_line[1]
- rval = importModuleCheck(mod)
+ if (", " not in logical_line and
+ split_line[0] in ('import', 'from') and
+ (len(split_line) in (2, 4, 6)) and
+ split_line[1] != "__future__"):
+ if is_import_exception(split_line[1]):
+ return
+ if "from" == split_line[0]:
+ rval = import_module_check(split_line[3], parent=split_line[1])
+ else:
+ rval = import_module_check(split_line[1])
if rval is not None:
yield rval
- # TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
@@ -329,6 +333,8 @@ def nova_docstring_one_line(physical_line):
A one line docstring looks like this and ends in punctuation.
Okay: '''This is good.'''
+ Okay: '''This is good too!'''
+ Okay: '''How about this?'''
N402: '''This is not'''
N402: '''Bad punctuation,'''
"""
diff --git a/tools/test-requires b/tools/test-requires
index bc279166e..851023af4 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -12,4 +12,4 @@ pylint==0.25.2
python-subunit
sphinx>=1.1.2
testrepository>=0.0.13
-testtools>=0.9.26
+testtools>=0.9.27