-rw-r--r--  nova/compute/api.py | 3
-rw-r--r--  nova/compute/manager.py | 31
-rw-r--r--  nova/compute/rpcapi.py | 17
-rw-r--r--  nova/console/api.py | 6
-rw-r--r--  nova/console/manager.py | 11
-rw-r--r--  nova/console/rpcapi.py | 6
-rw-r--r--  nova/console/vmrc.py | 7
-rw-r--r--  nova/console/vmrc_manager.py | 15
-rw-r--r--  nova/console/xvp.py | 25
-rw-r--r--  nova/openstack/common/rpc/__init__.py | 2
-rw-r--r--  nova/openstack/common/rpc/impl_kombu.py | 5
-rw-r--r--  nova/openstack/common/rpc/impl_zmq.py | 2
-rw-r--r--  nova/tests/compute/test_compute.py | 26
-rw-r--r--  nova/tests/compute/test_rpcapi.py | 7
-rw-r--r--  nova/virt/baremetal/dom.py | 2
-rw-r--r--  nova/virt/baremetal/driver.py | 51
-rw-r--r--  nova/virt/baremetal/nodes.py | 8
-rw-r--r--  nova/virt/baremetal/tilera.py | 11
-rw-r--r--  nova/virt/configdrive.py | 17
-rw-r--r--  nova/virt/disk/api.py | 9
-rw-r--r--  nova/virt/disk/nbd.py | 9
-rw-r--r--  nova/virt/driver.py | 2
-rw-r--r--  nova/virt/firewall.py | 29
-rw-r--r--  nova/virt/hyperv/livemigrationops.py | 5
-rw-r--r--  nova/virt/hyperv/snapshotops.py | 5
-rw-r--r--  nova/virt/hyperv/vmops.py | 21
-rw-r--r--  nova/virt/hyperv/vmutils.py | 9
-rw-r--r--  nova/virt/hyperv/volumeops.py | 11
-rw-r--r--  nova/virt/hyperv/volumeutils.py | 7
-rw-r--r--  nova/virt/images.py | 7
-rw-r--r--  nova/virt/libvirt/driver.py | 218
-rw-r--r--  nova/virt/libvirt/firewall.py | 10
-rw-r--r--  nova/virt/libvirt/imagebackend.py | 27
-rw-r--r--  nova/virt/libvirt/imagecache.py | 23
-rw-r--r--  nova/virt/libvirt/utils.py | 13
-rw-r--r--  nova/virt/libvirt/vif.py | 29
-rw-r--r--  nova/virt/libvirt/volume.py | 11
-rw-r--r--  nova/virt/libvirt/volume_nfs.py | 7
-rw-r--r--  nova/virt/netutils.py | 6
-rw-r--r--  nova/virt/powervm/driver.py | 5
-rw-r--r--  nova/virt/powervm/operator.py | 15
-rw-r--r--  nova/virt/vmwareapi/driver.py | 23
-rw-r--r--  nova/virt/vmwareapi/read_write_util.py | 2
-rw-r--r--  nova/virt/vmwareapi/vif.py | 7
-rw-r--r--  nova/virt/vmwareapi/vim.py | 7
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 9
-rw-r--r--  nova/virt/xenapi/agent.py | 15
-rw-r--r--  nova/virt/xenapi/driver.py | 31
-rw-r--r--  nova/virt/xenapi/firewall.py | 2
-rw-r--r--  nova/virt/xenapi/host.py | 2
-rw-r--r--  nova/virt/xenapi/pool.py | 21
-rw-r--r--  nova/virt/xenapi/vif.py | 9
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 81
-rw-r--r--  nova/virt/xenapi/vmops.py | 18
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 15
55 files changed, 499 insertions, 473 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 46f30e10b..9ee384393 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2188,9 +2188,10 @@ class AggregateAPI(base.Base):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
service = self.db.service_get_all_compute_by_host(context, host)[0]
+ aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host)
self.compute_rpcapi.remove_aggregate_host(context,
- aggregate_id=aggregate_id, host_param=host, host=host)
+ aggregate=aggregate, host_param=host, host=host)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c7b63df27..6d69fbac4 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -230,7 +230,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.14'
+ RPC_API_VERSION = '2.16'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1680,12 +1680,7 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _('destination same as source!')
raise exception.MigrationError(msg)
- # TODO(russellb): no-db-compute: Send the old instance type
- # info that is needed via rpc so db access isn't required
- # here.
- old_instance_type_id = instance['instance_type_id']
- old_instance_type = instance_types.get_instance_type(
- old_instance_type_id)
+ old_instance_type = instance['instance_type']
migration_ref = self.db.migration_create(context.elevated(),
{'instance_uuid': instance['uuid'],
@@ -1699,7 +1694,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(_('Migrating'), context=context,
instance=instance)
self.compute_rpcapi.resize_instance(context, instance,
- migration_ref, image, reservations)
+ migration_ref, image, instance_type, reservations)
except Exception:
# try to re-schedule the resize elsewhere:
@@ -1754,15 +1749,17 @@ class ComputeManager(manager.SchedulerDependentManager):
@reverts_task_state
@wrap_instance_fault
def resize_instance(self, context, instance, image,
- reservations=None, migration=None, migration_id=None):
+ reservations=None, migration=None, migration_id=None,
+ instance_type=None):
"""Starts the migration of a running instance to another host."""
elevated = context.elevated()
if not migration:
migration = self.db.migration_get(elevated, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
- instance_type_ref = self.db.instance_type_get(context,
- migration['new_instance_type_id'])
+ if not instance_type:
+ instance_type = self.db.instance_type_get(context,
+ migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
@@ -1782,7 +1779,7 @@ class ComputeManager(manager.SchedulerDependentManager):
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration['dest_host'],
- instance_type_ref, self._legacy_nw_info(network_info),
+ instance_type, self._legacy_nw_info(network_info),
block_device_info)
self._terminate_volume_connections(context, instance)
@@ -3131,10 +3128,12 @@ class ComputeManager(manager.SchedulerDependentManager):
aggregate['id'], host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def remove_aggregate_host(self, context, aggregate_id,
- host, slave_info=None):
+ def remove_aggregate_host(self, context, host, slave_info=None,
+ aggregate=None, aggregate_id=None):
"""Removes a host from a physical hypervisor pool."""
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ if not aggregate:
+ aggregate = self.db.aggregate_get(context, aggregate_id)
+
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
@@ -3143,7 +3142,7 @@ class ComputeManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context, self.db.aggregate_host_add,
- aggregate.id, host,
+ aggregate['id'], host,
isinstance(e, exception.AggregateError))
@manager.periodic_task(
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 5bf17adcd..2ec4a6736 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -142,6 +142,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.12 - Remove migration_id, add migration to revert_resize
2.13 - Remove migration_id, add migration to finish_revert_resize
2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
+ 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
+ 2.16 - Add instance_type to resize_instance
'''
#
@@ -389,7 +391,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
_compute_topic(self.topic, ctxt, host, None))
- def remove_aggregate_host(self, ctxt, aggregate_id, host_param, host,
+ def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
@@ -400,11 +402,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
:param host: This is the host to send the message to.
'''
+ aggregate_p = jsonutils.to_primitive(aggregate)
self.cast(ctxt, self.make_msg('remove_aggregate_host',
- aggregate_id=aggregate_id, host=host_param,
+ aggregate=aggregate_p, host=host_param,
slave_info=slave_info),
topic=_compute_topic(self.topic, ctxt, host, None),
- version='2.2')
+ version='2.15')
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
instance_p = jsonutils.to_primitive(instance)
@@ -431,15 +434,17 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def resize_instance(self, ctxt, instance, migration, image,
+ def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None):
topic = _compute_topic(self.topic, ctxt, None, instance)
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
+ instance_type_p = jsonutils.to_primitive(instance_type)
self.cast(ctxt, self.make_msg('resize_instance',
instance=instance_p, migration=migration_p,
- image=image, reservations=reservations), topic,
- version='2.6')
+ image=image, reservations=reservations,
+ instance_type=instance_type_p), topic,
+ version='2.16')
def resume_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
diff --git a/nova/console/api.py b/nova/console/api.py
index 5a9294ce7..5fb506fe0 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -18,14 +18,14 @@
"""Handles ConsoleProxy API requests."""
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class API(base.Base):
@@ -42,7 +42,7 @@ class API(base.Base):
def delete_console(self, context, instance_uuid, console_uuid):
console = self.db.console_get(context, console_uuid, instance_uuid)
- topic = rpc.queue_get_for(context, FLAGS.console_topic,
+ topic = rpc.queue_get_for(context, CONF.console_topic,
console['pool']['host'])
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
rpcapi.remove_console(context, console['id'])
diff --git a/nova/console/manager.py b/nova/console/manager.py
index faaf58d0c..a2f48494b 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -20,6 +20,7 @@
import socket
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import exception
from nova import flags
from nova import manager
@@ -41,8 +42,8 @@ console_manager_opts = [
help='Publicly visible name for this console host'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(console_manager_opts)
+CONF = config.CONF
+CONF.register_opts(console_manager_opts)
LOG = logging.getLogger(__name__)
@@ -57,7 +58,7 @@ class ConsoleProxyManager(manager.Manager):
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
- console_driver = FLAGS.console_driver
+ console_driver = CONF.console_driver
self.driver = importutils.import_object(console_driver)
super(ConsoleProxyManager, self).__init__(*args, **kwargs)
self.driver.host = self.host
@@ -118,7 +119,7 @@ class ConsoleProxyManager(manager.Manager):
#NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
# xenserver. Thus we need to ask.
- if FLAGS.stub_compute:
+ if CONF.stub_compute:
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass'}
@@ -128,7 +129,7 @@ class ConsoleProxyManager(manager.Manager):
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
- pool_info['public_hostname'] = FLAGS.console_public_hostname
+ pool_info['public_hostname'] = CONF.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
diff --git a/nova/console/rpcapi.py b/nova/console/rpcapi.py
index a1f289bb0..06e282dce 100644
--- a/nova/console/rpcapi.py
+++ b/nova/console/rpcapi.py
@@ -18,11 +18,11 @@
Client side of the console RPC API.
"""
+from nova import config
from nova import flags
import nova.openstack.common.rpc.proxy
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -44,7 +44,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
- topic = topic if topic else FLAGS.console_topic
+ topic = topic if topic else CONF.console_topic
super(ConsoleAPI, self).__init__(
topic=topic,
default_version=self.BASE_RPC_API_VERSION)
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index a8b934677..ae66d5f05 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -19,6 +19,7 @@
import base64
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -35,8 +36,8 @@ vmrc_opts = [
help="number of retries for retrieving VMRC information"),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(vmrc_opts)
+CONF = config.CONF
+CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
@@ -51,7 +52,7 @@ class VMRCConsole(object):
def get_port(self, context):
"""Get available port for consoles."""
- return FLAGS.console_vmrc_port
+ return CONF.console_vmrc_port
def setup_console(self, context, console):
"""Sets up console."""
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index e654780a3..c40067daa 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -18,6 +18,7 @@
"""VMRC Console Manager."""
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import exception
from nova import flags
from nova import manager
@@ -29,16 +30,16 @@ from nova.virt.vmwareapi import driver as vmwareapi_conn
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-flags.DECLARE('console_driver', 'nova.console.manager')
-flags.DECLARE('console_public_hostname', 'nova.console.manager')
+CONF = config.CONF
+CONF.import_opt('console_driver', 'nova.console.manager')
+CONF.import_opt('console_public_hostname', 'nova.console.manager')
class ConsoleVMRCManager(manager.Manager):
"""Manager to handle VMRC connections for accessing instance consoles."""
def __init__(self, console_driver=None, *args, **kwargs):
- self.driver = importutils.import_object(FLAGS.console_driver)
+ self.driver = importutils.import_object(CONF.console_driver)
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
@@ -54,7 +55,7 @@ class ConsoleVMRCManager(manager.Manager):
pool['address'],
pool['username'],
pool['password'],
- FLAGS.console_vmrc_error_retries)
+ CONF.console_vmrc_error_retries)
self.sessions[pool['id']] = vim_session
return self.sessions[pool['id']]
@@ -137,8 +138,8 @@ class ConsoleVMRCManager(manager.Manager):
pool_info['host'] = self.host
# ESX Address or Proxy Address
public_host_name = pool_info['address']
- if FLAGS.console_public_hostname:
- public_host_name = FLAGS.console_public_hostname
+ if CONF.console_public_hostname:
+ public_host_name = CONF.console_public_hostname
pool_info['public_hostname'] = public_host_name
pool_info['console_type'] = console_type
pool_info['compute_host'] = instance_host
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 60197c766..12a7f3fd3 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -22,6 +22,7 @@ import signal
from Cheetah import Template
+from nova import config
from nova import context
from nova import db
from nova import exception
@@ -49,8 +50,8 @@ xvp_opts = [
help='port for XVP to multiplex VNC connections on'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xvp_opts)
+CONF = config.CONF
+CONF.register_opts(xvp_opts)
LOG = logging.getLogger(__name__)
@@ -58,8 +59,8 @@ class XVPConsoleProxy(object):
"""Sets up XVP config, and manages XVP daemon."""
def __init__(self):
- self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read()
- self.host = FLAGS.host # default, set by manager.
+ self.xvpconf_template = open(CONF.console_xvp_conf_template).read()
+ self.host = CONF.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
@@ -71,7 +72,7 @@ class XVPConsoleProxy(object):
#TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
- return FLAGS.console_xvp_multiplex_port
+ return CONF.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies."""
@@ -104,7 +105,7 @@ class XVPConsoleProxy(object):
LOG.debug('No console pools!')
self._xvp_stop()
return
- conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
+ conf_data = {'multiplex_port': CONF.console_xvp_multiplex_port,
'pools': pools,
'pass_encode': self.fix_console_password}
config = str(Template.Template(self.xvpconf_template,
@@ -113,8 +114,8 @@ class XVPConsoleProxy(object):
self._xvp_restart()
def _write_conf(self, config):
- LOG.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
- with open(FLAGS.console_xvp_conf, 'w') as cfile:
+ LOG.debug(_('Re-wrote %s') % CONF.console_xvp_conf)
+ with open(CONF.console_xvp_conf, 'w') as cfile:
cfile.write(config)
def _xvp_stop(self):
@@ -134,9 +135,9 @@ class XVPConsoleProxy(object):
LOG.debug(_('Starting xvp'))
try:
utils.execute('xvp',
- '-p', FLAGS.console_xvp_pid,
- '-c', FLAGS.console_xvp_conf,
- '-l', FLAGS.console_xvp_log)
+ '-p', CONF.console_xvp_pid,
+ '-c', CONF.console_xvp_conf,
+ '-l', CONF.console_xvp_log)
except exception.ProcessExecutionError, err:
LOG.error(_('Error starting xvp: %s') % err)
@@ -151,7 +152,7 @@ class XVPConsoleProxy(object):
def _xvp_pid(self):
try:
- with open(FLAGS.console_xvp_pid, 'r') as pidfile:
+ with open(CONF.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index 0f82c47a2..bf2b2e9e0 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -250,7 +250,7 @@ def queue_get_for(context, topic, host):
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
- return '%s.%s' % (topic, host)
+ return '%s.%s' % (topic, host) if host else topic
_RPCIMPL = None
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 7b16e0e19..46295d90f 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -31,9 +31,9 @@ import kombu.messaging
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import network_utils
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
-from nova.openstack.common import network_utils
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
@@ -267,6 +267,7 @@ class FanoutConsumer(ConsumerBase):
# Default options
options = {'durable': False,
+ 'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
@@ -776,7 +777,7 @@ def cast_to_server(conf, context, server_params, topic, msg):
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
- return rpc_amqp.cast_to_server(
+ return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 4ffb1ae69..0daf07cf4 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -546,7 +546,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
- msg_id = str(uuid.uuid4().hex)
+ msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 1c5be489d..a7b1b3061 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1767,7 +1767,7 @@ class ComputeTestCase(BaseTestCase):
migration_ref = db.migration_get_by_instance_and_status(
self.context.elevated(), instance['uuid'], 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
- migration=migration_ref, image={})
+ migration=migration_ref, image={}, instance_type=new_type)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
@@ -1890,7 +1890,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration_ref, image={},
- reservations=reservations)
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
@@ -1912,8 +1913,8 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(self.context, instance=instance,
- migration=migration_ref,
- image={})
+ migration=migration_ref, image={},
+ instance_type=jsonutils.to_primitive(instance_type))
inst = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(migration_ref['dest_compute'], inst['host'])
@@ -1946,9 +1947,10 @@ class ComputeTestCase(BaseTestCase):
new_instance_type_ref = db.instance_type_get_by_flavor_id(
self.context, 3)
+ new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=jsonutils.to_primitive(new_inst_ref),
- instance_type=jsonutils.to_primitive(new_instance_type_ref),
+ instance_type=new_instance_type_p,
image={}, reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(
@@ -1960,7 +1962,8 @@ class ComputeTestCase(BaseTestCase):
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(self.context, instance=instance,
migration=migration_ref,
- image={})
+ image={},
+ instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=instance)
@@ -2048,7 +2051,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=inst_ref,
migration=migration_ref, image={},
- reservations=reservations)
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
@@ -5165,7 +5169,8 @@ class ComputeAggrTestCase(BaseTestCase):
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
fake_driver_remove_from_aggregate)
- self.compute.remove_aggregate_host(self.context, self.aggr.id, "host")
+ self.compute.remove_aggregate_host(self.context,
+ aggregate=jsonutils.to_primitive(self.aggr), host="host")
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
@@ -5185,7 +5190,7 @@ class ComputeAggrTestCase(BaseTestCase):
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate.id, self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
@@ -5193,7 +5198,8 @@ class ComputeAggrTestCase(BaseTestCase):
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
- self.aggr.id, "the_host", slave_info="SLAVE_INFO")
+ aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
+ slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 1edfa771f..8db2534af 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -264,8 +264,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
- aggregate_id='id', host_param='host', host='host',
- slave_info={}, version='2.2')
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={}, version='2.15')
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
@@ -286,7 +286,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_resize_instance(self):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance, migration={'id': 'fake_id'},
- image='image', reservations=list('fake_res'), version='2.6')
+ image='image', instance_type={'id': 1},
+ reservations=list('fake_res'), version='2.16')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
index 350506f73..aef648eb0 100644
--- a/nova/virt/baremetal/dom.py
+++ b/nova/virt/baremetal/dom.py
@@ -21,8 +21,6 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.baremetal import nodes
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index ee183584c..b0576aa38 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -36,6 +36,7 @@ import shutil
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_states
+from nova import config
from nova import context as nova_context
from nova import exception
from nova import flags
@@ -56,7 +57,7 @@ Template = None
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
baremetal_opts = [
cfg.StrOpt('baremetal_type',
@@ -64,7 +65,7 @@ baremetal_opts = [
help='baremetal domain type'),
]
-FLAGS.register_opts(baremetal_opts)
+CONF.register_opts(baremetal_opts)
def _late_load_cheetah():
@@ -125,11 +126,11 @@ class BareMetalDriver(driver.ComputeDriver):
return True
def _cleanup(self, instance):
- target = os.path.join(FLAGS.instances_path, instance['name'])
+ target = os.path.join(CONF.instances_path, instance['name'])
instance_name = instance['name']
LOG.info(_('instance %(instance_name)s: deleting instance files'
' %(target)s') % locals(), instance=instance)
- if FLAGS.baremetal_type == 'lxc':
+ if CONF.baremetal_type == 'lxc':
disk.destroy_container(self.container)
if os.path.exists(target):
shutil.rmtree(target)
@@ -175,9 +176,9 @@ class BareMetalDriver(driver.ComputeDriver):
"""
self.destroy(instance, False)
- rescue_images = {'image_id': FLAGS.baremetal_rescue_image_id,
- 'kernel_id': FLAGS.baremetal_rescue_kernel_id,
- 'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id}
+ rescue_images = {'image_id': CONF.baremetal_rescue_image_id,
+ 'kernel_id': CONF.baremetal_rescue_kernel_id,
+ 'ramdisk_id': CONF.baremetal_rescue_ramdisk_id}
self._create_image(instance, '.rescue', rescue_images,
network_info=network_info)
@@ -211,7 +212,7 @@ class BareMetalDriver(driver.ComputeDriver):
LOG.debug(_("<============= spawn of baremetal =============>"))
def basepath(fname='', suffix=''):
- return os.path.join(FLAGS.instances_path,
+ return os.path.join(CONF.instances_path,
instance['name'],
fname + suffix)
bpath = basepath(suffix='')
@@ -265,7 +266,7 @@ class BareMetalDriver(driver.ComputeDriver):
return timer.start(interval=0.5).wait()
def get_console_output(self, instance):
- console_log = os.path.join(FLAGS.instances_path, instance['name'],
+ console_log = os.path.join(CONF.instances_path, instance['name'],
'console.log')
libvirt_utils.chown(console_log, os.getuid())
@@ -302,7 +303,7 @@ class BareMetalDriver(driver.ComputeDriver):
If cow is True, it will make a CoW image instead of a copy.
"""
if not os.path.exists(target):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, '_base')
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, fname)
@@ -327,7 +328,7 @@ class BareMetalDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(FLAGS.instances_path,
+ return os.path.join(CONF.instances_path,
inst['name'],
fname + suffix)
@@ -338,7 +339,7 @@ class BareMetalDriver(driver.ComputeDriver):
LOG.info(_('instance %s: Creating image'), inst['name'],
instance=inst)
- if FLAGS.baremetal_type == 'lxc':
+ if CONF.baremetal_type == 'lxc':
container_dir = '%s/rootfs' % basepath(suffix='')
fileutils.ensure_tree(container_dir)
@@ -386,7 +387,7 @@ class BareMetalDriver(driver.ComputeDriver):
context=context,
target=basepath('root'),
fname=root_fname,
- cow=False, # FLAGS.use_cow_images,
+ cow=False, # CONF.use_cow_images,
image_id=disk_images['image_id'],
user_id=inst['user_id'],
project_id=inst['project_id'])
@@ -398,7 +399,7 @@ class BareMetalDriver(driver.ComputeDriver):
if not inst['kernel_id']:
target_partition = "1"
- if FLAGS.baremetal_type == 'lxc':
+ if CONF.baremetal_type == 'lxc':
target_partition = None
if inst['key_data']:
@@ -408,7 +409,7 @@ class BareMetalDriver(driver.ComputeDriver):
net = None
nets = []
- ifc_template = open(FLAGS.injected_network_template).read()
+ ifc_template = open(CONF.injected_network_template).read()
ifc_num = -1
have_injected_networks = False
admin_context = nova_context.get_admin_context()
@@ -424,7 +425,7 @@ class BareMetalDriver(driver.ComputeDriver):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
@@ -442,7 +443,7 @@ class BareMetalDriver(driver.ComputeDriver):
if have_injected_networks:
net = str(Template(ifc_template,
searchList=[{'interfaces': nets,
- 'use_ipv6': FLAGS.use_ipv6}]))
+ 'use_ipv6': CONF.use_ipv6}]))
metadata = inst.get('metadata')
if any((key, net, metadata)):
@@ -459,7 +460,7 @@ class BareMetalDriver(driver.ComputeDriver):
try:
disk.inject_data(injection_path, key, net, metadata,
partition=target_partition,
- use_cow=False) # FLAGS.use_cow_images
+ use_cow=False) # CONF.use_cow_images
except Exception as e:
# This could be a windows image, or a vmdk format disk
@@ -482,9 +483,9 @@ class BareMetalDriver(driver.ComputeDriver):
driver_type = 'raw'
- xml_info = {'type': FLAGS.baremetal_type,
+ xml_info = {'type': CONF.baremetal_type,
'name': instance['name'],
- 'basepath': os.path.join(FLAGS.instances_path,
+ 'basepath': os.path.join(CONF.instances_path,
instance['name']),
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
@@ -676,11 +677,11 @@ class BareMetalDriver(driver.ComputeDriver):
'local_gb_used': self.get_local_gb_used(),
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
- 'hypervisor_hostname': FLAGS.host,
+ 'hypervisor_hostname': CONF.host,
'cpu_info': self.get_cpu_info(),
- 'cpu_arch': FLAGS.cpu_arch}
+ 'cpu_arch': CONF.cpu_arch}
- LOG.info(_('#### RLK: cpu_arch = %s ') % FLAGS.cpu_arch)
+ LOG.info(_('#### RLK: cpu_arch = %s ') % CONF.cpu_arch)
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
@@ -729,7 +730,7 @@ class HostState(object):
data["vcpus"] = self.connection.get_vcpu_total()
data["vcpus_used"] = self.connection.get_vcpu_used()
data["cpu_info"] = self.connection.get_cpu_info()
- data["cpu_arch"] = FLAGS.cpu_arch
+ data["cpu_arch"] = CONF.cpu_arch
data["disk_total"] = self.connection.get_local_gb_total()
data["disk_used"] = self.connection.get_local_gb_used()
data["disk_available"] = data["disk_total"] - data["disk_used"]
@@ -738,5 +739,5 @@ class HostState(object):
self.connection.get_memory_mb_used())
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()
- data["hypervisor_hostname"] = FLAGS.host
+ data["hypervisor_hostname"] = CONF.host
self._stats = data
diff --git a/nova/virt/baremetal/nodes.py b/nova/virt/baremetal/nodes.py
index b4f2a50e2..d4bd90100 100644
--- a/nova/virt/baremetal/nodes.py
+++ b/nova/virt/baremetal/nodes.py
@@ -15,25 +15,25 @@
# under the License.
#
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.virt.baremetal import fake
from nova.virt.baremetal import tilera
-FLAGS = flags.FLAGS
-
baremetal_opts = [
cfg.StrOpt('baremetal_driver',
default='tilera',
help='Bare-metal driver runs on')
]
-FLAGS.register_opts(baremetal_opts)
+CONF = config.CONF
+CONF.register_opts(baremetal_opts)
def get_baremetal_nodes():
- d = FLAGS.baremetal_driver
+ d = CONF.baremetal_driver
if d == 'tilera':
return tilera.get_baremetal_nodes()
elif d == 'fake':
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index 4d4a37007..c0343bac4 100644
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -26,13 +26,14 @@ import subprocess
import time
from nova.compute import power_state
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
tilera_opts = [
cfg.StrOpt('tile_monitor',
@@ -40,7 +41,7 @@ tilera_opts = [
help='Tilera command line program for Bare-metal driver')
]
-FLAGS.register_opts(tilera_opts)
+CONF.register_opts(tilera_opts)
LOG = logging.getLogger(__name__)
@@ -236,7 +237,7 @@ class BareMetalNodes(object):
User can access the bare-metal node using ssh.
"""
- cmd = (FLAGS.tile_monitor +
+ cmd = (CONF.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"ifconfig xgbe0 hw ether " + mac_address +
" - --wait --run - ifconfig xgbe0 " + ip_address +
@@ -299,7 +300,7 @@ class BareMetalNodes(object):
"""
Sets and Runs sshd in the node.
"""
- cmd = (FLAGS.tile_monitor +
+ cmd = (CONF.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"/usr/sbin/sshd - --wait --quit")
subprocess.Popen(cmd, shell=True)
@@ -332,7 +333,7 @@ class BareMetalNodes(object):
"""
node_ip = self.get_ip_by_id(node_id)
log_path = "/tftpboot/log_" + str(node_id)
- kmsg_cmd = (FLAGS.tile_monitor +
+ kmsg_cmd = (CONF.tile_monitor +
" --resume --net " + node_ip +
" -- dmesg > " + log_path)
subprocess.Popen(kmsg_cmd, shell=True)
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 7b4cb718b..0dc11483d 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -21,6 +21,7 @@ import os
import shutil
import tempfile
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -47,8 +48,8 @@ configdrive_opts = [
'(if set, valid options are: always)'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(configdrive_opts)
+CONF = config.CONF
+CONF.register_opts(configdrive_opts)
class ConfigDriveBuilder(object):
@@ -58,7 +59,7 @@ class ConfigDriveBuilder(object):
# TODO(mikal): I don't think I can use utils.tempdir here, because
# I need to have the directory last longer than the scope of this
# method call
- self.tempdir = tempfile.mkdtemp(dir=FLAGS.config_drive_tempdir,
+ self.tempdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_gen_')
if instance_md is not None:
@@ -105,7 +106,7 @@ class ConfigDriveBuilder(object):
mounted = False
try:
- mountdir = tempfile.mkdtemp(dir=FLAGS.config_drive_tempdir,
+ mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
_out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir,
run_as_root=True)
@@ -133,13 +134,13 @@ class ConfigDriveBuilder(object):
shutil.rmtree(mountdir)
def make_drive(self, path):
- if FLAGS.config_drive_format == 'iso9660':
+ if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path)
- elif FLAGS.config_drive_format == 'vfat':
+ elif CONF.config_drive_format == 'vfat':
self._make_vfat(path)
else:
raise exception.ConfigDriveUnknownFormat(
- format=FLAGS.config_drive_format)
+ format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
@@ -152,7 +153,7 @@ class ConfigDriveBuilder(object):
def required_by(instance):
- return instance.get('config_drive') or FLAGS.force_config_drive
+ return instance.get('config_drive') or CONF.force_config_drive
def enabled_for(instance):
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index e113391a5..529f231af 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -32,6 +32,7 @@ import tempfile
if os.name != 'nt':
import crypt
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -76,14 +77,14 @@ disk_opts = [
'The format is <os_type>=<mkfs command>'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(disk_opts)
+CONF = config.CONF
+CONF.register_opts(disk_opts)
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
-for s in FLAGS.virt_mkfs:
+for s in CONF.virt_mkfs:
# NOTE(yamahata): mkfs command may includes '=' for its options.
# So item.partition('=') doesn't work here
os_type, mkfs_command = s.split('=', 1)
@@ -188,7 +189,7 @@ class _DiskImage(object):
# As a performance tweak, don't bother trying to
# directly loopback mount a cow image.
- self.handlers = FLAGS.img_handlers[:]
+ self.handlers = CONF.img_handlers[:]
if use_cow and 'loop' in self.handlers:
self.handlers.remove('loop')
diff --git a/nova/virt/disk/nbd.py b/nova/virt/disk/nbd.py
index 10895d5c9..8503273a6 100644
--- a/nova/virt/disk/nbd.py
+++ b/nova/virt/disk/nbd.py
@@ -18,6 +18,7 @@
import os
import time
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova import utils
@@ -33,8 +34,8 @@ nbd_opts = [
help='maximum number of possible nbd devices'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(nbd_opts)
+CONF = config.CONF
+CONF.register_opts(nbd_opts)
class Mount(mount.Mount):
@@ -52,7 +53,7 @@ class Mount(mount.Mount):
# are no free devices. Note that patch currently hardcodes 16 devices.
# We might be able to alleviate problem 2. by scanning /proc/partitions
# like the aforementioned patch does.
- _DEVICES = ['/dev/nbd%s' % i for i in range(FLAGS.max_nbd_devices)]
+ _DEVICES = ['/dev/nbd%s' % i for i in range(CONF.max_nbd_devices)]
def _allocate_nbd(self):
if not os.path.exists("/sys/block/nbd0"):
@@ -89,7 +90,7 @@ class Mount(mount.Mount):
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
- for _i in range(FLAGS.timeout_nbd):
+ for _i in range(CONF.timeout_nbd):
if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
self.device = device
break
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index a466fa180..cb960466f 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -25,9 +25,7 @@ Driver base-classes:
from nova import flags
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def block_device_info_get_root(block_device_info):
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index d066a9c21..ff464e8e3 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -17,6 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import context
from nova import db
from nova import flags
@@ -41,12 +42,12 @@ firewall_opts = [
help='Whether to allow network traffic from same network'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(firewall_opts)
+CONF = config.CONF
+CONF.register_opts(firewall_opts)
def load_driver(default, *args, **kwargs):
- fw_class = importutils.import_class(FLAGS.firewall_driver or default)
+ fw_class = importutils.import_class(CONF.firewall_driver or default)
return fw_class(*args, **kwargs)
@@ -204,7 +205,7 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules = self._create_filter(ips_v4, chain_name)
ipv6_rules = []
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ips_v6 = [ip['ip'] for (_n, mapping) in network_info
for ip in mapping['ip6s']]
ipv6_rules = self._create_filter(ips_v6, chain_name)
@@ -215,7 +216,7 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
@@ -223,7 +224,7 @@ class IptablesFirewallDriver(FirewallDriver):
inst_ipv6_rules):
network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
@@ -235,7 +236,7 @@ class IptablesFirewallDriver(FirewallDriver):
chain_name = self._instance_chain_name(instance)
self.iptables.ipv4['filter'].remove_chain(chain_name)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].remove_chain(chain_name)
@staticmethod
@@ -276,7 +277,7 @@ class IptablesFirewallDriver(FirewallDriver):
cidrs = [network['cidr'] for (network, _i) in network_info]
for cidr in cidrs:
ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
cidrv6s = [network['cidr_v6'] for (network, _i) in
network_info]
@@ -335,13 +336,13 @@ class IptablesFirewallDriver(FirewallDriver):
self._do_dhcp_rules(ipv4_rules, network_info)
#Allow project network traffic
- if FLAGS.allow_same_net_traffic:
+ if CONF.allow_same_net_traffic:
self._do_project_network_rules(ipv4_rules, ipv6_rules,
network_info)
- # We wrap these in FLAGS.use_ipv6 because they might cause
+ # We wrap these in CONF.use_ipv6 because they might cause
# a DB lookup. The other ones are just list operations, so
# they're not worth the clutter.
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
@@ -462,19 +463,19 @@ class IptablesFirewallDriver(FirewallDriver):
def _purge_provider_fw_rules(self):
"""Remove all rules from the provider chains."""
self.iptables.ipv4['filter'].empty_chain('provider')
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].empty_chain('provider')
def _build_provider_fw_rules(self):
"""Create all rules for the provider IP DROPs."""
self.iptables.ipv4['filter'].add_chain('provider')
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain('provider')
ipv4_rules, ipv6_rules = self._provider_rules()
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule('provider', rule)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 16baf4059..1bd6ab05f 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -21,6 +21,7 @@ Management class for live migration VM operations.
import os
import sys
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import excutils
@@ -34,7 +35,7 @@ if sys.platform == 'win32':
import wmi
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class LiveMigrationOps(baseops.BaseOps):
@@ -139,7 +140,7 @@ class LiveMigrationOps(baseops.BaseOps):
LOG.debug(_("pre_live_migration called"), instance=instance)
self._check_live_migration_config()
- if FLAGS.use_cow_images:
+ if CONF.use_cow_images:
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
block_device_info)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index 5e4676a4a..65b123932 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -22,6 +22,7 @@ import os
import shutil
import sys
+from nova import config
from nova import exception
from nova import flags
from nova.image import glance
@@ -36,7 +37,7 @@ from xml.etree import ElementTree
if sys.platform == 'win32':
import wmi
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -75,7 +76,7 @@ class SnapshotOps(baseops.BaseOps):
f = None
try:
- src_vhd_path = os.path.join(FLAGS.instances_path, instance_name,
+ src_vhd_path = os.path.join(CONF.instances_path, instance_name,
instance_name + ".vhd")
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index e248fd37d..d252fbfb3 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -21,6 +21,7 @@ Management class for basic VM operations.
import os
import uuid
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -44,8 +45,8 @@ hyperv_opts = [
'hosts with different CPU features')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(hyperv_opts)
+CONF = config.CONF
+CONF.register_opts(hyperv_opts)
class VMOps(baseops.BaseOps):
@@ -128,7 +129,7 @@ class VMOps(baseops.BaseOps):
image_id=instance['image_ref'],
user=instance['user_id'],
project=instance['project_id'],
- cow=FLAGS.use_cow_images)
+ cow=CONF.use_cow_images)
except Exception as exn:
LOG.exception(_('cache image failed: %s'), exn)
self.destroy(instance)
@@ -200,7 +201,7 @@ class VMOps(baseops.BaseOps):
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
- if FLAGS.limit_cpu_features:
+ if CONF.limit_cpu_features:
procsetting.LimitProcessorFeatures = True
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
@@ -337,20 +338,20 @@ class VMOps(baseops.BaseOps):
"""
#If there are no physical nics connected to networks, return.
LOG.debug(_("Attempting to bind NIC to %s ")
- % FLAGS.vswitch_name)
- if FLAGS.vswitch_name:
+ % CONF.vswitch_name)
+ if CONF.vswitch_name:
LOG.debug(_("Attempting to bind NIC to %s ")
- % FLAGS.vswitch_name)
+ % CONF.vswitch_name)
bound = self._conn.Msvm_VirtualSwitch(
- ElementName=FLAGS.vswitch_name)
+ ElementName=CONF.vswitch_name)
else:
LOG.debug(_("No vSwitch specified, attaching to default"))
self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
if len(bound) == 0:
return None
- if FLAGS.vswitch_name:
+ if CONF.vswitch_name:
return self._conn.Msvm_VirtualSwitch(
- ElementName=FLAGS.vswitch_name)[0]\
+ ElementName=CONF.vswitch_name)[0]\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
else:
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 2e54e6d47..789d74149 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -25,6 +25,7 @@ import sys
import time
import uuid
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
@@ -35,7 +36,7 @@ from nova.virt import images
if sys.platform == 'win32':
import wmi
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -75,20 +76,20 @@ class VMUtils(object):
return True
def get_vhd_path(self, instance_name):
- base_vhd_folder = os.path.join(FLAGS.instances_path, instance_name)
+ base_vhd_folder = os.path.join(CONF.instances_path, instance_name)
if not os.path.exists(base_vhd_folder):
LOG.debug(_('Creating folder %s '), base_vhd_folder)
os.makedirs(base_vhd_folder)
return os.path.join(base_vhd_folder, instance_name + ".vhd")
def get_base_vhd_path(self, image_name):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, '_base')
if not os.path.exists(base_dir):
os.makedirs(base_dir)
return os.path.join(base_dir, image_name + ".vhd")
def make_export_path(self, instance_name):
- export_folder = os.path.join(FLAGS.instances_path, "export",
+ export_folder = os.path.join(CONF.instances_path, "export",
instance_name)
if os.path.isdir(export_folder):
LOG.debug(_('Removing existing folder %s '), export_folder)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index d15dfb68c..d2726f871 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -21,6 +21,7 @@ Management class for Storage-related functions (attach, detach, etc).
import time
from nova import block_device
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -40,8 +41,8 @@ hyper_volumeops_opts = [
help='The seconds to wait between an volume attachment attempt'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(hyper_volumeops_opts)
+CONF = config.CONF
+CONF.register_opts(hyper_volumeops_opts)
class VolumeOps(baseops.BaseOps):
@@ -59,9 +60,9 @@ class VolumeOps(baseops.BaseOps):
self._initiator = None
self._default_root_device = 'vda'
self._attaching_volume_retry_count = \
- FLAGS.hyperv_attaching_volume_retry_count
+ CONF.hyperv_attaching_volume_retry_count
self._wait_between_attach_retry = \
- FLAGS.hyperv_wait_between_attach_retry
+ CONF.hyperv_wait_between_attach_retry
self._volutils = volumeutils.VolumeUtils()
def attach_boot_volume(self, block_device_info, vm_name):
@@ -207,7 +208,7 @@ class VolumeOps(baseops.BaseOps):
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': self._initiator,
}
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 018a4c278..b4ec7dc7d 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -25,6 +25,7 @@ import sys
import time
from nova import block_device
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova.virt import driver
@@ -35,7 +36,7 @@ if sys.platform == 'win32':
import _winreg
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class VolumeUtils(object):
@@ -71,7 +72,7 @@ class VolumeUtils(object):
initiator_name = "iqn.1991-05.com.microsoft:" + \
hostname.lower()
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': initiator_name,
}
@@ -89,7 +90,7 @@ class VolumeUtils(object):
#Sending login
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
#Waiting the disk to be mounted. Research this
- time.sleep(FLAGS.hyperv_wait_between_attach_retry)
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, _conn_wmi, target_iqn):
""" Logs out storage target through its session id """
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5b631a0da..f0ed3ba68 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -24,6 +24,7 @@ Handling of VM disk images.
import os
import re
+from nova import config
from nova import exception
from nova import flags
from nova.image import glance
@@ -40,8 +41,8 @@ image_opts = [
help='Force backing images to raw format'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(image_opts)
+CONF = config.CONF
+CONF.register_opts(image_opts)
class QemuImgInfo(object):
@@ -218,7 +219,7 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals())
- if fmt != "raw" and FLAGS.force_raw_images:
+ if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
with utils.remove_path_on_error(staged):
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index da86fa135..c59876d95 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -185,10 +185,8 @@ libvirt_opts = [
'before uploading them to image service'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(libvirt_opts)
-
CONF = config.CONF
+CONF.register_opts(libvirt_opts)
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
@@ -272,19 +270,19 @@ class LibvirtDriver(driver.ComputeDriver):
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER,
get_connection=self._get_connection)
- self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
+ self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
self.volume_drivers = {}
- for driver_str in FLAGS.libvirt_volume_drivers:
+ for driver_str in CONF.libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
self.volume_drivers[driver_type] = driver_class(self)
self._host_state = None
disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
- if FLAGS.libvirt_disk_prefix:
- self._disk_prefix = FLAGS.libvirt_disk_prefix
+ if CONF.libvirt_disk_prefix:
+ self._disk_prefix = CONF.libvirt_disk_prefix
else:
- self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
+ self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
self.default_root_device = self._disk_prefix + 'a'
self.default_second_device = self._disk_prefix + 'b'
self.default_third_device = self._disk_prefix + 'c'
@@ -292,7 +290,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(FLAGS.use_cow_images)
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
@property
def disk_cachemode(self):
@@ -305,7 +303,7 @@ class LibvirtDriver(driver.ComputeDriver):
# provided the filesystem is cache coherant (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
- if not self._supports_direct_io(FLAGS.instances_path):
+ if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@@ -338,7 +336,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
- if not FLAGS.libvirt_nonblocking:
+ if not CONF.libvirt_nonblocking:
self._wrapped_conn = self._connect(self.uri,
self.read_only)
else:
@@ -364,14 +362,14 @@ class LibvirtDriver(driver.ComputeDriver):
@property
def uri(self):
- if FLAGS.libvirt_type == 'uml':
- uri = FLAGS.libvirt_uri or 'uml:///system'
- elif FLAGS.libvirt_type == 'xen':
- uri = FLAGS.libvirt_uri or 'xen:///'
- elif FLAGS.libvirt_type == 'lxc':
- uri = FLAGS.libvirt_uri or 'lxc:///'
+ if CONF.libvirt_type == 'uml':
+ uri = CONF.libvirt_uri or 'uml:///system'
+ elif CONF.libvirt_type == 'xen':
+ uri = CONF.libvirt_uri or 'xen:///'
+ elif CONF.libvirt_type == 'lxc':
+ uri = CONF.libvirt_uri or 'lxc:///'
else:
- uri = FLAGS.libvirt_uri or 'qemu:///system'
+ uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
@@ -550,11 +548,11 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- target = os.path.join(FLAGS.instances_path, instance['name'])
+ target = os.path.join(CONF.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
- if FLAGS.libvirt_type == 'lxc':
- container_dir = os.path.join(FLAGS.instances_path,
+ if CONF.libvirt_type == 'lxc':
+ container_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
disk.destroy_container(container_dir=container_dir)
@@ -579,8 +577,8 @@ class LibvirtDriver(driver.ComputeDriver):
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object"""
- if FLAGS.libvirt_images_volume_group:
- vg = os.path.join('/dev', FLAGS.libvirt_images_volume_group)
+ if CONF.libvirt_images_volume_group:
+ vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
@@ -605,18 +603,18 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': self._initiator,
- 'host': FLAGS.host
+ 'host': CONF.host
}
def _cleanup_resize(self, instance, network_info):
- target = os.path.join(FLAGS.instances_path,
+ target = os.path.join(CONF.instances_path,
instance['name'] + "_resize")
if os.path.exists(target):
shutil.rmtree(target)
- if instance['host'] != FLAGS.host:
+ if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
@@ -638,7 +636,7 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
@@ -701,7 +699,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
raise exception.DiskNotFound(location=mount_device)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
self._detach_lxc_volume(xml, virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
@@ -821,7 +819,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
- image_format = FLAGS.snapshot_image_format or source_format
+ image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
@@ -841,7 +839,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = LIBVIRT_POWER_STATE[state]
# NOTE(dkang): managedSave does not work for LXC
- if FLAGS.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc':
if state == power_state.RUNNING:
virt_dom.managedSave(0)
@@ -852,7 +850,7 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot.create()
# Export the snapshot to a raw image
- snapshot_directory = FLAGS.libvirt_snapshots_directory
+ snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
@@ -862,7 +860,7 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot.delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
- if FLAGS.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc':
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
@@ -910,7 +908,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
- for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
+ for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
@@ -1017,15 +1015,15 @@ class LibvirtDriver(driver.ComputeDriver):
"""
unrescue_xml = self._get_domain_xml(instance, network_info)
- unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ unrescue_xml_path = os.path.join(CONF.instances_path,
instance['name'],
'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
- 'image_id': FLAGS.rescue_image_id or instance['image_ref'],
- 'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
- 'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
+ 'image_id': CONF.rescue_image_id or instance['image_ref'],
+ 'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
+ 'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
xml = self.to_xml(instance, network_info, image_meta,
rescue=rescue_images)
@@ -1039,7 +1037,7 @@ class LibvirtDriver(driver.ComputeDriver):
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
- unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ unrescue_xml_path = os.path.join(CONF.instances_path,
instance['name'],
'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
@@ -1047,7 +1045,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
- rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
+ rescue_files = os.path.join(CONF.instances_path, instance['name'],
"*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
@@ -1163,7 +1161,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def get_host_ip_addr():
- return FLAGS.my_ip
+ return CONF.my_ip
@exception.wrap_exception()
def get_vnc_console(self, instance):
@@ -1178,7 +1176,7 @@ class LibvirtDriver(driver.ComputeDriver):
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
- host = FLAGS.vncserver_proxyclient_address
+ host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@@ -1224,7 +1222,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""Create a blank image of specified size"""
if not fs_format:
- fs_format = FLAGS.default_ephemeral_format
+ fs_format = CONF.default_ephemeral_format
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
@@ -1243,7 +1241,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_console_log_path(instance_name):
- return os.path.join(FLAGS.instances_path, instance_name,
+ return os.path.join(CONF.instances_path, instance_name,
'console.log')
def _chown_console_log_for_instance(self, instance_name):
@@ -1259,11 +1257,11 @@ class LibvirtDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(FLAGS.instances_path,
+ return os.path.join(CONF.instances_path,
instance['name'],
fname + suffix)
- def image(fname, image_type=FLAGS.libvirt_images_type):
+ def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance['name'],
fname + suffix, image_type)
@@ -1276,8 +1274,8 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_('Creating image'), instance=instance)
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
- if FLAGS.libvirt_type == 'lxc':
- container_dir = os.path.join(FLAGS.instances_path,
+ if CONF.libvirt_type == 'lxc':
+ container_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
fileutils.ensure_tree(container_dir)
@@ -1378,13 +1376,13 @@ class LibvirtDriver(driver.ComputeDriver):
# target partition for file injection
target_partition = None
if not instance['kernel_id']:
- target_partition = FLAGS.libvirt_inject_partition
+ target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
target_partition = None
- if FLAGS.libvirt_inject_key and instance['key_data']:
+ if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
@@ -1392,7 +1390,7 @@ class LibvirtDriver(driver.ComputeDriver):
# File injection
metadata = instance.get('metadata')
- if not FLAGS.libvirt_inject_password:
+ if not CONF.libvirt_inject_password:
admin_pass = None
net = netutils.get_injected_network_template(network_info)
@@ -1429,7 +1427,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
- use_cow=FLAGS.use_cow_images)
+ use_cow=CONF.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
@@ -1437,12 +1435,12 @@ class LibvirtDriver(driver.ComputeDriver):
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
disk.setup_container(basepath('disk'),
container_dir=container_dir,
- use_cow=FLAGS.use_cow_images)
+ use_cow=CONF.use_cow_images)
- if FLAGS.libvirt_type == 'uml':
+ if CONF.libvirt_type == 'uml':
libvirt_utils.chown(basepath('disk'), 'root')
@staticmethod
@@ -1494,11 +1492,11 @@ class LibvirtDriver(driver.ComputeDriver):
return guestcpu
def get_guest_cpu_config(self):
- mode = FLAGS.libvirt_cpu_mode
- model = FLAGS.libvirt_cpu_model
+ mode = CONF.libvirt_cpu_mode
+ model = CONF.libvirt_cpu_model
if mode is None:
- if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
+ if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
@@ -1506,10 +1504,10 @@ class LibvirtDriver(driver.ComputeDriver):
if mode == "none":
return None
- if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
+ if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
- "support selecting CPU models") % FLAGS.libvirt_type
+ "support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
@@ -1552,10 +1550,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
- fs.source_dir = os.path.join(FLAGS.instances_path,
+ fs.source_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
devices.append(fs)
@@ -1566,9 +1564,9 @@ class LibvirtDriver(driver.ComputeDriver):
else:
root_device_type = 'disk'
- if FLAGS.libvirt_type == "uml":
+ if CONF.libvirt_type == "uml":
default_disk_bus = "uml"
- elif FLAGS.libvirt_type == "xen":
+ elif CONF.libvirt_type == "xen":
default_disk_bus = "xen"
else:
default_disk_bus = "virtio"
@@ -1663,7 +1661,7 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(FLAGS.instances_path,
+ diskconfig.source_path = os.path.join(CONF.instances_path,
instance['name'],
"disk.config")
diskconfig.target_dev = self.default_last_device
@@ -1686,7 +1684,7 @@ class LibvirtDriver(driver.ComputeDriver):
inactive=True)
guest = vconfig.LibvirtConfigGuest()
- guest.virt_type = FLAGS.libvirt_type
+ guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
@@ -1708,65 +1706,65 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
- elif FLAGS.libvirt_type == "uml":
+ elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
- elif FLAGS.libvirt_type == "xen":
+ elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
- if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
+ if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = '/usr/lib/xen/boot/hvmloader'
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = "console=ttyS0"
- elif FLAGS.libvirt_type == "uml":
+ elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name or "/dev/ubda"
else:
- if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
+ if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name or "/dev/xvda"
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
- guest.os_kernel = os.path.join(FLAGS.instances_path,
+ guest.os_kernel = os.path.join(CONF.instances_path,
instance['name'],
"kernel.rescue")
if rescue.get('ramdisk_id'):
- guest.os_initrd = os.path.join(FLAGS.instances_path,
+ guest.os_initrd = os.path.join(CONF.instances_path,
instance['name'],
"ramdisk.rescue")
elif instance['kernel_id']:
- guest.os_kernel = os.path.join(FLAGS.instances_path,
+ guest.os_kernel = os.path.join(CONF.instances_path,
instance['name'],
"kernel")
- if FLAGS.libvirt_type == "xen":
+ if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = "root=%s console=ttyS0" % (
root_device_name or "/dev/vda",)
if instance['ramdisk_id']:
- guest.os_initrd = os.path.join(FLAGS.instances_path,
+ guest.os_initrd = os.path.join(CONF.instances_path,
instance['name'],
"ramdisk")
else:
guest.os_boot_dev = "hd"
- if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
+ if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
- if FLAGS.libvirt_type == "kvm":
+ if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
@@ -1793,14 +1791,14 @@ class LibvirtDriver(driver.ComputeDriver):
cfg = self.vif_driver.plug(instance, (network, mapping))
guest.add_device(cfg)
- if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
+ if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = os.path.join(FLAGS.instances_path,
+ consolelog.source_path = os.path.join(CONF.instances_path,
instance['name'],
"console.log")
guest.add_device(consolelog)
@@ -1813,8 +1811,8 @@ class LibvirtDriver(driver.ComputeDriver):
consolepty.type = "pty"
guest.add_device(consolepty)
- if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
- if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
+ if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
+ if CONF.use_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
@@ -1822,8 +1820,8 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
- graphics.keymap = FLAGS.vnc_keymap
- graphics.listen = FLAGS.vncserver_listen
+ graphics.keymap = CONF.vnc_keymap
+ graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
return guest
@@ -2010,7 +2008,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
@@ -2048,7 +2046,7 @@ class LibvirtDriver(driver.ComputeDriver):
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
- if FLAGS.libvirt_type == 'xen':
+ if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
# skip dom0
@@ -2079,7 +2077,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
@@ -2245,7 +2243,7 @@ class LibvirtDriver(driver.ComputeDriver):
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
- (disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
+ (disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
src = instance_ref['host']
@@ -2282,7 +2280,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# Checking shared storage connectivity
# For block migration, instances_path should not be on shared storage.
- source = FLAGS.host
+ source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
@@ -2379,8 +2377,8 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.InvalidCPUInfo(reason=m % locals())
def _create_shared_storage_test_file(self):
- """Makes tmpfile under FLAGS.instance_path."""
- dirpath = FLAGS.instances_path
+ """Makes tmpfile under CONF.instance_path."""
+ dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
@@ -2389,17 +2387,17 @@ class LibvirtDriver(driver.ComputeDriver):
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
- """Confirms existence of the tmpfile under FLAGS.instances_path.
+ """Confirms existence of the tmpfile under CONF.instances_path.
If the tmpfile cannot be confirmed, returns False."""
- tmp_file = os.path.join(FLAGS.instances_path, filename)
+ tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
- """Removes existence of the tmpfile under FLAGS.instances_path."""
- tmp_file = os.path.join(FLAGS.instances_path, filename)
+ """Removes existence of the tmpfile under CONF.instances_path."""
+ tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
@@ -2423,7 +2421,7 @@ class LibvirtDriver(driver.ComputeDriver):
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
- timeout_count = range(FLAGS.live_migration_retry_count)
+ timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance_ref,
network_info):
@@ -2486,17 +2484,17 @@ class LibvirtDriver(driver.ComputeDriver):
# Do live migration.
try:
if block_migration:
- flaglist = FLAGS.block_migration_flag.split(',')
+ flaglist = CONF.block_migration_flag.split(',')
else:
- flaglist = FLAGS.live_migration_flag.split(',')
+ flaglist = CONF.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance_ref["name"])
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
- FLAGS.live_migration_bandwidth)
+ CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
@@ -2535,7 +2533,7 @@ class LibvirtDriver(driver.ComputeDriver):
# ensure_filtering_rules_for_instance, to ensure bridge is set up
# Retry operation is necessary because requests come in continuously,
# and concurrent iptables updates make it complain.
- max_retry = FLAGS.live_migration_retry_count
+ max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance_ref, network_info)
@@ -2563,7 +2561,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
- instance_dir = os.path.join(FLAGS.instances_path, instance['name'])
+ instance_dir = os.path.join(CONF.instances_path, instance['name'])
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
@@ -2584,7 +2582,7 @@ class LibvirtDriver(driver.ComputeDriver):
image = self.image_backend.image(instance['name'],
instance_disk,
- FLAGS.libvirt_images_type)
+ CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=ctxt,
filename=cache_name,
@@ -2625,7 +2623,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = os.path.join(FLAGS.instances_path,
+ instance_dir = os.path.join(CONF.instances_path,
instance_ref["name"])
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
@@ -2802,7 +2800,7 @@ class LibvirtDriver(driver.ComputeDriver):
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
same_host = (dest == self.get_host_ip_addr())
- inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
+ inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
@@ -2876,7 +2874,7 @@ class LibvirtDriver(driver.ComputeDriver):
if size:
disk.extend(info['path'], size)
- if fmt == 'raw' and FLAGS.use_cow_images:
+ if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
@@ -2902,7 +2900,7 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
- inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
+ inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
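
The driver hunks above are a mechanical swap of the FLAGS global for the shared CONF object. A minimal sketch of the module-level pattern the converted files settle on, assuming this tree's nova.config exposes the shared ConfigOpts instance as CONF; the example_opt option is illustrative only and not part of this patch:

    from nova import config
    from nova.openstack.common import cfg

    CONF = config.CONF

    example_opts = [
        cfg.StrOpt('example_opt',
                   default='value',
                   help='Illustrative option, not part of this patch'),
    ]
    CONF.register_opts(example_opts)

    def read_option():
        # Values are read as attributes on CONF, exactly as they were on FLAGS.
        return CONF.example_opt
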
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 819a8ec0c..ad14f21de 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -20,13 +20,13 @@
from eventlet import tpool
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
try:
import libvirt
@@ -115,7 +115,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
- if instance['image_ref'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(CONF.vpn_image_id):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@@ -142,7 +142,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self._define_filter(self.nova_no_nd_reflection_filter)
filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
@@ -164,7 +164,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if callable(xml):
xml = xml()
# execute in a native thread and block current greenthread until done
- if not FLAGS.libvirt_nonblocking:
+ if not CONF.libvirt_nonblocking:
# NOTE(maoy): the original implementation is to have the API called
# in the thread pool no matter what.
tpool.execute(self._conn.nwfilterDefineXML, xml)
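
A minimal sketch of the guard used in _define_filter above, assuming conn is a live libvirt connection and nonblocking mirrors CONF.libvirt_nonblocking; when libvirt runs in blocking mode the nwfilter call is handed to a native thread so it cannot stall the eventlet hub:

    from eventlet import tpool

    def define_filter(conn, xml, nonblocking):
        # Blocking-mode libvirt: run the call in a native thread and block
        # only the current greenthread. Non-blocking libvirt can be called
        # directly.
        if not nonblocking:
            return tpool.execute(conn.nwfilterDefineXML, xml)
        return conn.nwfilterDefineXML(xml)
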
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 3dc8e2037..7152e3e2d 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -19,6 +19,7 @@ import abc
import contextlib
import os
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
@@ -26,7 +27,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common import lockutils
from nova import utils
from nova.virt.disk import api as disk
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import snapshots
from nova.virt.libvirt import utils as libvirt_utils
@@ -46,8 +47,8 @@ __imagebackend_opts = [
' if this flag is set to True.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(__imagebackend_opts)
+CONF = config.CONF
+CONF.register_opts(__imagebackend_opts)
class Image(object):
@@ -67,7 +68,7 @@ class Image(object):
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
- self.lock_path = os.path.join(FLAGS.instances_path, 'locks')
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -90,7 +91,7 @@ class Image(object):
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
"""
- info = config.LibvirtConfigGuestDisk()
+ info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
@@ -121,7 +122,7 @@ class Image(object):
fetch_func(target=target, *args, **kwargs)
if not os.path.exists(self.path):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, '_base')
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
@@ -142,7 +143,7 @@ class Raw(Image):
def __init__(self, instance=None, name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
- self.path = path or os.path.join(FLAGS.instances_path,
+ self.path = path or os.path.join(CONF.instances_path,
instance, name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -170,7 +171,7 @@ class Qcow2(Image):
def __init__(self, instance=None, name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
- self.path = path or os.path.join(FLAGS.instances_path,
+ self.path = path or os.path.join(CONF.instances_path,
instance, name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -209,16 +210,16 @@ class Lvm(Image):
self.lv = info['LV']
self.path = path
else:
- if not FLAGS.libvirt_images_volume_group:
+ if not CONF.libvirt_images_volume_group:
raise RuntimeError(_('You should specify'
' libvirt_images_volume_group'
' flag to use LVM images.'))
- self.vg = FLAGS.libvirt_images_volume_group
+ self.vg = CONF.libvirt_images_volume_group
self.lv = '%s_%s' % (self.escape(instance),
self.escape(name))
self.path = os.path.join('/dev', self.vg, self.lv)
- self.sparse = FLAGS.libvirt_sparse_logical_volumes
+ self.sparse = CONF.libvirt_sparse_logical_volumes
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -270,7 +271,7 @@ class Backend(object):
def backend(self, image_type=None):
if not image_type:
- image_type = FLAGS.libvirt_images_type
+ image_type = CONF.libvirt_images_type
image = self.BACKEND.get(image_type)
if not image:
raise RuntimeError(_('Unknown image_type=%s') % image_type)
@@ -282,7 +283,7 @@ class Backend(object):
:instance: Instance name.
:name: Image name.
:image_type: Image type.
- Optional, is FLAGS.libvirt_images_type by default.
+ Optional, is CONF.libvirt_images_type by default.
"""
backend = self.backend(image_type)
return backend(instance=instance, name=name)
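
Besides the FLAGS-to-CONF swap, this file renames the libvirt config import to vconfig so it no longer collides with the new nova.config import. A short sketch of the two imports coexisting, using the LibvirtConfigGuestDisk class referenced in the hunk; make_disk_config is an illustrative helper, not code from the patch:

    from nova import config
    from nova.virt.libvirt import config as vconfig

    CONF = config.CONF

    def make_disk_config(device_type='disk'):
        # vconfig holds the libvirt XML config classes; CONF is the option
        # registry. The alias keeps the two "config" names apart.
        info = vconfig.LibvirtConfigGuestDisk()
        info.source_device = device_type
        return info
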
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 721587512..961309929 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -57,10 +57,9 @@ imagecache_opts = [
]
CONF = config.CONF
+CONF.register_opts(imagecache_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('base_dir_name', 'nova.compute.manager')
-FLAGS = flags.FLAGS
-FLAGS.register_opts(imagecache_opts)
def read_stored_checksum(target):
@@ -148,7 +147,7 @@ class ImageCacheManager(object):
image_ref_str = str(instance['image_ref'])
local, remote, insts = self.used_images.get(image_ref_str,
(0, 0, []))
- if instance['host'] == FLAGS.host:
+ if instance['host'] == CONF.host:
local += 1
else:
remote += 1
@@ -161,10 +160,10 @@ class ImageCacheManager(object):
def _list_backing_images(self):
"""List the backing images currently in use."""
inuse_images = []
- for ent in os.listdir(FLAGS.instances_path):
+ for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug(_('%s is a valid instance name'), ent)
- disk_path = os.path.join(FLAGS.instances_path, ent, 'disk')
+ disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug(_('%s has a disk file'), ent)
backing_file = virtutils.get_disk_backing_file(disk_path)
@@ -174,8 +173,8 @@ class ImageCacheManager(object):
'backing': backing_file})
if backing_file:
- backing_path = os.path.join(FLAGS.instances_path,
- FLAGS.base_dir_name,
+ backing_path = os.path.join(CONF.instances_path,
+ CONF.base_dir_name,
backing_file)
if not backing_path in inuse_images:
inuse_images.append(backing_path)
@@ -226,7 +225,7 @@ class ImageCacheManager(object):
handle manually when it occurs.
"""
- if not FLAGS.checksum_base_images:
+ if not CONF.checksum_base_images:
return None
stored_checksum = read_stored_checksum(base_file)
@@ -272,9 +271,9 @@ class ImageCacheManager(object):
mtime = os.path.getmtime(base_file)
age = time.time() - mtime
- maxage = FLAGS.remove_unused_resized_minimum_age_seconds
+ maxage = CONF.remove_unused_resized_minimum_age_seconds
if base_file in self.originals:
- maxage = FLAGS.remove_unused_original_minimum_age_seconds
+ maxage = CONF.remove_unused_original_minimum_age_seconds
if age < maxage:
LOG.info(_('Base file too young to remove: %s'),
@@ -374,7 +373,7 @@ class ImageCacheManager(object):
# created, but may remain from previous versions.
self._reset_state()
- base_dir = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
LOG.debug(_('Skipping verification, no base directory at %s'),
base_dir)
@@ -420,7 +419,7 @@ class ImageCacheManager(object):
LOG.info(_('Removable base files: %s'),
' '.join(self.removable_base_files))
- if FLAGS.remove_unused_base_images:
+ if CONF.remove_unused_base_images:
for base_file in self.removable_base_files:
self._remove_base_file(base_file)
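
The imagecache module keeps pulling instances_path and base_dir_name in with CONF.import_opt, since those options are registered by nova.compute.manager rather than locally. A minimal sketch of that pattern; base_dir is an illustrative helper, not part of the patch:

    import os

    from nova import config

    CONF = config.CONF
    CONF.import_opt('instances_path', 'nova.compute.manager')
    CONF.import_opt('base_dir_name', 'nova.compute.manager')

    def base_dir():
        # import_opt loads the owning module, so both attributes below are
        # guaranteed to be registered before they are read.
        return os.path.join(CONF.instances_path, CONF.base_dir_name)
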
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 856efec4f..53b7a7571 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -48,10 +48,9 @@ util_opts = [
]
CONF = config.CONF
+CONF.register_opts(util_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('base_dir_name', 'nova.compute.manager')
-FLAGS = flags.FLAGS
-FLAGS.register_opts(util_opts)
def execute(*args, **kwargs):
@@ -212,12 +211,12 @@ def pick_disk_driver_name(is_block_dev=False):
:param is_block_dev:
:returns: driver_name or None
"""
- if FLAGS.libvirt_type == "xen":
+ if CONF.libvirt_type == "xen":
if is_block_dev:
return "phy"
else:
return "tap"
- elif FLAGS.libvirt_type in ('kvm', 'qemu'):
+ elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
# UML doesn't want a driver_name set
@@ -396,7 +395,7 @@ def find_disk(virt_dom):
May be file or device"""
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
source = domain.find('devices/filesystem/source')
disk_path = source.get('dir')
disk_path = disk_path[0:disk_path.rfind('rootfs')]
@@ -452,7 +451,7 @@ def get_info_filename(base_path):
"""
base_file = os.path.basename(base_path)
- return (FLAGS.image_info_filename_pattern
+ return (CONF.image_info_filename_pattern
% {'image': base_file})
@@ -460,7 +459,7 @@ def is_valid_info_file(path):
"""Test if a given path matches the pattern for info files."""
digest_size = hashlib.sha1().digestsize * 2
- regexp = (FLAGS.image_info_filename_pattern
+ regexp = (CONF.image_info_filename_pattern
% {'image': ('([0-9a-f]{%(digest_size)d}|'
'[0-9a-f]{%(digest_size)d}_sm|'
'[0-9a-f]{%(digest_size)d}_[0-9]+)'
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d6edd1ad8..56c26dfd1 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -42,9 +42,8 @@ libvirt_vif_opts = [
help='Use virtio for bridge interfaces'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(libvirt_vif_opts)
CONF = config.CONF
+CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
LINUX_DEV_LEN = 14
@@ -63,7 +62,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
conf.mac_addr = mapping['mac']
conf.source_dev = network['bridge']
conf.script = ""
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
@@ -71,15 +70,15 @@ class LibvirtBridgeDriver(vif.VIFDriver):
if mapping['dhcp_server']:
conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
conf.add_filter_param("RASERVER",
mapping.get('gateway_v6') + "/128")
- if FLAGS.allow_same_net_traffic:
+ if CONF.allow_same_net_traffic:
net, mask = netutils.get_net_and_mask(network['cidr'])
conf.add_filter_param("PROJNET", net)
conf.add_filter_param("PROJMASK", mask)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
conf.add_filter_param("PROJNET6", net_v6)
@@ -93,7 +92,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
- iface = FLAGS.vlan_interface or network['bridge_interface']
+ iface = CONF.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']},
@@ -103,7 +102,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
network['bridge'],
iface)
else:
- iface = FLAGS.flat_interface or network['bridge_interface']
+ iface = CONF.flat_interface or network['bridge_interface']
LOG.debug(_("Ensuring bridge %s"), network['bridge'],
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
@@ -129,7 +128,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
- FLAGS.libvirt_ovs_bridge, dev,
+ CONF.libvirt_ovs_bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
@@ -138,7 +137,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
run_as_root=True)
def delete_ovs_vif_port(self, dev):
- utils.execute('ovs-vsctl', 'del-port', FLAGS.libvirt_ovs_bridge,
+ utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge,
dev, run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
@@ -165,7 +164,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
conf = vconfig.LibvirtConfigGuestInterface()
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.net_type = "ethernet"
conf.target_dev = dev
@@ -260,9 +259,9 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
conf = vconfig.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
- conf.source_dev = FLAGS.libvirt_ovs_bridge
+ conf.source_dev = CONF.libvirt_ovs_bridge
conf.mac_addr = mapping['mac']
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", mapping['vif_uuid'])
@@ -285,12 +284,12 @@ class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
- if FLAGS.libvirt_type != 'xen':
+ if CONF.libvirt_type != 'xen':
linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev)
conf = vconfig.LibvirtConfigGuestInterface()
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = 'virtio'
conf.net_type = "ethernet"
conf.target_dev = dev
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 7020c2518..03c335fa0 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -30,7 +30,6 @@ from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('num_iscsi_scan_tries', 'nova.volume.driver')
@@ -92,11 +91,11 @@ class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
netdisk_properties = connection_info['data']
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
- FLAGS.rbd_secret_uuid):
- conf.auth_secret_uuid = FLAGS.rbd_secret_uuid
+ CONF.rbd_secret_uuid):
+ conf.auth_secret_uuid = CONF.rbd_secret_uuid
auth_enabled = True # Force authentication locally
- if FLAGS.rbd_user:
- conf.auth_username = FLAGS.rbd_user
+ if CONF.rbd_user:
+ conf.auth_username = CONF.rbd_user
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
@@ -172,7 +171,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
while not os.path.exists(host_device):
- if tries >= FLAGS.num_iscsi_scan_tries:
+ if tries >= CONF.num_iscsi_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index cf901b23a..5dec74e3d 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -20,6 +20,7 @@
import ctypes
import os
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -34,8 +35,8 @@ volume_opts = [
default='$state_path/mnt',
help='Base dir where nfs expected to be mounted on compute'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = config.CONF
+CONF.register_opts(volume_opts)
class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@@ -63,7 +64,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
"""
@type nfs_export: string
"""
- mount_path = os.path.join(FLAGS.nfs_mount_point_base,
+ mount_path = os.path.join(CONF.nfs_mount_point_base,
self.get_hash_str(nfs_export))
self._mount_nfs(mount_path, nfs_export, ensure=True)
return mount_path
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 05b642b8f..484cb3db3 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -26,8 +26,6 @@ import netaddr
from nova import config
from nova import flags
-
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
@@ -57,8 +55,8 @@ def get_ip_version(cidr):
return int(net.version)
-def get_injected_network_template(network_info, use_ipv6=FLAGS.use_ipv6,
- template=FLAGS.injected_network_template):
+def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
+ template=CONF.injected_network_template):
"""
return a rendered network template for the given network_info
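
The keyword defaults use_ipv6=CONF.use_ipv6 and template=CONF.injected_network_template are evaluated once, when the module is imported, so callers get whatever CONF held at import time. A hedged sketch of a call-time lookup, shown only for contrast and not part of this patch; resolve_template_opts is an illustrative name and CONF is assumed to be the module-level object from the hunk:

    def resolve_template_opts(use_ipv6=None, template=None):
        # Resolve the options at call time instead of freezing them as
        # keyword defaults at import time (illustrative alternative only).
        if use_ipv6 is None:
            use_ipv6 = CONF.use_ipv6
        if template is None:
            template = CONF.injected_network_template
        return use_ipv6, template
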
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index f4f26045e..7a0da0b88 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -17,6 +17,7 @@
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context as nova_context
from nova import flags
@@ -50,8 +51,8 @@ powervm_opts = [
help='Local directory to download glance images to'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(powervm_opts)
+CONF = config.CONF
+CONF.register_opts(powervm_opts)
class PowerVMDriver(driver.ComputeDriver):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 2601f0537..09ad662b3 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -20,6 +20,7 @@ import os
import re
import time
+from nova import config
from nova import exception as nova_exception
from nova import flags
from nova import utils
@@ -35,14 +36,14 @@ from nova.virt.powervm import lpar as LPAR
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
def get_powervm_operator():
- if FLAGS.powervm_mgr_type == 'ivm':
- return IVMOperator(common.Connection(FLAGS.powervm_mgr,
- FLAGS.powervm_mgr_user,
- FLAGS.powervm_mgr_passwd))
+ if CONF.powervm_mgr_type == 'ivm':
+ return IVMOperator(common.Connection(CONF.powervm_mgr,
+ CONF.powervm_mgr_user,
+ CONF.powervm_mgr_passwd))
class PowerVMOperator(object):
@@ -217,14 +218,14 @@ class PowerVMOperator(object):
"""Fetch image from glance and copy it to the remote system."""
try:
file_name = '.'.join([image_id, 'gz'])
- file_path = os.path.join(FLAGS.powervm_img_local_path,
+ file_path = os.path.join(CONF.powervm_img_local_path,
file_name)
LOG.debug(_("Fetching image '%s' from glance") % image_id)
images.fetch_to_raw(context, image_id, file_path,
instance['user_id'],
project_id=instance['project_id'])
LOG.debug(_("Copying image '%s' to IVM") % file_path)
- remote_path = FLAGS.powervm_img_remote_path
+ remote_path = CONF.powervm_img_remote_path
remote_file_name, size = self._operator.copy_image_file(
file_path, remote_path)
# Logical volume
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index e56f81213..25cd05778 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -36,6 +36,7 @@ import time
from eventlet import event
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -81,8 +82,8 @@ vmwareapi_opts = [
help='Physical ethernet adapter name for vlan networking'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(vmwareapi_opts)
+CONF = config.CONF
+CONF.register_opts(vmwareapi_opts)
TIME_BETWEEN_API_CALL_RETRIES = 2.0
@@ -103,10 +104,10 @@ class VMWareESXDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMWareESXDriver, self).__init__(virtapi)
- host_ip = FLAGS.vmwareapi_host_ip
- host_username = FLAGS.vmwareapi_host_username
- host_password = FLAGS.vmwareapi_host_password
- api_retry_count = FLAGS.vmwareapi_api_retry_count
+ host_ip = CONF.vmwareapi_host_ip
+ host_username = CONF.vmwareapi_host_username
+ host_password = CONF.vmwareapi_host_password
+ api_retry_count = CONF.vmwareapi_api_retry_count
if not host_ip or host_username is None or host_password is None:
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
@@ -177,7 +178,7 @@ class VMWareESXDriver(driver.ComputeDriver):
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
- 'ip': FLAGS.vmwareapi_host_ip,
+ 'ip': CONF.vmwareapi_host_ip,
'initiator': None,
'host': None
}
@@ -192,9 +193,9 @@ class VMWareESXDriver(driver.ComputeDriver):
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
- return {'address': FLAGS.vmwareapi_host_ip,
- 'username': FLAGS.vmwareapi_host_username,
- 'password': FLAGS.vmwareapi_host_password}
+ return {'address': CONF.vmwareapi_host_ip,
+ 'username': CONF.vmwareapi_host_username,
+ 'password': CONF.vmwareapi_host_password}
def get_available_resource(self):
"""This method is supported only by libvirt."""
@@ -373,7 +374,7 @@ class VMWareAPISession(object):
done = event.Event()
loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
done)
- loop.start(FLAGS.vmwareapi_task_poll_interval)
+ loop.start(CONF.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
return ret_val
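
The ESX driver reads its connection settings from CONF and fails fast when they are unset. A minimal sketch of that check, assuming the vmwareapi_* options registered above; vmware_connection_params is an illustrative helper, not code from the patch:

    from nova import config

    CONF = config.CONF

    def vmware_connection_params():
        # Illustrative helper: fail fast when the required ESX connection
        # options are unset, mirroring the check in VMWareESXDriver.__init__.
        host_ip = CONF.vmwareapi_host_ip
        username = CONF.vmwareapi_host_username
        password = CONF.vmwareapi_host_password
        if not host_ip or username is None or password is None:
            raise ValueError('vmwareapi_host_ip, vmwareapi_host_username and '
                             'vmwareapi_host_password must be set')
        return host_ip, username, password
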
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index b0d7cef0b..d8840938d 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -32,8 +32,6 @@ from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
USER_AGENT = "OpenStack-ESX-Adapter"
READ_CHUNKSIZE = 65536
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index a00dd5c36..c98bde68c 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -17,6 +17,7 @@
"""VIF drivers for VMWare."""
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
@@ -26,8 +27,8 @@ from nova.virt.vmwareapi import network_utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-FLAGS.set_default('vmwareapi_vlan_interface', 'vmnic0')
+CONF = config.CONF
+CONF.set_default('vmwareapi_vlan_interface', 'vmnic0')
class VMWareVlanBridgeDriver(vif.VIFDriver):
@@ -44,7 +45,7 @@ class VMWareVlanBridgeDriver(vif.VIFDriver):
"""Create a vlan and bridge unless they already exist."""
vlan_num = network['vlan']
bridge = network['bridge']
- vlan_interface = FLAGS.vmwareapi_vlan_interface
+ vlan_interface = CONF.vmwareapi_vlan_interface
# Check if the vlan_interface physical network adapter exists on the
# host.
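
CONF.set_default only overrides the default of an option registered elsewhere; an explicit value in nova.conf still takes precedence. A minimal sketch, assuming vmwareapi_vlan_interface is registered by the vmwareapi driver options earlier in this diff:

    from nova import config

    CONF = config.CONF
    # Only the default changes here; a value set in nova.conf still wins.
    CONF.set_default('vmwareapi_vlan_interface', 'vmnic0')
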
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index d8684ce7c..c00617f4b 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -26,6 +26,7 @@ try:
except ImportError:
suds = None
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.virt.vmwareapi import error_util
@@ -41,8 +42,8 @@ vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
'Due to a bug in vSphere ESX 4.1 default wsdl. '
'Refer readme-vmware to setup')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(vmwareapi_wsdl_loc_opt)
+CONF = config.CONF
+CONF.register_opt(vmwareapi_wsdl_loc_opt)
if suds:
@@ -85,7 +86,7 @@ class Vim:
self._protocol = protocol
self._host_name = host
- wsdl_url = FLAGS.vmwareapi_wsdl_loc
+ wsdl_url = CONF.vmwareapi_wsdl_loc
if wsdl_url is None:
raise Exception(_("Must specify vmwareapi_wsdl_loc"))
# TODO(sateesh): Use this when VMware fixes their faulty wsdl
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 541e16e44..ab1eda134 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,6 +27,7 @@ import urllib2
import uuid
from nova.compute import power_state
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -42,8 +43,8 @@ vmware_vif_driver_opt = cfg.StrOpt('vmware_vif_driver',
default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
help='The VMWare VIF driver to configure the VIFs.')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(vmware_vif_driver_opt)
+CONF = config.CONF
+CONF.register_opt(vmware_vif_driver_opt)
LOG = logging.getLogger(__name__)
@@ -59,7 +60,7 @@ class VMWareVMOps(object):
def __init__(self, session):
"""Initializer."""
self._session = session
- self._vif_driver = importutils.import_object(FLAGS.vmware_vif_driver)
+ self._vif_driver = importutils.import_object(CONF.vmware_vif_driver)
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@@ -207,7 +208,7 @@ class VMWareVMOps(object):
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Naming the VM files in correspondence with the VM instance name
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 605c95cfd..9fad07898 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,6 +21,7 @@ import os
import time
import uuid
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
@@ -56,8 +57,8 @@ xenapi_agent_opts = [
'that VM does not have the agent installed'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_agent_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_agent_opts)
def _call_agent(session, instance, vm_ref, method, addl_args=None,
@@ -66,7 +67,7 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
if addl_args is None:
addl_args = {}
if timeout is None:
- timeout = FLAGS.agent_timeout
+ timeout = CONF.agent_timeout
vm_rec = session.call_xenapi("VM.get_record", vm_ref)
@@ -137,7 +138,7 @@ class XenAPIBasedAgent(object):
# also take a while to boot. So we need to be more patient than
# normal as well as watch for domid changes
- expiration = time.time() + FLAGS.agent_version_timeout
+ expiration = time.time() + CONF.agent_version_timeout
while time.time() < expiration:
ret = _get_agent_version(self.session, self.instance, self.vm_ref)
if ret:
@@ -235,7 +236,7 @@ class XenAPIBasedAgent(object):
resp = _call_agent(
self.session, self.instance, self.vm_ref, 'resetnetwork',
- timeout=FLAGS.agent_resetnetwork_timeout)
+ timeout=CONF.agent_resetnetwork_timeout)
if resp['returncode'] != '0':
LOG.error(_('Failed to reset network: %(resp)r'), locals(),
instance=self.instance)
@@ -249,10 +250,10 @@ def find_guest_agent(base_dir):
tries to locate a guest agent at the path
specified by agent_rel_path
"""
- if FLAGS.xenapi_disable_agent:
+ if CONF.xenapi_disable_agent:
return False
- agent_rel_path = FLAGS.xenapi_agent_path
+ agent_rel_path = CONF.xenapi_agent_path
agent_path = os.path.join(base_dir, agent_rel_path)
if os.path.isfile(agent_path):
# The presence of the guest agent
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 4d032e891..55b67a931 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -46,6 +46,7 @@ import xmlrpclib
from eventlet import queue
from eventlet import timeout
+from nova import config
from nova import context
from nova import db
from nova import exception
@@ -118,8 +119,8 @@ xenapi_opts = [
help='Timeout in seconds for XenAPI login.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_opts)
class XenAPIDriver(driver.ComputeDriver):
@@ -128,9 +129,9 @@ class XenAPIDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
@@ -153,7 +154,7 @@ class XenAPIDriver(driver.ComputeDriver):
return self._host_state
def init_host(self, host):
- if FLAGS.xenapi_check_host:
+ if CONF.xenapi_check_host:
vm_utils.ensure_correct_host(self._session)
try:
@@ -368,7 +369,7 @@ class XenAPIDriver(driver.ComputeDriver):
@staticmethod
def get_host_ip_addr():
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
def attach_volume(self, connection_info, instance_name, mountpoint):
@@ -384,10 +385,10 @@ class XenAPIDriver(driver.ComputeDriver):
mountpoint)
def get_console_pool_info(self, console_type):
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
- 'username': FLAGS.xenapi_connection_username,
- 'password': FLAGS.xenapi_connection_password}
+ 'username': CONF.xenapi_connection_username,
+ 'password': CONF.xenapi_connection_password}
def get_available_resource(self):
"""Retrieve resource info.
@@ -631,7 +632,7 @@ class XenAPISession(object):
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
- with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
+ with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure, e:
# if user and pw of the master are different, we're doomed!
@@ -647,21 +648,21 @@ class XenAPISession(object):
return url
def _populate_session_pool(self, url, user, pw, exception):
- for i in xrange(FLAGS.xenapi_connection_concurrent - 1):
+ for i in xrange(CONF.xenapi_connection_concurrent - 1):
session = self._create_session(url)
- with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
+ with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = db.aggregate_get_by_host(context.get_admin_context(),
- FLAGS.host, key=pool_states.POOL_FLAG)[0]
+ CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
- return aggr.metadetails[FLAGS.host]
+ return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index f2b90c74b..cd837e834 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -25,9 +25,7 @@ from nova.openstack.common import log as logging
from nova.virt import firewall
from nova.virt import netutils
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 39a3b9824..f109f33f4 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -231,7 +231,7 @@ def _host_find(context, session, src, dst):
:return: the compute host that manages dst
"""
# NOTE: this would be a lot simpler if nova-compute stored
- # FLAGS.host in the XenServer host's other-config map.
+ # CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
aggregate = db.aggregate_get_by_host(context, src,
key=pool_states.POOL_FLAG)[0]
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 0c2565dbd..fc66099ca 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -22,6 +22,7 @@ Management class for Pool-related functions (join, eject, etc).
import urlparse
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -40,8 +41,8 @@ xenapi_pool_opts = [
help='To use for hosts with different CPUs'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_pool_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_pool_opts)
class ResourcePool(object):
@@ -110,7 +111,7 @@ class ResourcePool(object):
# whether we can serve the request from this host or not.
master_compute = self._get_metadata(context,
aggregate.id)['master_compute']
- if master_compute == FLAGS.host and master_compute != host:
+ if master_compute == CONF.host and master_compute != host:
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
@@ -147,7 +148,7 @@ class ResourcePool(object):
master_compute = self._get_metadata(context,
aggregate.id)['master_compute']
- if master_compute == FLAGS.host and master_compute != host:
+ if master_compute == CONF.host and master_compute != host:
# this is the master -> instruct it to eject a host from the pool
host_uuid = self._get_metadata(context, aggregate.id)[host]
self._eject_slave(aggregate.id,
@@ -190,10 +191,10 @@ class ResourcePool(object):
'url': url,
'user': user,
'password': passwd,
- 'force': jsonutils.dumps(FLAGS.use_join_force),
+ 'force': jsonutils.dumps(CONF.use_join_force),
'master_addr': self._host_addr,
- 'master_user': FLAGS.xenapi_connection_username,
- 'master_pass': FLAGS.xenapi_connection_password, }
+ 'master_user': CONF.xenapi_connection_username,
+ 'master_pass': CONF.xenapi_connection_password, }
self._session.call_plugin('xenhost', 'host_join', args)
except self._session.XenAPI.Failure as e:
LOG.error(_("Pool-Join failed: %(e)s") % locals())
@@ -249,12 +250,12 @@ class ResourcePool(object):
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
sender_url = swap_xapi_host(
- FLAGS.xenapi_connection_url, self._host_addr)
+ CONF.xenapi_connection_url, self._host_addr)
return {
"url": sender_url,
- "user": FLAGS.xenapi_connection_username,
- "passwd": FLAGS.xenapi_connection_password,
+ "user": CONF.xenapi_connection_username,
+ "passwd": CONF.xenapi_connection_password,
"compute_uuid": vm_utils.get_this_vm_uuid(),
"xenhost_uuid": self._host_uuid,
}
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 6d943804d..13dc74ba9 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -19,6 +19,7 @@
"""VIF drivers for XenAPI."""
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -31,8 +32,8 @@ xenapi_ovs_integration_bridge_opt = cfg.StrOpt('xenapi_ovs_integration_bridge',
default='xapi1',
help='Name of Integration Bridge used by Open vSwitch')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(xenapi_ovs_integration_bridge_opt)
+CONF = config.CONF
+CONF.register_opt(xenapi_ovs_integration_bridge_opt)
LOG = logging.getLogger(__name__)
@@ -76,7 +77,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
- bridge_interface = (FLAGS.vlan_interface or
+ bridge_interface = (CONF.vlan_interface or
network.get_meta('bridge_interface'))
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
@@ -144,7 +145,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# with OVS model, always plug into an OVS integration bridge
# that is already created
network_ref = network_utils.find_network_with_bridge(
- self._session, FLAGS.xenapi_ovs_integration_bridge)
+ self._session, CONF.xenapi_ovs_integration_bridge)
vif_rec = {}
vif_rec['device'] = str(device)
vif_rec['network'] = network_ref
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 02fd9188f..d0f59f56f 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -37,6 +37,7 @@ from eventlet import greenthread
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -116,8 +117,8 @@ xenapi_vm_utils_opts = [
' within a given dom0. (-1 = no limit)')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_vm_utils_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_vm_utils_opts)
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -318,7 +319,7 @@ def unplug_vbd(session, vbd_ref):
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
- max_attempts = FLAGS.xenapi_num_vbd_unplug_retries + 1
+ max_attempts = CONF.xenapi_num_vbd_unplug_retries + 1
for num_attempt in xrange(1, max_attempts + 1):
try:
session.call_xenapi('VBD.unplug', vbd_ref)
@@ -609,7 +610,7 @@ def get_sr_path(session):
sr_ref = safe_find_sr(session)
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
- return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+ return os.path.join(CONF.xenapi_sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
@@ -695,7 +696,7 @@ def upload_image(context, session, instance, vdi_uuids, image_id):
properties = {
'auto_disk_config': instance['auto_disk_config'],
- 'os_type': instance['os_type'] or FLAGS.default_os_type,
+ 'os_type': instance['os_type'] or CONF.default_os_type,
}
params = {'vdi_uuids': vdi_uuids,
@@ -831,7 +832,7 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'ephemeral', size_gb * 1024,
- FLAGS.default_ephemeral_format)
+ CONF.default_ephemeral_format)
def create_kernel_image(context, session, instance, name_label, image_id,
@@ -842,7 +843,7 @@ def create_kernel_image(context, session, instance, name_label, image_id,
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
- if FLAGS.cache_images:
+ if CONF.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
@@ -872,7 +873,7 @@ def _create_cached_image(context, session, instance, name_label,
sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
vdis = {}
- if FLAGS.use_cow_images and sr_type != "ext":
+ if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %(sr_type)s. Ignoring the cow flag.")
@@ -890,7 +891,7 @@ def _create_cached_image(context, session, instance, name_label,
session.call_xenapi('VDI.add_to_other_config',
root_vdi_ref, 'image-id', str(image_id))
- if FLAGS.use_cow_images and sr_type == 'ext':
+ if CONF.use_cow_images and sr_type == 'ext':
new_vdi_ref = _clone_vdi(session, root_vdi_ref)
else:
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)
@@ -913,7 +914,7 @@ def _create_image(context, session, instance, name_label, image_id,
Returns: A list of dictionaries that describe VDIs
"""
- cache_images = FLAGS.cache_images.lower()
+ cache_images = CONF.cache_images.lower()
    # Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
@@ -932,7 +933,7 @@ def _create_image(context, session, instance, name_label, image_id,
cache = False
else:
LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
- " True"), FLAGS.cache_images)
+ " True"), CONF.cache_images)
cache = True
# Fetch (and cache) the image
@@ -974,7 +975,7 @@ def _fetch_image(context, session, instance, name_label, image_id, image_type):
def _fetch_using_dom0_plugin_with_retry(context, session, image_id,
plugin_name, params, callback=None):
- max_attempts = FLAGS.glance_num_retries + 1
+ max_attempts = CONF.glance_num_retries + 1
sleep_time = 0.5
for attempt_num in xrange(1, max_attempts + 1):
LOG.info(_('download_vhd %(image_id)s, '
@@ -1011,7 +1012,7 @@ def _make_uuid_stack():
def _image_uses_bittorrent(context, instance):
bittorrent = False
- xenapi_torrent_images = FLAGS.xenapi_torrent_images.lower()
+ xenapi_torrent_images = CONF.xenapi_torrent_images.lower()
if xenapi_torrent_images == 'all':
bittorrent = True
@@ -1047,19 +1048,19 @@ def _fetch_vhd_image(context, session, instance, image_id):
if _image_uses_bittorrent(context, instance):
plugin_name = 'bittorrent'
callback = None
- params['torrent_base_url'] = FLAGS.xenapi_torrent_base_url
- params['torrent_seed_duration'] = FLAGS.xenapi_torrent_seed_duration
- params['torrent_seed_chance'] = FLAGS.xenapi_torrent_seed_chance
+ params['torrent_base_url'] = CONF.xenapi_torrent_base_url
+ params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
+ params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
params['torrent_max_last_accessed'] =\
- FLAGS.xenapi_torrent_max_last_accessed
+ CONF.xenapi_torrent_max_last_accessed
params['torrent_listen_port_start'] =\
- FLAGS.xenapi_torrent_listen_port_start
+ CONF.xenapi_torrent_listen_port_start
params['torrent_listen_port_end'] =\
- FLAGS.xenapi_torrent_listen_port_end
+ CONF.xenapi_torrent_listen_port_end
params['torrent_download_stall_cutoff'] =\
- FLAGS.xenapi_torrent_download_stall_cutoff
+ CONF.xenapi_torrent_download_stall_cutoff
params['torrent_max_seeder_processes_per_host'] =\
- FLAGS.xenapi_torrent_max_seeder_processes_per_host
+ CONF.xenapi_torrent_max_seeder_processes_per_host
else:
plugin_name = 'glance'
glance_api_servers = glance.get_api_servers()
@@ -1162,8 +1163,8 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
- vdi_size > FLAGS.max_kernel_ramdisk_size):
- max_size = FLAGS.max_kernel_ramdisk_size
+ vdi_size > CONF.max_kernel_ramdisk_size):
+ max_size = CONF.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") % locals())
@@ -1192,7 +1193,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
- if FLAGS.cache_images:
+ if CONF.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
@@ -1461,13 +1462,13 @@ def _find_sr(session):
"""Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
try:
- tokens = FLAGS.sr_matching_filter.split(':')
+ tokens = CONF.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
- "formatting convention"), FLAGS.sr_matching_filter)
+ "formatting convention"), CONF.sr_matching_filter)
return None
if filter_criteria == 'other-config':
@@ -1535,7 +1536,7 @@ def _find_iso_sr(session):
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return [xs_url.scheme, xs_url.netloc]
@@ -1544,8 +1545,8 @@ def _get_rrd(server, vm_uuid):
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
- FLAGS.xenapi_connection_username,
- FLAGS.xenapi_connection_password,
+ CONF.xenapi_connection_username,
+ CONF.xenapi_connection_password,
server[1],
vm_uuid))
return xml.read()
@@ -1560,8 +1561,8 @@ def _get_rrd_updates(server, start_time):
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
- FLAGS.xenapi_connection_username,
- FLAGS.xenapi_connection_password,
+ CONF.xenapi_connection_username,
+ CONF.xenapi_connection_password,
server[1],
start_time))
return xml.read()
@@ -1762,7 +1763,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
# matches the underlying VHDs.
_scan_sr(session, sr_ref)
- max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
+ max_attempts = CONF.xenapi_vhd_coalesce_max_attempts
for i in xrange(max_attempts):
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
@@ -1775,7 +1776,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
return parent_uuid, base_uuid
- greenthread.sleep(FLAGS.xenapi_vhd_coalesce_poll_interval)
+ greenthread.sleep(CONF.xenapi_vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%(max_attempts)d)"
", giving up...") % locals())
@@ -1792,12 +1793,12 @@ def _remap_vbd_dev(dev):
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
- should_remap = FLAGS.xenapi_remap_vbd_dev
+ should_remap = CONF.xenapi_remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
- new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
+ new_prefix = CONF.xenapi_remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
@@ -1805,7 +1806,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
"""Wait for device node to appear"""
- for i in xrange(0, FLAGS.block_device_creation_timeout):
+ for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
@@ -2047,7 +2048,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
_write_partition(virtual_size, dst)
- if FLAGS.xenapi_sparse_copy:
+ if CONF.xenapi_sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
@@ -2102,7 +2103,7 @@ def _prepare_injectables(inst, network_info):
#only if injection is performed
from Cheetah import Template as t
template = t.Template
- template_data = open(FLAGS.injected_network_template).read()
+ template_data = open(CONF.injected_network_template).read()
metadata = inst['metadata']
key = str(inst['key_data'])
@@ -2137,7 +2138,7 @@ def _prepare_injectables(inst, network_info):
'address_v6': '',
'netmask_v6': '',
'gateway_v6': '',
- 'use_ipv6': FLAGS.use_ipv6}
+ 'use_ipv6': CONF.use_ipv6}
# NOTE(tr3buchet): the original code used the old network_info
# which only supported a single ipv4 subnet
@@ -2187,7 +2188,7 @@ def _prepare_injectables(inst, network_info):
if interfaces_info:
net = str(template(template_data,
searchList=[{'interfaces': interfaces_info,
- 'use_ipv6': FLAGS.use_ipv6}]))
+ 'use_ipv6': CONF.use_ipv6}]))
return key, net, metadata
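
Editor's note: one detail worth calling out from the vm_utils.py changes is that the _find_sr hunk still relies on sr_matching_filter following a 'criteria:pattern' convention, and a value without the second colon-separated token is rejected with a warning. A minimal standalone sketch of that convention (the helper name is hypothetical, not part of the patch):

    def parse_sr_matching_filter(value):
        """Split an sr_matching_filter value of the form 'criteria:pattern'.

        Returns (criteria, pattern), or None when the value does not
        respect the convention, mirroring the warn-and-return-None
        handling in _find_sr above.
        """
        tokens = value.split(':')
        try:
            return tokens[0], tokens[1]
        except IndexError:
            return None

    # parse_sr_matching_filter('default-sr:true') -> ('default-sr', 'true')
    # parse_sr_matching_filter('malformed')       -> None
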
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 915c45243..0cef398ed 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -61,10 +61,8 @@ xenapi_vmops_opts = [
help='The XenAPI VIF driver using XenServer Network APIs.')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_vmops_opts)
-
CONF = config.CONF
+CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
@@ -155,13 +153,13 @@ class VMOps(object):
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER,
xenapi_session=self._session)
- vif_impl = importutils.import_class(FLAGS.xenapi_vif_driver)
+ vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
@property
def agent_enabled(self):
- return not FLAGS.xenapi_disable_agent
+ return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
@@ -424,7 +422,7 @@ class VMOps(object):
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
@@ -529,7 +527,7 @@ class VMOps(object):
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
- expiration = time.time() + FLAGS.xenapi_running_timeout
+ expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
@@ -1317,7 +1315,7 @@ class VMOps(object):
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
# NOTE: XS5.6sp2+ use http over port 80 for xenapi com
- return {'host': FLAGS.vncserver_proxyclient_address, 'port': 80,
+ return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
@@ -1540,10 +1538,10 @@ class VMOps(object):
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
- current_aggregate = db.aggregate_get_by_host(context, FLAGS.host,
+ current_aggregate = db.aggregate_get_by_host(context, CONF.host,
key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
- raise exception.AggregateHostNotFound(host=FLAGS.host)
+ raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
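
Editor's note: the vmops.py edits are again attribute-root swaps; the only one with a timing implication is the boot wait, whose deadline now comes from CONF.xenapi_running_timeout. A generic sketch of that polling shape, assuming a callable that reports the power state (all names here are illustrative):

    import time

    def wait_until_running(get_state, timeout, poll_interval=0.5):
        # Poll until the instance reports 'running' or the deadline
        # passes, matching the expiration loop in VMOps above (which
        # reads its timeout from CONF.xenapi_running_timeout).
        expiration = time.time() + timeout
        while time.time() < expiration:
            if get_state() == 'running':
                return True
            time.sleep(poll_interval)
        return False
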
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 7de29bf24..99976e0e6 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -22,11 +22,12 @@ and storage repositories
import re
import string
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -359,22 +360,22 @@ def _get_target_host(iscsi_string):
"""Retrieve target host"""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
- elif iscsi_string is None or FLAGS.target_host:
- return FLAGS.target_host
+ elif iscsi_string is None or CONF.target_host:
+ return CONF.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port"""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
- elif iscsi_string is None or FLAGS.target_port:
- return FLAGS.target_port
+ elif iscsi_string is None or CONF.target_port:
+ return CONF.target_port
def _get_iqn(iscsi_string, id):
"""Retrieve target IQN"""
if iscsi_string:
return iscsi_string
- elif iscsi_string is None or FLAGS.iqn_prefix:
+ elif iscsi_string is None or CONF.iqn_prefix:
volume_id = _get_volume_id(id)
- return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
+ return '%s:%s' % (CONF.iqn_prefix, volume_id)
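
Editor's note: the volume_utils.py helpers keep their fallback semantics; when no iSCSI target string is supplied, the host, port and IQN prefix are taken from configuration, now read through CONF. A self-contained sketch of the 'host:port' split plus fallback shape (function and argument names are illustrative, not from the patch):

    def parse_iscsi_target(iscsi_string, default_host=None, default_port=None):
        """Split an iSCSI target string of the form 'host:port'.

        Falls back to the supplied defaults when no string is given,
        mirroring the CONF.target_host / CONF.target_port fallbacks above.
        """
        if iscsi_string:
            host, _, port = iscsi_string.partition(':')
            return host, port
        return default_host, default_port

    # parse_iscsi_target('192.0.2.10:3260')        -> ('192.0.2.10', '3260')
    # parse_iscsi_target(None, 'cfg-host', '3260') -> ('cfg-host', '3260')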