123 files changed, 3581 insertions(+), 2884 deletions(-)
@@ -55,6 +55,7 @@ Devdeep Singh <devdeep.singh@citrix.com> Devendra Modium <dmodium@isi.edu> Devin Carlen <devin.carlen@gmail.com> Dina Belova <dbelova@mirantis.com> +Don Dugger <donald.d.dugger@intel.com> Donal Lafferty <donal.lafferty@citrix.com> Dong-In David Kang <dkang@isi.edu> Doug Hellmann <doug.hellmann@dreamhost.com> @@ -138,6 +139,7 @@ Masanori Itoh <itoumsn@nttdata.co.jp> Matt Dietz <matt.dietz@rackspace.com> Matt Stephenson <mattstep@mattstep.net> Matthew Hooker <matt@cloudscaling.com> +Matthew Joyce <matt.joyce@cloudscaling.com> Michael Basnight <mbasnigh@rackspace.com> Michael Gundlach <michael.gundlach@rackspace.com> Michael Still <mikal@stillhq.com> diff --git a/MANIFEST.in b/MANIFEST.in index 3c43e938b..ec01a3736 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -41,7 +41,6 @@ include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 include nova/tests/api/ec2/public_key/* -include nova/tests/db/nova.austin.sqlite include nova/tests/image/*.tar.gz include nova/tests/policy.json include nova/tests/test_migrations.conf diff --git a/bin/clear_rabbit_queues b/bin/clear_rabbit_queues index 37394add6..1912046b6 100755 --- a/bin/clear_rabbit_queues +++ b/bin/clear_rabbit_queues @@ -46,7 +46,6 @@ from nova import flags from nova import log as logging from nova.openstack.common import cfg from nova import rpc -from nova import utils delete_exchange_opt = \ @@ -71,8 +70,7 @@ def delete_queues(queues): x.queue_delete(q) if __name__ == '__main__': - utils.default_cfgfile() - args = flags.FLAGS(sys.argv) + args = flags.parse_args(sys.argv) logging.setup() rpc.register_opts(flags.FLAGS) delete_queues(args[1:]) diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit index 3db812bda..59fff1080 100755 --- a/bin/instance-usage-audit +++ b/bin/instance-usage-audit @@ -65,8 +65,7 @@ FLAGS = flags.FLAGS if __name__ == '__main__': rpc.register_opts(FLAGS) admin_context = context.get_admin_context() - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() begin, end = utils.last_completed_audit_period() print "Starting instance usage audit" diff --git a/bin/nova-all b/bin/nova-all index 529d98ed0..57e84b0dd 100755 --- a/bin/nova-all +++ b/bin/nova-all @@ -51,8 +51,7 @@ from nova.vnc import xvp_proxy LOG = logging.getLogger('nova.all') if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() servers = [] diff --git a/bin/nova-api b/bin/nova-api index 7a601c3a6..e6779df4f 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -42,8 +42,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() servers = [] diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2 index e1a1549c7..b53c9158a 100755 --- a/bin/nova-api-ec2 +++ b/bin/nova-api-ec2 @@ -38,8 +38,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.WSGIService('ec2') diff --git a/bin/nova-api-metadata b/bin/nova-api-metadata index c4ca541f4..2f2ef9454 100755 --- a/bin/nova-api-metadata +++ b/bin/nova-api-metadata @@ -38,8 +38,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + 
flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.WSGIService('metadata') diff --git a/bin/nova-api-os-compute b/bin/nova-api-os-compute index 9c49815fe..75c921943 100755 --- a/bin/nova-api-os-compute +++ b/bin/nova-api-os-compute @@ -38,8 +38,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.WSGIService('osapi_compute') diff --git a/bin/nova-api-os-volume b/bin/nova-api-os-volume index 21d5dc3de..b93fd51ae 100755 --- a/bin/nova-api-os-volume +++ b/bin/nova-api-os-volume @@ -38,8 +38,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.WSGIService('osapi_volume') diff --git a/bin/nova-cert b/bin/nova-cert index f6a5cbbd8..a845c1055 100755 --- a/bin/nova-cert +++ b/bin/nova-cert @@ -38,8 +38,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.Service.create(binary='nova-cert') diff --git a/bin/nova-compute b/bin/nova-compute index 8f7e9177d..bef7bce86 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -40,8 +40,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.Service.create(binary='nova-compute') diff --git a/bin/nova-console b/bin/nova-console index 06e352f48..f5a760b37 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -36,11 +36,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import flags from nova import log as logging from nova import service -from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() server = service.Service.create(binary='nova-console') service.serve(server) diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth index 9c566cb7c..71d77b285 100755 --- a/bin/nova-consoleauth +++ b/bin/nova-consoleauth @@ -35,12 +35,10 @@ from nova.consoleauth import manager from nova import flags from nova import log as logging from nova import service -from nova import utils if __name__ == "__main__": - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() server = service.Service.create(binary='nova-consoleauth') diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index fab334bf5..c11947da2 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -96,8 +96,7 @@ def init_leases(network_id): def main(): """Parse environment and arguments and call the approproate action.""" flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) - utils.default_cfgfile(flagfile) - argv = FLAGS(sys.argv) + argv = flags.parse_args(sys.argv) logging.setup() rpc.register_opts(FLAGS) diff --git a/bin/nova-manage b/bin/nova-manage index a4eaf9d6b..1554251a0 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -87,6 +87,7 @@ from nova.db import migration from nova import exception from nova import flags from nova import log as logging +from nova.openstack.common import cfg from nova.openstack.common import importutils from nova import quota from nova 
import rpc @@ -463,7 +464,7 @@ class ProjectCommands(object): @args('--file', dest="filename", metavar='<filename>', help='File name(Default: novarc)') def environment(self, project_id, user_id, filename='novarc'): - """Exports environment variables to an sourcable file""" + """Exports environment variables to a sourceable file""" try: rc = self.manager.get_environment_rc(user_id, project_id) except (exception.UserNotFound, exception.ProjectNotFound) as ex: @@ -633,7 +634,7 @@ class FloatingIpCommands(object): @staticmethod def address_to_hosts(addresses): """ - Iterate over hosts within a address range. + Iterate over hosts within an address range. If an explicit range specifier is missing, the parameter is interpreted as a specific individual address. @@ -1152,7 +1153,7 @@ class VolumeCommands(object): return rpc.cast(ctxt, - db.queue_get_for(ctxt, FLAGS.volume_topic, host), + rpc.queue_get_for(ctxt, FLAGS.volume_topic, host), {"method": "delete_volume", "args": {"volume_id": volume['id']}}) @@ -1170,7 +1171,7 @@ class VolumeCommands(object): instance = db.instance_get(ctxt, volume['instance_id']) host = instance['host'] rpc.cast(ctxt, - db.queue_get_for(ctxt, FLAGS.compute_topic, host), + rpc.queue_get_for(ctxt, FLAGS.compute_topic, host), {"method": "attach_volume", "args": {"instance_id": instance['id'], "volume_id": volume['id'], @@ -1668,26 +1669,24 @@ def methods_of(obj): def main(): """Parse options and call the appropriate class/method.""" - cfgfile = utils.default_cfgfile() - - if cfgfile and not os.access(cfgfile, os.R_OK): - st = os.stat(cfgfile) - print "Could not read %s. Re-running with sudo" % cfgfile - try: - os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) - except Exception: - print 'sudo failed, continuing as if nothing happened' - rpc.register_opts(FLAGS) try: - argv = FLAGS(sys.argv) + argv = flags.parse_args(sys.argv) logging.setup() - except IOError, e: - if e.errno == errno.EACCES: - print _('Please re-run nova-manage as root.') - sys.exit(2) - raise + except cfg.ConfigFilesNotFoundError: + cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None + if cfgfile and not os.access(cfgfile, os.R_OK): + st = os.stat(cfgfile) + print _("Could not read %s. 
Re-running with sudo") % cfgfile + try: + os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) + except Exception: + print _('sudo failed, continuing as if nothing happened') + + print _('Please re-run nova-manage as root.') + sys.exit(2) + script_name = argv.pop(0) if len(argv) < 1: print _("\nOpenStack Nova version: %(version)s (%(vcs)s)\n") % \ diff --git a/bin/nova-network b/bin/nova-network index d176eb27c..306eddafa 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -40,8 +40,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.Service.create(binary='nova-network') diff --git a/bin/nova-objectstore b/bin/nova-objectstore index e52160c25..e9bd0f42d 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -42,8 +42,7 @@ from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = s3server.get_wsgi_server() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 52db4de7e..fb803ab5b 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -42,8 +42,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.Service.create(binary='nova-scheduler') diff --git a/bin/nova-volume b/bin/nova-volume index 1c6ffb58f..d8c13b9a6 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -40,8 +40,7 @@ from nova import service from nova import utils if __name__ == '__main__': - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() utils.monkey_patch() server = service.Service.create(binary='nova-volume') diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy index 9db07f148..d338d3a3e 100755 --- a/bin/nova-xvpvncproxy +++ b/bin/nova-xvpvncproxy @@ -35,15 +35,13 @@ from nova import flags from nova import log as logging from nova import rpc from nova import service -from nova import utils from nova.vnc import xvp_proxy FLAGS = flags.FLAGS if __name__ == "__main__": rpc.register_opts(FLAGS) - utils.default_cfgfile() - flags.FLAGS(sys.argv) + flags.parse_args(sys.argv) logging.setup() wsgi_server = xvp_proxy.get_wsgi_server() diff --git a/bin/volume-usage-audit b/bin/volume-usage-audit index d8591557c..0329d184e 100755 --- a/bin/volume-usage-audit +++ b/bin/volume-usage-audit @@ -57,15 +57,14 @@ from nova import flags from nova import log as logging from nova import rpc from nova import utils -import nova.volume.utils - +from nova.volume import utils as volume_utils FLAGS = flags.FLAGS if __name__ == '__main__': rpc.register_opts(FLAGS) admin_context = context.get_admin_context() - utils.default_flagfile() + utils.default_cfgfile() flags.FLAGS(sys.argv) logging.setup() begin, end = utils.last_completed_audit_period() @@ -77,7 +76,7 @@ if __name__ == '__main__': print "Found %d volumes" % len(volumes) for volume_ref in volumes: try: - nova.volume.utils.notify_usage_exists( + volume_utils.notify_usage_exists( admin_context, volume_ref) except Exception, e: print traceback.format_exc(e) diff --git a/doc/source/devref/xensmvolume.rst b/doc/source/devref/xensmvolume.rst index 8f14c06f8..c64f2be4a 100644 --- a/doc/source/devref/xensmvolume.rst +++ b/doc/source/devref/xensmvolume.rst @@ -10,7 +10,7 @@ The Xen Storage Manager 
(xensm) driver for Nova-Volume is based on XenAPI Storag - LVHD over FC: SR plugin which represents disks as VHDs on Logical Volumes within a Volume Group created on an HBA LUN, e.g. hardware-based iSCSI or FC support - iSCSI: Base ISCSI SR driver, provides a LUN-per-VDI. Does not support creation of VDIs but accesses existing LUNs on a target. - LVHD over iSCSI: SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN -- EqualLogic: SR driver for mapping of LUNs to VDIs on a EQUALLOGIC array group, providing use of fast snapshot and clone features on the array +- EqualLogic: SR driver for mapping of LUNs to VDIs on an EQUALLOGIC array group, providing use of fast snapshot and clone features on the array Glossary ========= diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index a5baa00fe..be2b37846 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -38,7 +38,6 @@ use = egg:Paste#urlmap [composite:ec2cloud] use = call:nova.api.auth:pipeline_factory noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor -deprecated = ec2faultwrap logrequest authenticate cloudrequest validator ec2executor keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor [filter:ec2faultwrap] @@ -50,18 +49,12 @@ paste.filter_factory = nova.api.ec2:RequestLogging.factory [filter:ec2lockout] paste.filter_factory = nova.api.ec2:Lockout.factory -[filter:totoken] -paste.filter_factory = nova.api.ec2:EC2Token.factory - [filter:ec2keystoneauth] paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory [filter:ec2noauth] paste.filter_factory = nova.api.ec2:NoAuth.factory -[filter:authenticate] -paste.filter_factory = nova.api.ec2:Authenticate.factory - [filter:cloudrequest] controller = nova.api.ec2.cloud.CloudController paste.filter_factory = nova.api.ec2:Requestify.factory diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample index fd259c782..3ac5ad6a7 100644 --- a/etc/nova/nova.conf.sample +++ b/etc/nova/nova.conf.sample @@ -4,1116 +4,1777 @@ [DEFAULT] -######### defined in nova.flags ######### +######## defined in nova.crypto ######## + +# ca_file="cacert.pem" +#### (StrOpt) Filename of root CA + +# key_file="private/cakey.pem" +#### (StrOpt) Filename of private key + +# crl_file="crl.pem" +#### (StrOpt) Filename of root Certificate Revocation List + +# keys_path="$state_path/keys" +#### (StrOpt) Where we keep our keys + +# ca_path="$state_path/CA" +#### (StrOpt) Where we keep our root CA + +# use_project_ca=false +#### (BoolOpt) Should we use a CA for each project? + +# user_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s" +#### (StrOpt) Subject for certificate for users, %s for project, user, +#### timestamp + +# project_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s" +#### (StrOpt) Subject for certificate for projects, %s for project, +#### timestamp + + +######## defined in nova.flags ######## + +# connection_type=<None> +#### (StrOpt) Virtualization api connection type : libvirt, xenapi, or +#### fake + +# sql_connection="sqlite:///$state_path/$sqlite_db" +#### (StrOpt) The SQLAlchemy connection string used to connect to the +#### database + +# sql_connection_debug=0 +#### (IntOpt) Verbosity of SQL debugging information. 0=None, +#### 100=Everything -###### (BoolOpt) Allow destination machine to match source for resize. Useful when testing in single-host environments. 
-# allow_resize_to_same_host=false -###### (StrOpt) File name for the paste.deploy config for nova-api # api_paste_config="api-paste.ini" -###### (BoolOpt) whether to rate limit the api -# api_rate_limit=true -###### (StrOpt) The strategy to use for auth. Supports noauth, keystone, and deprecated. -# auth_strategy="noauth" -###### (IntOpt) Seconds for auth tokens to linger -# auth_token_ttl=3600 -###### (StrOpt) AWS Access ID +#### (StrOpt) File name for the paste.deploy config for nova-api + +# pybasedir="/usr/lib/python/site-packages" +#### (StrOpt) Directory where the nova python module is installed + +# bindir="$pybasedir/bin" +#### (StrOpt) Directory where nova binaries are installed + +# state_path="$pybasedir" +#### (StrOpt) Top-level directory for maintaining nova's state + +# lock_path="$pybasedir" +#### (StrOpt) Directory to use for lock files + +# fake_network=false +#### (BoolOpt) If passed, use fake network devices and addresses + +# my_ip="10.210.228.15" +#### (StrOpt) ip address of this host + +# region_list="" +#### (ListOpt) list of region=fqdn pairs separated by commas + # aws_access_key_id="admin" -###### (StrOpt) AWS Access Key +#### (StrOpt) AWS Access ID + # aws_secret_access_key="admin" -###### (IntOpt) interval to pull bandwidth usage info -# bandwith_poll_interval=600 -###### (StrOpt) Directory where nova binaries are installed -# bindir="$pybasedir/bin" -###### (BoolOpt) Cache glance images locally -# cache_images=true -###### (StrOpt) full class name for the Manager for cert -# cert_manager="nova.cert.manager.CertManager" -###### (StrOpt) the topic cert nodes listen on +#### (StrOpt) AWS Access Key + +# glance_host="$my_ip" +#### (StrOpt) default glance hostname or ip + +# glance_port=9292 +#### (IntOpt) default glance port + +# glance_api_servers="$glance_host:$glance_port" +#### (ListOpt) A list of the glance api servers available to nova +#### ([hostname|ip]:port) + +# glance_num_retries=0 +#### (IntOpt) Number retries when downloading an image from glance + +# s3_port=3333 +#### (IntOpt) port used when accessing the s3 api + +# s3_host="$my_ip" +#### (StrOpt) hostname or ip for openstack to use when accessing the s3 +#### api + +# s3_dmz="$my_ip" +#### (StrOpt) hostname or ip for the instances to use when accessing the +#### s3 api + # cert_topic="cert" -###### (StrOpt) The full class name of the compute API class to use -# compute_api_class="nova.compute.api.API" -###### (StrOpt) full class name for the Manager for compute -# compute_manager="nova.compute.manager.ComputeManager" -###### (StrOpt) the topic compute nodes listen on +#### (StrOpt) the topic cert nodes listen on + # compute_topic="compute" -###### (MultiStrOpt) Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. 
The default files used are: [] -###### (StrOpt) Virtualization api connection type : libvirt, xenapi, or fake -# connection_type=<None> -###### (StrOpt) full class name for the Manager for console proxy -# console_manager="nova.console.manager.ConsoleProxyManager" -###### (StrOpt) the topic console proxy nodes listen on +#### (StrOpt) the topic compute nodes listen on + # console_topic="console" -###### (StrOpt) the main RabbitMQ exchange to connect to -# control_exchange="nova" -###### (BoolOpt) Print debugging output -# debug=false -###### (StrOpt) Name of network to use to set access ips for instances -# default_access_ip_network_name=<None> -###### (StrOpt) The default format a ephemeral_volume will be formatted with on creation. -# default_ephemeral_format=<None> -###### (StrOpt) default image to use, testing only -# default_image="ami-11111" -###### (StrOpt) default instance type to use, testing only -# default_instance_type="m1.small" -###### (StrOpt) the default project to use for openstack -# default_project="openstack" -###### (StrOpt) availability zone to use when user doesn't specify one -# default_schedule_zone=<None> -###### (StrOpt) the internal ip of the ec2 api server -# ec2_dmz_host="$my_ip" -###### (StrOpt) the ip of the ec2 api server +#### (StrOpt) the topic console proxy nodes listen on + +# scheduler_topic="scheduler" +#### (StrOpt) the topic scheduler nodes listen on + +# volume_topic="volume" +#### (StrOpt) the topic volume nodes listen on + +# network_topic="network" +#### (StrOpt) the topic network nodes listen on + +# api_rate_limit=true +#### (BoolOpt) whether to rate limit the api + +# enabled_apis="ec2,osapi_compute,osapi_volume,metadata" +#### (ListOpt) a list of APIs to enable by default + # ec2_host="$my_ip" -###### (StrOpt) the path prefix used to call the ec2 api server -# ec2_path="/services/Cloud" -###### (IntOpt) the port of the ec2 api server +#### (StrOpt) the ip of the ec2 api server + +# ec2_dmz_host="$my_ip" +#### (StrOpt) the internal ip of the ec2 api server + # ec2_port=8773 -###### (StrOpt) the protocol to use when connecting to the ec2 api server (http, https) +#### (IntOpt) the port of the ec2 api server + # ec2_scheme="http" -###### (BoolOpt) Allows use of instance password during server creation -# enable_instance_password=true -###### (ListOpt) a list of APIs to enable by default -# enabled_apis="ec2,osapi_compute,osapi_volume,metadata" -###### (BoolOpt) If passed, use fake network devices and addresses -# fake_network=false -###### (BoolOpt) If passed, use a fake RabbitMQ provider -# fake_rabbit=false -###### (StrOpt) Firewall driver (defaults to iptables) -# firewall_driver="nova.virt.firewall.IptablesFirewallDriver" -###### (StrOpt) full class name for the DNS Manager for floating IPs -# floating_ip_dns_manager="nova.network.dns_driver.DNSDriver" -###### (ListOpt) A list of the glance api servers available to nova ([hostname|ip]:port) -# glance_api_servers="$glance_host:$glance_port" -###### (StrOpt) default glance hostname or ip -# glance_host="$my_ip" -###### (IntOpt) Number retries when downloading an image from glance -# glance_num_retries=0 -###### (IntOpt) default glance port -# glance_port=9292 -###### (StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. -# host="nova" -###### (StrOpt) The service to use for retrieving and searching images. 
-# image_service="nova.image.glance.GlanceImageService" -###### (StrOpt) full class name for the DNS Zone for instance IPs -# instance_dns_domain="" -###### (StrOpt) full class name for the DNS Manager for instance IPs -# instance_dns_manager="nova.network.dns_driver.DNSDriver" -###### (StrOpt) time period to generate instance usages for. Time period must be hour, day, month or year -# instance_usage_audit_period="month" -###### (ListOpt) Host reserved for specific images -# isolated_hosts="" -###### (ListOpt) Images to run on isolated host -# isolated_images="" -###### (StrOpt) Directory to use for lock files -# lock_path="$pybasedir" -###### (StrOpt) If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files. -# log-config=<None> -###### (StrOpt) Format string for %(asctime)s in log records. Default: %default -# log-date-format="%Y-%m-%d %H:%M:%S" -###### (StrOpt) (Optional) The directory to keep log files in (will be prepended to --logfile) -# log-dir=<None> -###### (StrOpt) (Optional) Name of log file to output to. If not set, logging will go to stdout. -# log-file=<None> -###### (StrOpt) A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: %default -# log-format="%(asctime)s %(levelname)8s [%(name)s] %(message)s" -###### (StrOpt) Log output to a per-service log file in named directory -# logdir=<None> -###### (StrOpt) Log output to a named file -# logfile=<None> -###### (StrOpt) Default file mode used when creating log files -# logfile_mode="0644" -###### (ListOpt) Memcached servers or None for in process cache. -# memcached_servers=<None> -###### (StrOpt) the ip for the metadata api server -# metadata_host="$my_ip" -###### (IntOpt) the port for the metadata api port -# metadata_port=8775 -###### (BoolOpt) Whether to log monkey patching -# monkey_patch=false -###### (ListOpt) List of modules/decorators to monkey patch -# monkey_patch_modules="nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator" -###### (StrOpt) ip address of this host -# my_ip="10.0.0.1" -###### (StrOpt) The full class name of the network API class to use -# network_api_class="nova.network.api.API" -###### (StrOpt) Driver to use for network creation -# network_driver="nova.network.linux_net" -###### (StrOpt) full class name for the Manager for network -# network_manager="nova.network.manager.VlanManager" -###### (StrOpt) the topic network nodes listen on -# network_topic="network" -###### (StrOpt) availability zone of this node -# node_availability_zone="nova" -###### (StrOpt) Default driver for sending notifications -# notification_driver="nova.notifier.no_op_notifier" -###### (StrOpt) kernel image that indicates not to use a kernel, but to use a raw disk image instead -# null_kernel="nokernel" -###### (ListOpt) Specify list of extensions to load when using osapi_compute_extension option with nova.api.openstack.compute.contrib.select_extensions +#### (StrOpt) the protocol to use when connecting to the ec2 api server +#### (http, https) + +# ec2_path="/services/Cloud" +#### (StrOpt) the path prefix used to call the ec2 api server + # osapi_compute_ext_list="" -###### (MultiStrOpt) osapi compute extension to load +#### (ListOpt) Specify list of extensions to load when using +#### osapi_compute_extension option with +#### 
nova.api.openstack.compute.contrib.select_extensions + # osapi_compute_extension="nova.api.openstack.compute.contrib.standard_extensions" -###### (StrOpt) Base URL that will be presented to users in links to the OpenStack Compute API +#### (MultiStrOpt) osapi compute extension to load + +# osapi_volume_ext_list="" +#### (ListOpt) Specify list of extensions to load when using +#### osapi_volume_extension option with +#### nova.api.openstack.volume.contrib.select_extensions + +# osapi_volume_extension="nova.api.openstack.volume.contrib.standard_extensions" +#### (MultiStrOpt) osapi volume extension to load + +# osapi_scheme="http" +#### (StrOpt) the protocol to use when connecting to the openstack api +#### server (http, https) + +# osapi_path="/v1.1/" +#### (StrOpt) the path prefix used to call the openstack api server + # osapi_compute_link_prefix=<None> -###### (StrOpt) Base URL that will be presented to users in links to glance resources +#### (StrOpt) Base URL that will be presented to users in links to the +#### OpenStack Compute API + # osapi_glance_link_prefix=<None> -###### (IntOpt) the maximum number of items returned in a single response from a collection resource +#### (StrOpt) Base URL that will be presented to users in links to glance +#### resources + # osapi_max_limit=1000 -###### (StrOpt) the path prefix used to call the openstack api server -# osapi_path="/v1.1/" -###### (StrOpt) the protocol to use when connecting to the openstack api server (http, https) -# osapi_scheme="http" -###### (ListOpt) Specify list of extensions to load when using osapi_volume_extension option with nova.api.openstack.volume.contrib.select_extensions -# osapi_volume_ext_list="" -###### (MultiStrOpt) osapi volume extension to load -# osapi_volume_extension="nova.api.openstack.volume.contrib.standard_extensions" -###### (IntOpt) Length of generated instance admin passwords -# password_length=12 -###### (StrOpt) Directory where the nova python module is installed -# pybasedir="/usr/lib/python/site-packages" -###### (BoolOpt) use durable queues in RabbitMQ -# rabbit_durable_queues=false -###### (StrOpt) the RabbitMQ host -# rabbit_host="localhost" -###### (IntOpt) maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) -# rabbit_max_retries=0 -###### (StrOpt) the RabbitMQ password -# rabbit_password="guest" -###### (IntOpt) the RabbitMQ port -# rabbit_port=5672 -###### (IntOpt) how long to backoff for between retries when connecting to RabbitMQ -# rabbit_retry_backoff=2 -###### (IntOpt) how frequently to retry connecting with RabbitMQ -# rabbit_retry_interval=1 -###### (BoolOpt) connect over SSL for RabbitMQ -# rabbit_use_ssl=false -###### (StrOpt) the RabbitMQ userid -# rabbit_userid="guest" -###### (StrOpt) the RabbitMQ virtual host -# rabbit_virtual_host="/" -###### (IntOpt) Interval in seconds for reclaiming deleted instances -# reclaim_instance_interval=0 -###### (ListOpt) list of region=fqdn pairs separated by commas -# region_list="" -###### (BoolOpt) Whether to start guests that were running before the host rebooted -# resume_guests_state_on_host_boot=false -###### (StrOpt) Command prefix to use for running commands as root -# root_helper="sudo" -###### (StrOpt) hostname or ip for the instances to use when accessing the s3 api -# s3_dmz="$my_ip" -###### (StrOpt) hostname or ip for openstack to use when accessing the s3 api -# s3_host="$my_ip" -###### (IntOpt) port used when accessing the s3 api -# s3_port=3333 -###### (StrOpt) full class name for 
the Manager for scheduler -# scheduler_manager="nova.scheduler.manager.SchedulerManager" -###### (StrOpt) the topic scheduler nodes listen on -# scheduler_topic="scheduler" -###### (StrOpt) The full class name of the security group handler class -# security_group_handler="nova.network.quantum.sg.NullSecurityGroupHandler" -###### (IntOpt) maximum time since last check-in for up service -# service_down_time=60 -###### (StrOpt) The SQLAlchemy connection string used to connect to the database -# sql_connection="sqlite:///$state_path/$sqlite_db" -###### (IntOpt) timeout before idle sql connections are reaped +#### (IntOpt) the maximum number of items returned in a single response +#### from a collection resource + +# metadata_host="$my_ip" +#### (StrOpt) the ip for the metadata api server + +# metadata_port=8775 +#### (IntOpt) the port for the metadata api port + +# default_project="openstack" +#### (StrOpt) the default project to use for openstack + +# default_image="ami-11111" +#### (StrOpt) default image to use, testing only + +# default_instance_type="m1.small" +#### (StrOpt) default instance type to use, testing only + +# null_kernel="nokernel" +#### (StrOpt) kernel image that indicates not to use a kernel, but to use +#### a raw disk image instead + +# vpn_image_id="0" +#### (StrOpt) image id used when starting up a cloudpipe vpn server + +# vpn_key_suffix="-vpn" +#### (StrOpt) Suffix to add to project name for vpn key and secgroups + +# auth_token_ttl=3600 +#### (IntOpt) Seconds for auth tokens to linger + +# logfile_mode="0644" +#### (StrOpt) Default file mode used when creating log files + +# sqlite_db="nova.sqlite" +#### (StrOpt) the filename to use with sqlite + +# sqlite_synchronous=true +#### (BoolOpt) If passed, use synchronous mode for sqlite + # sql_idle_timeout=3600 -###### (IntOpt) maximum db connection retries during startup. (setting -1 implies an infinite retry count) +#### (IntOpt) timeout before idle sql connections are reaped + # sql_max_retries=10 -###### (IntOpt) interval between retries of opening a sql connection +#### (IntOpt) maximum db connection retries during startup. 
(setting -1 +#### implies an infinite retry count) + # sql_retry_interval=10 -###### (StrOpt) the filename to use with sqlite -# sqlite_db="nova.sqlite" -###### (BoolOpt) If passed, use synchronous mode for sqlite -# sqlite_synchronous=true -###### (BoolOpt) Whether to restart guests when the host reboots +#### (IntOpt) interval between retries of opening a sql connection + +# compute_manager="nova.compute.manager.ComputeManager" +#### (StrOpt) full class name for the Manager for compute + +# console_manager="nova.console.manager.ConsoleProxyManager" +#### (StrOpt) full class name for the Manager for console proxy + +# cert_manager="nova.cert.manager.CertManager" +#### (StrOpt) full class name for the Manager for cert + +# instance_dns_manager="nova.network.dns_driver.DNSDriver" +#### (StrOpt) full class name for the DNS Manager for instance IPs + +# instance_dns_domain="" +#### (StrOpt) full class name for the DNS Zone for instance IPs + +# floating_ip_dns_manager="nova.network.dns_driver.DNSDriver" +#### (StrOpt) full class name for the DNS Manager for floating IPs + +# network_manager="nova.network.manager.VlanManager" +#### (StrOpt) full class name for the Manager for network + +# volume_manager="nova.volume.manager.VolumeManager" +#### (StrOpt) full class name for the Manager for volume + +# scheduler_manager="nova.scheduler.manager.SchedulerManager" +#### (StrOpt) full class name for the Manager for scheduler + +# firewall_driver="nova.virt.firewall.IptablesFirewallDriver" +#### (StrOpt) Firewall driver (defaults to iptables) + +# image_service="nova.image.glance.GlanceImageService" +#### (StrOpt) The service to use for retrieving and searching images. + +# host="lx15" +#### (StrOpt) Name of this node. This can be an opaque identifier. It is +#### not necessarily a hostname, FQDN, or IP address. + +# node_availability_zone="nova" +#### (StrOpt) availability zone of this node + +# notification_driver="nova.notifier.no_op_notifier" +#### (StrOpt) Default driver for sending notifications + +# memcached_servers=<None> +#### (ListOpt) Memcached servers or None for in process cache. + +# instance_usage_audit_period="month" +#### (StrOpt) time period to generate instance usages for. Time period +#### must be hour, day, month or year + +# bandwith_poll_interval=600 +#### (IntOpt) interval to pull bandwidth usage info + # start_guests_on_host_boot=false -###### (StrOpt) Top-level directory for maintaining nova's state -# state_path="$pybasedir" -###### (StrOpt) Stub network related code -# stub_network="False" -###### (StrOpt) syslog facility to receive log lines -# syslog-log-facility="LOG_USER" -###### (BoolOpt) Whether to use cow images -# use_cow_images=true -###### (BoolOpt) use ipv6 +#### (BoolOpt) Whether to restart guests when the host reboots + +# resume_guests_state_on_host_boot=false +#### (BoolOpt) Whether to start guests that were running before the host +#### rebooted + +# default_ephemeral_format=<None> +#### (StrOpt) The default format an ephemeral_volume will be formatted +#### with on creation. + +# root_helper="sudo" +#### (StrOpt) Command prefix to use for running commands as root + +# network_driver="nova.network.linux_net" +#### (StrOpt) Driver to use for network creation + # use_ipv6=false -###### (BoolOpt) Log output to standard error -# use_stderr=true -###### (BoolOpt) Use syslog for logging. 
-# use-syslog=false -###### (BoolOpt) Print more verbose output -# verbose=false -###### (StrOpt) The full class name of the volume API class to use -# volume_api_class="nova.volume.api.API" -###### (StrOpt) full class name for the Manager for volume -# volume_manager="nova.volume.manager.VolumeManager" -###### (StrOpt) the topic volume nodes listen on -# volume_topic="volume" -###### (StrOpt) image id used when starting up a cloudpipe vpn server -# vpn_image_id="0" -###### (StrOpt) Suffix to add to project name for vpn key and secgroups -# vpn_key_suffix="-vpn" -###### (IntOpt) Number of seconds zombie instances are cleaned up. +#### (BoolOpt) use ipv6 + +# enable_instance_password=true +#### (BoolOpt) Allows use of instance password during server creation + +# password_length=12 +#### (IntOpt) Length of generated instance admin passwords + +# monkey_patch=false +#### (BoolOpt) Whether to log monkey patching + +# monkey_patch_modules="nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator" +#### (ListOpt) List of modules/decorators to monkey patch + +# allow_resize_to_same_host=false +#### (BoolOpt) Allow destination machine to match source for resize. Useful +#### when testing in single-host environments. + +# stub_network=false +#### (BoolOpt) Stub network related code + +# reclaim_instance_interval=0 +#### (IntOpt) Interval in seconds for reclaiming deleted instances + # zombie_instance_updated_at_window=172800 +#### (IntOpt) Number of seconds zombie instances are cleaned up. -######### defined in nova.log ######### +# service_down_time=60 +#### (IntOpt) maximum time since last check-in for up service + +# default_schedule_zone=<None> +#### (StrOpt) availability zone to use when user doesn't specify one + +# isolated_images="" +#### (ListOpt) Images to run on isolated host + +# isolated_hosts="" +#### (ListOpt) Host reserved for specific images + +# cache_images=true +#### (BoolOpt) Cache glance images locally + +# use_cow_images=true +#### (BoolOpt) Whether to use cow images + +# compute_api_class="nova.compute.api.API" +#### (StrOpt) The full class name of the compute API class to use + +# network_api_class="nova.network.api.API" +#### (StrOpt) The full class name of the network API class to use + +# volume_api_class="nova.volume.api.API" +#### (StrOpt) The full class name of the volume API class to use + +# security_group_handler="nova.network.quantum.sg.NullSecurityGroupHandler" +#### (StrOpt) The full class name of the security group handler class + +# default_access_ip_network_name=<None> +#### (StrOpt) Name of network to use to set access ips for instances + +# auth_strategy="noauth" +#### (StrOpt) The strategy to use for auth. Supports noauth, keystone, and +#### deprecated. 
+ +# logdir=<None> +#### (StrOpt) Log output to a per-service log file in named directory + +# logfile=<None> +#### (StrOpt) Log output to a named file + +# use_stderr=true +#### (BoolOpt) Log output to standard error + + +######## defined in nova.log ######## -###### (ListOpt) list of logger=LEVEL pairs -# default_log_levels="amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,eventlet.wsgi.server=WARN" -###### (StrOpt) If an instance is passed with the log message, format it like this -# instance_format="[instance: %(uuid)s] " -###### (StrOpt) format string to use for log messages with context # logging_context_format_string="%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s" -###### (StrOpt) data to append to log format when level is DEBUG -# logging_debug_format_suffix="from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" -###### (StrOpt) format string to use for log messages without context +#### (StrOpt) format string to use for log messages with context + # logging_default_format_string="%(asctime)s %(levelname)s %(name)s [-] %(instance)s%(message)s" -###### (StrOpt) prefix each line of exception output with this format -# logging_exception_prefix="(%(name)s): TRACE: " -###### (BoolOpt) publish error events +#### (StrOpt) format string to use for log messages without context + +# logging_debug_format_suffix="from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" +#### (StrOpt) data to append to log format when level is DEBUG + +# logging_exception_prefix="%(asctime)s TRACE %(name)s %(instance)s" +#### (StrOpt) prefix each line of exception output with this format + +# default_log_levels="amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN" +#### (ListOpt) list of logger=LEVEL pairs + # publish_errors=false +#### (BoolOpt) publish error events -######### defined in nova.utils ######### +# instance_format="[instance: %(uuid)s] " +#### (StrOpt) If an instance is passed with the log message, format it +#### like this -###### (BoolOpt) Whether to disable inter-process locks -# disable_process_locking=false +# instance_uuid_format="[instance: %(uuid)s] " +#### (StrOpt) If an instance UUID is passed with the log message, format +#### it like this -######### defined in nova.service ######### -###### (StrOpt) The backend to use for db -# db_backend="sqlalchemy" -###### (StrOpt) IP address for EC2 API to listen -# ec2_listen="0.0.0.0" -###### (IntOpt) port for ec2 api to listen -# ec2_listen_port=8773 -###### (BoolOpt) Services to be added to the available pool on create -# enable_new_services=true -###### (StrOpt) Template string to be used to generate instance names -# instance_name_template="instance-%08x" -###### (StrOpt) IP address for metadata api to listen -# metadata_listen="0.0.0.0" -###### (IntOpt) port for metadata api to listen -# metadata_listen_port=8775 -###### (StrOpt) OpenStack metadata service manager -# metadata_manager="nova.api.manager.MetadataManager" -###### (StrOpt) IP address for OpenStack API to listen -# osapi_compute_listen="0.0.0.0" -###### (IntOpt) list port for osapi compute -# osapi_compute_listen_port=8774 -###### (StrOpt) IP address for OpenStack Volume API to listen -# osapi_volume_listen="0.0.0.0" -###### (IntOpt) port for os volume api to listen -# osapi_volume_listen_port=8776 -###### (IntOpt) seconds between running periodic tasks -# periodic_interval=60 -###### (IntOpt) seconds between nodes reporting state to datastore -# report_interval=10 -###### 
(StrOpt) The messaging module to use, defaults to kombu. -# rpc_backend="nova.rpc.impl_kombu" -###### (StrOpt) Template string to be used to generate snapshot names -# snapshot_name_template="snapshot-%08x" -###### (StrOpt) Template string to be used to generate instance names -# volume_name_template="volume-%s" +######## defined in nova.notifications ######## -######### defined in nova.crypto ######### +# notify_on_state_change=<None> +#### (StrOpt) If set, send compute.instance.update notifications on +#### instance state changes. Valid values are None for no +#### notifications, "vm_state" for notifications on VM state +#### changes, or "vm_and_task_state" for notifications on VM and +#### task state changes. -###### (StrOpt) Filename of root CA -# ca_file="cacert.pem" -###### (StrOpt) Where we keep our root CA -# ca_path="$state_path/CA" -###### (StrOpt) Filename of root Certificate Revocation List -# crl_file="crl.pem" -###### (StrOpt) Filename of private key -# key_file="private/cakey.pem" -###### (StrOpt) Where we keep our keys -# keys_path="$state_path/keys" -###### (StrOpt) Subject for certificate for projects, %s for project, timestamp -# project_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s" -###### (BoolOpt) Should we use a CA for each project? -# use_project_ca=false -###### (StrOpt) Subject for certificate for users, %s for project, user, timestamp -# user_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s" -######### defined in nova.policy ######### +######## defined in nova.policy ######## -###### (StrOpt) Rule checked when requested rule is not found -# policy_default_rule="default" -###### (StrOpt) JSON file representing policy # policy_file="policy.json" +#### (StrOpt) JSON file representing policy + +# policy_default_rule="default" +#### (StrOpt) Rule checked when requested rule is not found -######### defined in nova.quota ######### -###### (IntOpt) number of instance cores allowed per project +######## defined in nova.quota ######## + +# quota_instances=10 +#### (IntOpt) number of instances allowed per project + # quota_cores=20 -###### (IntOpt) number of floating ips allowed per project -# quota_floating_ips=10 -###### (IntOpt) number of volume gigabytes allowed per project +#### (IntOpt) number of instance cores allowed per project + +# quota_ram=51200 +#### (IntOpt) megabytes of instance ram allowed per project + +# quota_volumes=10 +#### (IntOpt) number of volumes allowed per project + # quota_gigabytes=1000 -###### (IntOpt) number of instances allowed per project -# quota_instances=10 -###### (IntOpt) number of bytes allowed per injected file +#### (IntOpt) number of volume gigabytes allowed per project + +# quota_floating_ips=10 +#### (IntOpt) number of floating ips allowed per project + +# quota_metadata_items=128 +#### (IntOpt) number of metadata items allowed per instance + +# quota_injected_files=5 +#### (IntOpt) number of injected files allowed + # quota_injected_file_content_bytes=10240 -###### (IntOpt) number of bytes allowed per injected file path +#### (IntOpt) number of bytes allowed per injected file + # quota_injected_file_path_bytes=255 -###### (IntOpt) number of injected files allowed -# quota_injected_files=5 -###### (IntOpt) number of metadata items allowed per instance -# quota_metadata_items=128 -###### (IntOpt) megabytes of instance ram allowed per project -# quota_ram=51200 -###### (IntOpt) number of volumes allowed per project -# quota_volumes=10 +#### (IntOpt) number of bytes allowed 
per injected file path -######### defined in nova.test ######### +# quota_security_groups=10 +#### (IntOpt) number of security groups per project + +# quota_security_group_rules=20 +#### (IntOpt) number of security rules per security group + +# quota_key_pairs=100 +#### (IntOpt) number of key pairs per user + +# reservation_expire=86400 +#### (IntOpt) number of seconds until a reservation expires + +# until_refresh=0 +#### (IntOpt) count of reservations until usage is refreshed + +# max_age=0 +#### (IntOpt) number of seconds between subsequent usage refreshes + +# quota_driver="nova.quota.DbQuotaDriver" +#### (StrOpt) default driver to use for quota checks + + +######## defined in nova.service ######## + +# report_interval=10 +#### (IntOpt) seconds between nodes reporting state to datastore + +# periodic_interval=60 +#### (IntOpt) seconds between running periodic tasks + +# periodic_fuzzy_delay=60 +#### (IntOpt) range of seconds to randomly delay when starting the +#### periodic task scheduler to reduce stampeding. (Disable by +#### setting to 0) + +# ec2_listen="0.0.0.0" +#### (StrOpt) IP address for EC2 API to listen + +# ec2_listen_port=8773 +#### (IntOpt) port for ec2 api to listen + +# osapi_compute_listen="0.0.0.0" +#### (StrOpt) IP address for OpenStack API to listen + +# osapi_compute_listen_port=8774 +#### (IntOpt) list port for osapi compute + +# metadata_manager="nova.api.manager.MetadataManager" +#### (StrOpt) OpenStack metadata service manager + +# metadata_listen="0.0.0.0" +#### (StrOpt) IP address for metadata api to listen + +# metadata_listen_port=8775 +#### (IntOpt) port for metadata api to listen + +# osapi_volume_listen="0.0.0.0" +#### (StrOpt) IP address for OpenStack Volume API to listen + +# osapi_volume_listen_port=8776 +#### (IntOpt) port for os volume api to listen + + +######## defined in nova.test ######## -###### (StrOpt) the topic console auth proxy nodes listen on -# consoleauth_topic="consoleauth" -###### (StrOpt) driver to use for database access -# db_driver="nova.db" -###### (BoolOpt) should we use everything for testing -# fake_tests=true -###### (StrOpt) Timeout after NN seconds when looking for a host. -# find_host_timeout="30" -###### (IntOpt) Size of RPC connection pool -# rpc_conn_pool_size=30 -###### (IntOpt) Seconds to wait for a response from call or multicall -# rpc_response_timeout=60 -###### (IntOpt) Size of RPC thread pool -# rpc_thread_pool_size=1024 -###### (StrOpt) File name of clean sqlite db # sqlite_clean_db="clean.sqlite" -###### (StrOpt) availability zone of this service -# storage_availability_zone="nova" -###### (BoolOpt) if True, will not discover local volumes -# use_local_volumes=true -###### (StrOpt) Driver to use for volume creation -# volume_driver="nova.volume.driver.ISCSIDriver" -###### (BoolOpt) if True will force update capabilities on each check -# volume_force_update_capabilities=false +#### (StrOpt) File name of clean sqlite db -######### defined in nova.auth.ldapdriver ######### +# fake_tests=true +#### (BoolOpt) should we use everything for testing + + +######## defined in nova.api.auth ######## + +# use_forwarded_for=false +#### (BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only +#### enable this if you have a sanitizing proxy. + + +######## defined in nova.api.ec2 ######## + +# lockout_attempts=5 +#### (IntOpt) Number of failed auths before lockout. + +# lockout_minutes=15 +#### (IntOpt) Number of minutes to lockout if triggered. 
+ +# lockout_window=15 +#### (IntOpt) Number of minutes for lockout window. + +# keystone_ec2_url="http://localhost:5000/v2.0/ec2tokens" +#### (StrOpt) URL to get token from ec2 request. + +# ec2_private_dns_show_ip=false +#### (BoolOpt) Return the IP address as private dns hostname in describe +#### instances + + +######## defined in nova.api.openstack.compute ######## + +# allow_instance_snapshots=true +#### (BoolOpt) Permit instance snapshot operations. + + +######## defined in nova.api.sizelimit ######## + +# osapi_max_request_body_size=114688 +#### (IntOpt) the maximum body size per each osapi request(bytes) + + +######## defined in nova.auth.ldapdriver ######## -###### (StrOpt) cn for Cloud Admins -# ldap_cloudadmin="cn=cloudadmins,ou=Groups,dc=example,dc=com" -###### (StrOpt) cn for Developers -# ldap_developer="cn=developers,ou=Groups,dc=example,dc=com" -###### (StrOpt) cn for ItSec -# ldap_itsec="cn=itsec,ou=Groups,dc=example,dc=com" -###### (StrOpt) cn for NetAdmins -# ldap_netadmin="cn=netadmins,ou=Groups,dc=example,dc=com" -###### (StrOpt) LDAP password -# ldap_password="changeme" -###### (StrOpt) OU for Projects -# ldap_project_subtree="ou=Groups,dc=example,dc=com" -###### (IntOpt) Current version of the LDAP schema # ldap_schema_version=2 -###### (StrOpt) cn for Sysadmins -# ldap_sysadmin="cn=sysadmins,ou=Groups,dc=example,dc=com" -###### (StrOpt) Point this at your ldap server +#### (IntOpt) Current version of the LDAP schema + # ldap_url="ldap://localhost" -###### (StrOpt) DN of admin user +#### (StrOpt) Point this at your ldap server + +# ldap_password="changeme" +#### (StrOpt) LDAP password + # ldap_user_dn="cn=Manager,dc=example,dc=com" -###### (StrOpt) Attribute to use as id +#### (StrOpt) DN of admin user + # ldap_user_id_attribute="uid" -###### (BoolOpt) Modify user attributes instead of creating/deleting -# ldap_user_modify_only=false -###### (StrOpt) Attribute to use as name +#### (StrOpt) Attribute to use as id + # ldap_user_name_attribute="cn" -###### (StrOpt) OU for Users -# ldap_user_subtree="ou=Users,dc=example,dc=com" -###### (StrOpt) OID for Users +#### (StrOpt) Attribute to use as name + # ldap_user_unit="Users" -###### (StrOpt) OU for Roles +#### (StrOpt) OID for Users + +# ldap_user_subtree="ou=Users,dc=example,dc=com" +#### (StrOpt) OU for Users + +# ldap_user_modify_only=false +#### (BoolOpt) Modify user attributes instead of creating/deleting + +# ldap_project_subtree="ou=Groups,dc=example,dc=com" +#### (StrOpt) OU for Projects + # role_project_subtree="ou=Groups,dc=example,dc=com" +#### (StrOpt) OU for Roles + +# ldap_cloudadmin="cn=cloudadmins,ou=Groups,dc=example,dc=com" +#### (StrOpt) cn for Cloud Admins + +# ldap_itsec="cn=itsec,ou=Groups,dc=example,dc=com" +#### (StrOpt) cn for ItSec -######### defined in nova.auth.manager ######### +# ldap_sysadmin="cn=sysadmins,ou=Groups,dc=example,dc=com" +#### (StrOpt) cn for Sysadmins + +# ldap_netadmin="cn=netadmins,ou=Groups,dc=example,dc=com" +#### (StrOpt) cn for NetAdmins + +# ldap_developer="cn=developers,ou=Groups,dc=example,dc=com" +#### (StrOpt) cn for Developers + + +######## defined in nova.auth.manager ######## -###### (ListOpt) Allowed roles for project # allowed_roles="cloudadmin,itsec,sysadmin,netadmin,developer" -###### (StrOpt) Driver that auth manager uses -# auth_driver="nova.auth.dbdriver.DbDriver" -###### (StrOpt) Filename of certificate in credentials zip -# credential_cert_file="cert.pem" -###### (StrOpt) Filename of private key in credentials zip -# credential_key_file="pk.pem" -###### 
(StrOpt) Filename of rc in credentials zip %s will be replaced by name of the region (nova by default) -# credential_rc_file="%src" -###### (StrOpt) Filename of certificate in credentials zip -# credential_vpn_file="nova-vpn.conf" -###### (StrOpt) Template for creating users rc file -# credentials_template="$pybasedir/nova/auth/novarc.template" -###### (ListOpt) Roles that apply to all projects -# global_roles="cloudadmin,itsec" -###### (ListOpt) Roles that ignore authorization checking completely +#### (ListOpt) Allowed roles for project + # superuser_roles="cloudadmin" -###### (StrOpt) Template for creating users vpn file +#### (ListOpt) Roles that ignore authorization checking completely + +# global_roles="cloudadmin,itsec" +#### (ListOpt) Roles that apply to all projects + +# credentials_template="$pybasedir/nova/auth/novarc.template" +#### (StrOpt) Template for creating users rc file + # vpn_client_template="$pybasedir/nova/cloudpipe/client.ovpn.template" +#### (StrOpt) Template for creating users vpn file -######### defined in nova.api.auth ######### +# credential_vpn_file="nova-vpn.conf" +#### (StrOpt) Filename of certificate in credentials zip -###### (BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. -# use_forwarded_for=false +# credential_key_file="pk.pem" +#### (StrOpt) Filename of private key in credentials zip -######### defined in nova.api.ec2 ######### +# credential_cert_file="cert.pem" +#### (StrOpt) Filename of certificate in credentials zip -###### (BoolOpt) Return the IP address as private dns hostname in describe instances -# ec2_private_dns_show_ip=false -###### (StrOpt) URL to get token from ec2 request. -# keystone_ec2_url="http://localhost:5000/v2.0/ec2tokens" -###### (IntOpt) Number of failed auths before lockout. -# lockout_attempts=5 -###### (IntOpt) Number of minutes to lockout if triggered. -# lockout_minutes=15 -###### (IntOpt) Number of minutes for lockout window. -# lockout_window=15 +# credential_rc_file="%src" +#### (StrOpt) Filename of rc in credentials zip %s will be replaced by +#### name of the region (nova by default) -######### defined in nova.api.openstack.compute ######### +# auth_driver="nova.auth.dbdriver.DbDriver" +#### (StrOpt) Driver that auth manager uses -###### (BoolOpt) Permit instance snapshot operations. 
-# allow_instance_snapshots=true -######### defined in nova.vnc ######### +######## defined in nova.cloudpipe.pipelib ######## -###### (StrOpt) location of vnc console proxy, in the form "http://127.0.0.1:6080/vnc_auto.html" -# novncproxy_base_url="http://127.0.0.1:6080/vnc_auto.html" -###### (BoolOpt) enable vnc related features -# vnc_enabled=true -###### (StrOpt) keymap for vnc -# vnc_keymap="en-us" -###### (StrOpt) Ip address on which instance vncserversshould listen -# vncserver_listen="127.0.0.1" -###### (StrOpt) the address to which proxy clients (like nova-xvpvncproxy) should connect -# vncserver_proxyclient_address="127.0.0.1" -###### (StrOpt) location of nova xvp vnc console proxy, in the form "http://127.0.0.1:6081/console" -# xvpvncproxy_base_url="http://127.0.0.1:6081/console" +# vpn_instance_type="m1.tiny" +#### (StrOpt) Instance type for vpn instances -######### defined in nova.vnc.xvp_proxy ######### +# boot_script_template="$pybasedir/nova/cloudpipe/bootscript.template" +#### (StrOpt) Template for cloudpipe instance boot script -###### (StrOpt) Address that the XCP VNC proxy should bind to -# xvpvncproxy_host="0.0.0.0" -###### (IntOpt) Port that the XCP VNC proxy should bind to -# xvpvncproxy_port=6081 +# dmz_net="10.0.0.0" +#### (StrOpt) Network to push into openvpn config + +# dmz_mask="255.255.255.0" +#### (StrOpt) Netmask to push into openvpn config + + +######## defined in nova.common.eventlet_backdoor ######## + +# backdoor_port=<None> +#### (IntOpt) port for eventlet backdoor to listen + + +######## defined in nova.compute.manager ######## + +# instances_path="$state_path/instances" +#### (StrOpt) where instances are stored on disk + +# base_dir_name="_base" +#### (StrOpt) Where cached images are stored under $instances_path.This is +#### NOT the full path - just a folder name.For per-compute-host +#### cached images, set to _base_$my_ip + +# compute_driver="nova.virt.connection.get_connection" +#### (StrOpt) Driver to use for controlling virtualization + +# console_host="lx15" +#### (StrOpt) Console proxy host to use to connect to instances on this +#### host. + +# live_migration_retry_count=30 +#### (IntOpt) Number of 1 second retries needed in live_migration + +# reboot_timeout=0 +#### (IntOpt) Automatically hard reboot an instance if it has been stuck +#### in a rebooting state longer than N seconds. Set to 0 to +#### disable. + +# instance_build_timeout=0 +#### (IntOpt) Amount of time in seconds an instance can be in BUILD before +#### going into ERROR status.Set to 0 to disable. + +# rescue_timeout=0 +#### (IntOpt) Automatically unrescue an instance after N seconds. Set to 0 +#### to disable. + +# resize_confirm_window=0 +#### (IntOpt) Automatically confirm resizes after N seconds. Set to 0 to +#### disable. + +# host_state_interval=120 +#### (IntOpt) Interval in seconds for querying the host status + +# running_deleted_instance_timeout=0 +#### (IntOpt) Number of seconds after being deleted when a running +#### instance should be considered eligible for cleanup. + +# running_deleted_instance_poll_interval=30 +#### (IntOpt) Number of periodic scheduler ticks to wait between runs of +#### the cleanup task. + +# running_deleted_instance_action="log" +#### (StrOpt) Action to take if a running deleted instance is +#### detected.Valid options are 'noop', 'log' and 'reap'. Set to +#### 'noop' to disable. + +# image_cache_manager_interval=40 +#### (IntOpt) Number of periodic scheduler ticks to wait between runs of +#### the image cache manager. 
-######### defined in nova.ipv6.api ######### +# heal_instance_info_cache_interval=60 +#### (IntOpt) Number of seconds between instance info_cache self healing +#### updates + +# additional_compute_capabilities="" +#### (ListOpt) a list of additional capabilities for this compute host to +#### advertise. Valid entries are name=value pairs this +#### functionality will be replaced when HostAggregates become +#### more funtional for general grouping in Folsom. (see: +#### http://etherpad.openstack.org/FolsomNovaHostAggregates-v2) + + +######## defined in nova.console.manager ######## + +# console_driver="nova.console.xvp.XVPConsoleProxy" +#### (StrOpt) Driver to use for the console proxy + +# stub_compute=false +#### (BoolOpt) Stub calls to compute worker for tests + +# console_public_hostname="lx15" +#### (StrOpt) Publicly visible name for this console host + + +######## defined in nova.console.vmrc ######## + +# console_vmrc_port=443 +#### (IntOpt) port for VMware VMRC connections + +# console_vmrc_error_retries=10 +#### (IntOpt) number of retries for retrieving VMRC information + + +######## defined in nova.console.xvp ######## + +# console_xvp_conf_template="$pybasedir/nova/console/xvp.conf.template" +#### (StrOpt) XVP conf template + +# console_xvp_conf="/etc/xvp.conf" +#### (StrOpt) generated XVP conf file + +# console_xvp_pid="/var/run/xvp.pid" +#### (StrOpt) XVP master process pid file + +# console_xvp_log="/var/log/xvp.log" +#### (StrOpt) XVP log file + +# console_xvp_multiplex_port=5900 +#### (IntOpt) port for XVP to multiplex VNC connections on + + +######## defined in nova.consoleauth ######## + +# consoleauth_topic="consoleauth" +#### (StrOpt) the topic console auth proxy nodes listen on + + +######## defined in nova.consoleauth.manager ######## + +# console_token_ttl=600 +#### (IntOpt) How many seconds before deleting tokens + +# consoleauth_manager="nova.consoleauth.manager.ConsoleAuthManager" +#### (StrOpt) Manager for console auth + + +######## defined in nova.db.api ######## + +# db_backend="sqlalchemy" +#### (StrOpt) The backend to use for db + +# enable_new_services=true +#### (BoolOpt) Services to be added to the available pool on create + +# instance_name_template="instance-%08x" +#### (StrOpt) Template string to be used to generate instance names + +# volume_name_template="volume-%s" +#### (StrOpt) Template string to be used to generate instance names + +# snapshot_name_template="snapshot-%s" +#### (StrOpt) Template string to be used to generate snapshot names + + +######## defined in nova.db.base ######## + +# db_driver="nova.db" +#### (StrOpt) driver to use for database access + + +######## defined in nova.image.s3 ######## + +# image_decryption_dir="/tmp" +#### (StrOpt) parent dir for tempdir used for image decryption + +# s3_access_key="notchecked" +#### (StrOpt) access key to use for s3 server for images + +# s3_secret_key="notchecked" +#### (StrOpt) secret key to use for s3 server for images + +# s3_use_ssl=false +#### (BoolOpt) whether to use ssl when talking to s3 + +# s3_affix_tenant=false +#### (BoolOpt) whether to affix the tenant id to the access key when +#### downloading from s3 + + +######## defined in nova.ipv6.api ######## -###### (StrOpt) Backend to use for IPv6 generation # ipv6_backend="rfc2462" +#### (StrOpt) Backend to use for IPv6 generation -######### defined in nova.network.linux_net ######### -###### (IntOpt) Lifetime of a DHCP lease in seconds -# dhcp_lease_time=120 -###### (StrOpt) location of nova-dhcpbridge -# 
dhcpbridge="$bindir/nova-dhcpbridge" -###### (StrOpt) location of flagfile for dhcpbridge -# dhcpbridge_flagfile="/etc/nova/nova-dhcpbridge.conf" -###### (StrOpt) dmz range that should be accepted -# dmz_cidr="10.128.0.0/24" -###### (StrOpt) if set, uses specific dns server for dnsmasq -# dns_server=<None> -###### (StrOpt) Override the default dnsmasq settings with this file -# dnsmasq_config_file="" -###### (StrOpt) Base DN for DNS entries in ldap -# ldap_dns_base_dn="ou=hosts,dc=example,dc=org" -###### (StrOpt) password for ldap DNS +######## defined in nova.network.ldapdns ######## + +# ldap_dns_url="ldap://ldap.example.com:389" +#### (StrOpt) URL for ldap server which will store dns entries + +# ldap_dns_user="uid=admin,ou=people,dc=example,dc=org" +#### (StrOpt) user for ldap DNS + # ldap_dns_password="password" -###### (MultiStrOpt) DNS Servers for ldap dns driver -# ldap_dns_servers="dns.example.org" -###### (StrOpt) Expiry interval (in seconds) for ldap dns driver Statement of Authority -# ldap_dns_soa_expiry="86400" -###### (StrOpt) Hostmaster for ldap dns driver Statement of Authority +#### (StrOpt) password for ldap DNS + # ldap_dns_soa_hostmaster="hostmaster@example.org" -###### (StrOpt) Minimum interval (in seconds) for ldap dns driver Statement of Authority -# ldap_dns_soa_minimum="7200" -###### (StrOpt) Refresh interval (in seconds) for ldap dns driver Statement of Authority +#### (StrOpt) Hostmaster for ldap dns driver Statement of Authority + +# ldap_dns_servers="dns.example.org" +#### (MultiStrOpt) DNS Servers for ldap dns driver + +# ldap_dns_base_dn="ou=hosts,dc=example,dc=org" +#### (StrOpt) Base DN for DNS entries in ldap + # ldap_dns_soa_refresh="1800" -###### (StrOpt) Retry interval (in seconds) for ldap dns driver Statement of Authority +#### (StrOpt) Refresh interval (in seconds) for ldap dns driver Statement +#### of Authority + # ldap_dns_soa_retry="3600" -###### (StrOpt) URL for ldap server which will store dns entries -# ldap_dns_url="ldap://ldap.example.com:389" -###### (StrOpt) user for ldap DNS -# ldap_dns_user="uid=admin,ou=people,dc=example,dc=org" -###### (StrOpt) Driver used to create ethernet devices. 
-# linuxnet_interface_driver="nova.network.linux_net.LinuxBridgeInterfaceDriver" -###### (StrOpt) Name of Open vSwitch bridge used with linuxnet -# linuxnet_ovs_integration_bridge="br-int" -###### (StrOpt) MTU setting for vlan -# network_device_mtu=<None> -###### (StrOpt) Location to keep network config files +#### (StrOpt) Retry interval (in seconds) for ldap dns driver Statement of +#### Authority + +# ldap_dns_soa_expiry="86400" +#### (StrOpt) Expiry interval (in seconds) for ldap dns driver Statement +#### of Authority + +# ldap_dns_soa_minimum="7200" +#### (StrOpt) Minimum interval (in seconds) for ldap dns driver Statement +#### of Authority + + +######## defined in nova.network.linux_net ######## + +# dhcpbridge_flagfile="/etc/nova/nova-dhcpbridge.conf" +#### (StrOpt) location of flagfile for dhcpbridge + # networks_path="$state_path/networks" -###### (StrOpt) Interface for public IP addresses +#### (StrOpt) Location to keep network config files + # public_interface="eth0" -###### (StrOpt) Public IP of network host +#### (StrOpt) Interface for public IP addresses + +# network_device_mtu=<None> +#### (StrOpt) MTU setting for vlan + +# dhcpbridge="$bindir/nova-dhcpbridge" +#### (StrOpt) location of nova-dhcpbridge + # routing_source_ip="$my_ip" -###### (BoolOpt) send gratuitous ARPs for HA setup +#### (StrOpt) Public IP of network host + +# dhcp_lease_time=120 +#### (IntOpt) Lifetime of a DHCP lease in seconds + +# dns_server=<None> +#### (StrOpt) if set, uses specific dns server for dnsmasq + +# dmz_cidr="" +#### (ListOpt) A list of dmz range that should be accepted + +# dnsmasq_config_file="" +#### (StrOpt) Override the default dnsmasq settings with this file + +# linuxnet_interface_driver="nova.network.linux_net.LinuxBridgeInterfaceDriver" +#### (StrOpt) Driver used to create ethernet devices. + +# linuxnet_ovs_integration_bridge="br-int" +#### (StrOpt) Name of Open vSwitch bridge used with linuxnet + # send_arp_for_ha=false -###### (BoolOpt) Use single default gateway. Only first nic of vm will get default gateway from dhcp server +#### (BoolOpt) send gratuitous ARPs for HA setup + # use_single_default_gateway=false +#### (BoolOpt) Use single default gateway. 
Only first nic of vm will get +#### default gateway from dhcp server -######### defined in nova.network.manager ######### -###### (BoolOpt) Autoassigning floating ip to VM -# auto_assign_floating_ip=false -###### (IntOpt) Number of addresses reserved for vpn clients -# cnt_vpn_clients=0 -###### (IntOpt) Number of attempts to create unique mac address -# create_unique_mac_address_attempts=5 -###### (StrOpt) Default pool for floating ips -# default_floating_pool="nova" -###### (StrOpt) domain to use for building the hostnames -# dhcp_domain="novalocal" -###### (BoolOpt) If True, skip using the queue and make local calls -# fake_call=false -###### (IntOpt) Seconds after which a deallocated ip is disassociated -# fixed_ip_disassociate_timeout=600 -###### (StrOpt) Fixed IP address block -# fixed_range="10.0.0.0/8" -###### (StrOpt) Fixed IPv6 address block -# fixed_range_v6="fd00::/48" -###### (BoolOpt) Whether to attempt to inject network setup into guest -# flat_injected=false -###### (StrOpt) FlatDhcp will bridge into this interface if set -# flat_interface=<None> -###### (StrOpt) Bridge for simple network instances +######## defined in nova.network.manager ######## + # flat_network_bridge=<None> -###### (StrOpt) Dns for simple network +#### (StrOpt) Bridge for simple network instances + # flat_network_dns="8.8.4.4" -###### (StrOpt) Floating IP address block +#### (StrOpt) Dns for simple network + +# flat_injected=false +#### (BoolOpt) Whether to attempt to inject network setup into guest + +# flat_interface=<None> +#### (StrOpt) FlatDhcp will bridge into this interface if set + +# vlan_start=100 +#### (IntOpt) First VLAN for private networks + +# vlan_interface=<None> +#### (StrOpt) vlans will bridge into this interface if set + +# num_networks=1 +#### (IntOpt) Number of networks to support + +# vpn_ip="$my_ip" +#### (StrOpt) Public IP for the cloudpipe VPN servers + +# vpn_start=1000 +#### (IntOpt) First Vpn port for private networks + +# multi_host=false +#### (BoolOpt) Default value for multi_host in networks + +# network_size=256 +#### (IntOpt) Number of addresses in each private subnet + # floating_range="4.4.4.0/24" -###### (BoolOpt) If True, send a dhcp release on instance termination -# force_dhcp_release=false -###### (StrOpt) Default IPv4 gateway +#### (StrOpt) Floating IP address block + +# default_floating_pool="nova" +#### (StrOpt) Default pool for floating ips + +# fixed_range="10.0.0.0/8" +#### (StrOpt) Fixed IP address block + +# fixed_range_v6="fd00::/48" +#### (StrOpt) Fixed IPv6 address block + # gateway=<None> -###### (StrOpt) Default IPv6 gateway +#### (StrOpt) Default IPv4 gateway + # gateway_v6=<None> -###### (StrOpt) Indicates underlying L3 management library +#### (StrOpt) Default IPv6 gateway + +# cnt_vpn_clients=0 +#### (IntOpt) Number of addresses reserved for vpn clients + +# fixed_ip_disassociate_timeout=600 +#### (IntOpt) Seconds after which a deallocated ip is disassociated + +# create_unique_mac_address_attempts=5 +#### (IntOpt) Number of attempts to create unique mac address + +# auto_assign_floating_ip=false +#### (BoolOpt) Autoassigning floating ip to VM + +# network_host="lx15" +#### (StrOpt) Network host to use for ip allocation in flat modes + +# fake_call=false +#### (BoolOpt) If True, skip using the queue and make local calls + +# force_dhcp_release=false +#### (BoolOpt) If True, send a dhcp release on instance termination + +# dhcp_domain="novalocal" +#### (StrOpt) domain to use for building the hostnames + # l3_lib="nova.network.l3.LinuxNetL3" 
-###### (BoolOpt) Default value for multi_host in networks -# multi_host=false -###### (StrOpt) Network host to use for ip allocation in flat modes -# network_host="nova" -###### (IntOpt) Number of addresses in each private subnet -# network_size=256 -###### (IntOpt) Number of networks to support -# num_networks=1 -###### (StrOpt) vlans will bridge into this interface if set -# vlan_interface=<None> -###### (IntOpt) First VLAN for private networks -# vlan_start=100 -###### (StrOpt) Public IP for the cloudpipe VPN servers -# vpn_ip="$my_ip" -###### (IntOpt) First Vpn port for private networks -# vpn_start=1000 +#### (StrOpt) Indicates underlying L3 management library + -######### defined in nova.network.quantum.manager ######### +######## defined in nova.network.quantum.manager ######## + +# quantum_ipam_lib="nova.network.quantum.nova_ipam_lib" +#### (StrOpt) Indicates underlying IP address management library + +# use_melange_mac_generation=false +#### (BoolOpt) Use Melange for assignment of MAC addresses + +# quantum_use_dhcp=false +#### (BoolOpt) Whether or not to enable DHCP for networks + +# quantum_use_port_security=false +#### (BoolOpt) Whether or not to enable port security + +# quantum_port_security_include_link_local=false +#### (BoolOpt) Add the link local address to the port security list + + +######## defined in nova.network.quantum.melange_connection ######## -###### (StrOpt) HOST for connecting to melange # melange_host="127.0.0.1" -###### (IntOpt) Number retries when contacting melange -# melange_num_retries=0 -###### (IntOpt) PORT for connecting to melange +#### (StrOpt) HOST for connecting to melange + # melange_port=9898 -###### (StrOpt) HOST for connecting to quantum +#### (IntOpt) PORT for connecting to melange + +# melange_num_retries=0 +#### (IntOpt) Number retries when contacting melange + + +######## defined in nova.network.quantum.quantum_connection ######## + # quantum_connection_host="127.0.0.1" -###### (StrOpt) PORT for connecting to quantum +#### (StrOpt) HOST for connecting to quantum + # quantum_connection_port=9696 -###### (StrOpt) Default tenant id when creating quantum networks +#### (IntOpt) PORT for connecting to quantum + # quantum_default_tenant_id="default" -###### (StrOpt) Indicates underlying IP address management library -# quantum_ipam_lib="nova.network.quantum.nova_ipam_lib" -###### (BoolOpt) Add the link local address to the port security list -# quantum_port_security_include_link_local=false -###### (BoolOpt) Whether or not to enable DHCP for networks -# quantum_use_dhcp=false -###### (BoolOpt) Whether or not to enable port security -# quantum_use_port_security=false -###### (BoolOpt) Use Melange for assignment of MAC addresses -# use_melange_mac_generation=false +#### (StrOpt) Default tenant id when creating quantum networks -######### defined in nova.compute.manager ######### +# quantum_request_timeout=20 +#### (IntOpt) Maximum amount of time to wait for quantum request + + +######## defined in nova.notifier.api ######## -###### (StrOpt) Driver to use for controlling virtualization -# compute_driver="nova.virt.connection.get_connection" -###### (StrOpt) Console proxy host to use to connect to instances on this host. 
-# console_host="nova" -###### (StrOpt) Default notification level for outgoing notifications # default_notification_level="INFO" -###### (StrOpt) Default publisher_id for outgoing notifications +#### (StrOpt) Default notification level for outgoing notifications + # default_publisher_id="$host" -###### (IntOpt) Number of seconds between instance info_cache self healing updates -# heal_instance_info_cache_interval=60 -###### (IntOpt) Interval in seconds for querying the host status -# host_state_interval=120 -###### (IntOpt) Number of periodic scheduler ticks to wait between runs of the image cache manager. -# image_cache_manager_interval=3600 -###### (StrOpt) where instances are stored on disk -# instances_path="$state_path/instances" -###### (IntOpt) Number of 1 second retries needed in live_migration -# live_migration_retry_count=30 -###### (IntOpt) Automatically hard reboot an instance if it has been stuck in a rebooting state longer than N seconds. Set to 0 to disable. -# reboot_timeout=0 -###### (IntOpt) Automatically unrescue an instance after N seconds. Set to 0 to disable. -# rescue_timeout=0 -###### (IntOpt) Automatically confirm resizes after N seconds. Set to 0 to disable. -# resize_confirm_window=0 -###### (StrOpt) Action to take if a running deleted instance is detected.Valid options are 'noop', 'log' and 'reap'. Set to 'noop' to disable. -# running_deleted_instance_action="log" -###### (IntOpt) Number of periodic scheduler ticks to wait between runs of the cleanup task. -# running_deleted_instance_poll_interval=30 -###### (IntOpt) Number of seconds after being deleted when a running instance should be considered eligible for cleanup. -# running_deleted_instance_timeout=0 +#### (StrOpt) Default publisher_id for outgoing notifications + + +######## defined in nova.notifier.list_notifier ######## + +# list_notifier_drivers="nova.notifier.no_op_notifier" +#### (MultiStrOpt) List of drivers to send notifications + + +######## defined in nova.notifier.rabbit_notifier ######## + +# notification_topics="notifications" +#### (ListOpt) AMQP topic used for Nova notifications + + +######## defined in nova.objectstore.s3server ######## + +# buckets_path="$state_path/buckets" +#### (StrOpt) path to s3 buckets + +# s3_listen="0.0.0.0" +#### (StrOpt) IP address for S3 API to listen + +# s3_listen_port=3333 +#### (IntOpt) port for s3 api to listen + + +######## defined in nova.rpc ######## + +# rpc_backend="nova.rpc.impl_kombu" +#### (StrOpt) The messaging module to use, defaults to kombu. + +# rpc_thread_pool_size=64 +#### (IntOpt) Size of RPC thread pool + +# rpc_conn_pool_size=30 +#### (IntOpt) Size of RPC connection pool + +# rpc_response_timeout=60 +#### (IntOpt) Seconds to wait for a response from call or multicall + +# allowed_rpc_exception_modules="nova.exception" +#### (ListOpt) Modules of exceptions that are permitted to be recreatedupon +#### receiving exception data from an rpc call. 
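A minimal sketch of tuning the nova.rpc options listed above (illustrative values, assuming the default kombu backend stays in place):

    rpc_backend=nova.rpc.impl_kombu
    rpc_thread_pool_size=128
    rpc_conn_pool_size=30
    rpc_response_timeout=120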
+ +# control_exchange="nova" +#### (StrOpt) AMQP exchange to connect to if using RabbitMQ or Qpid + +# fake_rabbit=false +#### (BoolOpt) If passed, use a fake RabbitMQ provider + + +######## defined in nova.rpc.impl_kombu ######## + +# kombu_ssl_version="" +#### (StrOpt) SSL version to use (valid only if SSL enabled) + +# kombu_ssl_keyfile="" +#### (StrOpt) SSL key file (valid only if SSL enabled) + +# kombu_ssl_certfile="" +#### (StrOpt) SSL cert file (valid only if SSL enabled) + +# kombu_ssl_ca_certs="" +#### (StrOpt) SSL certification authority file (valid only if SSL enabled) + +# rabbit_host="localhost" +#### (StrOpt) the RabbitMQ host + +# rabbit_port=5672 +#### (IntOpt) the RabbitMQ port + +# rabbit_use_ssl=false +#### (BoolOpt) connect over SSL for RabbitMQ + +# rabbit_userid="guest" +#### (StrOpt) the RabbitMQ userid + +# rabbit_password="guest" +#### (StrOpt) the RabbitMQ password + +# rabbit_virtual_host="/" +#### (StrOpt) the RabbitMQ virtual host + +# rabbit_retry_interval=1 +#### (IntOpt) how frequently to retry connecting with RabbitMQ + +# rabbit_retry_backoff=2 +#### (IntOpt) how long to backoff for between retries when connecting to +#### RabbitMQ + +# rabbit_max_retries=0 +#### (IntOpt) maximum retries with trying to connect to RabbitMQ (the +#### default of 0 implies an infinite retry count) + +# rabbit_durable_queues=false +#### (BoolOpt) use durable queues in RabbitMQ + + +######## defined in nova.rpc.impl_qpid ######## + +# qpid_hostname="localhost" +#### (StrOpt) Qpid broker hostname + +# qpid_port="5672" +#### (StrOpt) Qpid broker port + +# qpid_username="" +#### (StrOpt) Username for qpid connection + +# qpid_password="" +#### (StrOpt) Password for qpid connection + +# qpid_sasl_mechanisms="" +#### (StrOpt) Space separated list of SASL mechanisms to use for auth + +# qpid_reconnect=true +#### (BoolOpt) Automatically reconnect + +# qpid_reconnect_timeout=0 +#### (IntOpt) Reconnection timeout in seconds + +# qpid_reconnect_limit=0 +#### (IntOpt) Max reconnections before giving up + +# qpid_reconnect_interval_min=0 +#### (IntOpt) Minimum seconds between reconnection attempts + +# qpid_reconnect_interval_max=0 +#### (IntOpt) Maximum seconds between reconnection attempts + +# qpid_reconnect_interval=0 +#### (IntOpt) Equivalent to setting max and min to the same value + +# qpid_heartbeat=5 +#### (IntOpt) Seconds between connection keepalive heartbeats + +# qpid_protocol="tcp" +#### (StrOpt) Transport to use, either 'tcp' or 'ssl' + +# qpid_tcp_nodelay=true +#### (BoolOpt) Disable Nagle algorithm + + +######## defined in nova.scheduler.driver ######## + +# scheduler_host_manager="nova.scheduler.host_manager.HostManager" +#### (StrOpt) The scheduler host manager class to use + + +######## defined in nova.scheduler.filters.core_filter ######## + +# cpu_allocation_ratio=16.0 +#### (FloatOpt) Virtual CPU to Physical CPU allocation ratio + + +######## defined in nova.scheduler.filters.ram_filter ######## + +# ram_allocation_ratio=1.5 +#### (FloatOpt) virtual ram to physical ram allocation ratio + + +######## defined in nova.scheduler.host_manager ######## + +# reserved_host_disk_mb=0 +#### (IntOpt) Amount of disk in MB to reserve for host/dom0 + +# reserved_host_memory_mb=512 +#### (IntOpt) Amount of memory in MB to reserve for host/dom0 + +# scheduler_available_filters="nova.scheduler.filters.standard_filters" +#### (MultiStrOpt) Filter classes available to the scheduler which may be +#### specified more than once. 
An entry of +#### "nova.scheduler.filters.standard_filters" maps to all +#### filters included with nova. + +# scheduler_default_filters="AvailabilityZoneFilter,RamFilter,ComputeFilter" +#### (ListOpt) Which filter class names to use for filtering hosts when not +#### specified in the request. + + +######## defined in nova.scheduler.least_cost ######## + +# least_cost_functions="nova.scheduler.least_cost.compute_fill_first_cost_fn" +#### (ListOpt) Which cost functions the LeastCostScheduler should use + +# noop_cost_fn_weight=1.0 +#### (FloatOpt) How much weight to give the noop cost function + +# compute_fill_first_cost_fn_weight=-1.0 +#### (FloatOpt) How much weight to give the fill-first cost function. A +#### negative value will reverse behavior: e.g. spread-first + + +######## defined in nova.scheduler.manager ######## + +# scheduler_driver="nova.scheduler.multi.MultiScheduler" +#### (StrOpt) Default driver to use for the scheduler + + +######## defined in nova.scheduler.multi ######## + +# compute_scheduler_driver="nova.scheduler.filter_scheduler.FilterScheduler" +#### (StrOpt) Driver to use for scheduling compute calls + +# volume_scheduler_driver="nova.scheduler.chance.ChanceScheduler" +#### (StrOpt) Driver to use for scheduling volume calls + + +######## defined in nova.scheduler.scheduler_options ######## + +# scheduler_json_config_location="" +#### (StrOpt) Absolute path to scheduler configuration JSON file. + + +######## defined in nova.scheduler.simple ######## + +# max_cores=16 +#### (IntOpt) maximum number of instance cores to allow per host + +# max_gigabytes=10000 +#### (IntOpt) maximum number of volume gigabytes to allow per host + +# max_networks=1000 +#### (IntOpt) maximum number of networks to allow per host + +# skip_isolated_core_check=true +#### (BoolOpt) Allow overcommitting vcpus on isolated hosts -######### defined in nova.virt.baremetal.nodes ######### -###### (StrOpt) Bare-metal driver runs on +######## defined in nova.virt.baremetal.nodes ######## + # baremetal_driver="tilera" -###### (StrOpt) Tilera command line program for Bare-metal driver -# tile_monitor="/usr/local/TileraMDE/bin/tile-monitor" +#### (StrOpt) Bare-metal driver runs on + -######### defined in nova.virt.baremetal.proxy ######### +######## defined in nova.virt.baremetal.proxy ######## -###### (BoolOpt) Whether to allow in project network traffic -# baremetal_allow_project_net_traffic=true -###### (StrOpt) Template file for injected network -# baremetal_injected_network_template="$pybasedir/nova/virt/interfaces.template" -###### (StrOpt) baremetal domain type # baremetal_type="baremetal" -###### (StrOpt) Override the default baremetal URI -# baremetal_uri="" -###### (BoolOpt) Force backing images to raw format -# force_raw_images=true -###### (ListOpt) Order of methods used to mount disk images -# img_handlers="loop,nbd,guestfs" -###### (StrOpt) Template file for injected network +#### (StrOpt) baremetal domain type + + +######## defined in nova.virt.baremetal.tilera ######## + +# tile_monitor="/usr/local/TileraMDE/bin/tile-monitor" +#### (StrOpt) Tilera command line program for Bare-metal driver + + +######## defined in nova.virt.disk.api ######## + # injected_network_template="$pybasedir/nova/virt/interfaces.template" -###### (IntOpt) maximum number of possible nbd devices -# max_nbd_devices=16 -###### (IntOpt) time to wait for a NBD device coming up -# timeout_nbd=10 -###### (MultiStrOpt) mkfs commands for ephemeral device. 
The format is <os_type>=<mkfs command> +#### (StrOpt) Template file for injected network + +# img_handlers="loop,nbd,guestfs" +#### (ListOpt) Order of methods used to mount disk images + # virt_mkfs="default=mkfs.ext3 -L %(fs_label)s -F %(target)s" # virt_mkfs="linux=mkfs.ext3 -L %(fs_label)s -F %(target)s" # virt_mkfs="windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s" +#### (MultiStrOpt) mkfs commands for ephemeral device. The format is +#### <os_type>=<mkfs command> -######### defined in nova.virt.firewall ######### -###### (BoolOpt) Whether to allow network traffic from same network +######## defined in nova.virt.disk.nbd ######## + +# timeout_nbd=10 +#### (IntOpt) time to wait for a NBD device coming up + +# max_nbd_devices=16 +#### (IntOpt) maximum number of possible nbd devices + + +######## defined in nova.virt.firewall ######## + # allow_same_net_traffic=true +#### (BoolOpt) Whether to allow network traffic from same network -######### defined in nova.virt.libvirt.connection ######### -###### (StrOpt) Define block migration behavior. -# block_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC" -###### (BoolOpt) Write a checksum for files in _base to disk -# checksum_base_images=false -###### (StrOpt) CpuInfo XML Template (Used only live migration now) -# cpuinfo_xml_template="$pybasedir/nova/virt/cpuinfo.xml.template" -###### (StrOpt) Override the default disk prefix for the devices attached to a server, which is dependent on libvirt_type. (valid options are: sd, xvd, uvd, vd) -# libvirt_disk_prefix=<None> -###### (BoolOpt) Inject the admin password at boot time, without an agent. -# libvirt_inject_password=false -###### (BoolOpt) Use a separated OS thread pool to realize non-blocking libvirt calls -# libvirt_nonblocking=false -###### (StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml, xen) +######## defined in nova.virt.images ######## + +# force_raw_images=true +#### (BoolOpt) Force backing images to raw format + + +######## defined in nova.virt.libvirt.connection ######## + +# rescue_image_id=<None> +#### (StrOpt) Rescue ami image + +# rescue_kernel_id=<None> +#### (StrOpt) Rescue aki image + +# rescue_ramdisk_id=<None> +#### (StrOpt) Rescue ari image + # libvirt_type="kvm" -###### (StrOpt) Override the default libvirt URI (which is dependent on libvirt_type) +#### (StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +#### xen) + # libvirt_uri="" -###### (BoolOpt) Use virtio for bridge interfaces -# libvirt_use_virtio_for_bridges=false -###### (StrOpt) The libvirt VIF driver to configure the VIFs. +#### (StrOpt) Override the default libvirt URI (which is dependent on +#### libvirt_type) + +# libvirt_inject_password=false +#### (BoolOpt) Inject the admin password at boot time, without an agent. + +# libvirt_inject_key=true +#### (BoolOpt) Inject the ssh public key at boot time + +# libvirt_inject_partition=1 +#### (IntOpt) The partition to inject to : -1 => inspect (libguestfs +#### only), 0 => not partitioned, >0 => partition number + +# use_usb_tablet=true +#### (BoolOpt) Sync virtual and real mouse cursors in Windows VMs + +# live_migration_uri="qemu+tcp://%s/system" +#### (StrOpt) Define protocol used by live_migration feature + +# live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER" +#### (StrOpt) Define live migration behavior. 
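For context, a hedged sketch of a KVM live-migration setup built only from the libvirt options shown here (illustrative; the %s placeholder in live_migration_uri is filled in with the migration target host):

    libvirt_type=kvm
    live_migration_uri=qemu+tcp://%s/system
    live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
    live_migration_bandwidth=0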
+ +# block_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC" +#### (StrOpt) Define block migration behavior. + +# live_migration_bandwidth=0 +#### (IntOpt) Define live migration behavior + +# snapshot_image_format=<None> +#### (StrOpt) Snapshot image format (valid options are : raw, qcow2, vmdk, +#### vdi). Defaults to same as source image + # libvirt_vif_driver="nova.virt.libvirt.vif.LibvirtBridgeDriver" -###### (StrOpt) Type of VIF to create. -# libvirt_vif_type="bridge" -###### (ListOpt) Libvirt handlers for remote volumes. +#### (StrOpt) The libvirt VIF driver to configure the VIFs. + # libvirt_volume_drivers="iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver" -###### (IntOpt) Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window. +#### (ListOpt) Libvirt handlers for remote volumes. + +# libvirt_disk_prefix=<None> +#### (StrOpt) Override the default disk prefix for the devices attached to +#### a server, which is dependent on libvirt_type. (valid options +#### are: sd, xvd, uvd, vd) + # libvirt_wait_soft_reboot_seconds=120 -###### (StrOpt) Libvirt XML Template -# libvirt_xml_template="$pybasedir/nova/virt/libvirt.xml.template" -###### (IntOpt) Define live migration behavior -# live_migration_bandwidth=0 -###### (StrOpt) Define live migration behavior. -# live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER" -###### (StrOpt) Define protocol used by live_migration feature -# live_migration_uri="qemu+tcp://%s/system" -###### (BoolOpt) Should unused base images be removed? +#### (IntOpt) Number of seconds to wait for instance to shut down after +#### soft reboot request is made. We fall back to hard reboot if +#### instance does not shutdown within this window. + +# libvirt_nonblocking=false +#### (BoolOpt) Use a separated OS thread pool to realize non-blocking +#### libvirt calls + +# force_config_drive=<None> +#### (StrOpt) Set to force injection to take place on a config drive (if +#### set, valid options are: always) + + +######## defined in nova.virt.libvirt.imagecache ######## + # remove_unused_base_images=false -###### (IntOpt) Unused unresized base images younger than this will not be removed -# remove_unused_original_minimum_age_seconds=86400 -###### (IntOpt) Unused resized base images younger than this will not be removed +#### (BoolOpt) Should unused base images be removed? + # remove_unused_resized_minimum_age_seconds=3600 -###### (StrOpt) Rescue ami image -# rescue_image_id=<None> -###### (StrOpt) Rescue aki image -# rescue_kernel_id=<None> -###### (StrOpt) Rescue ari image -# rescue_ramdisk_id=<None> -###### (StrOpt) Snapshot image format (valid options are : raw, qcow2, vmdk, vdi). 
Defaults to same as source image -# snapshot_image_format=<None> -###### (BoolOpt) Sync virtual and real mouse cursors in Windows VMs -# use_usb_tablet=true +#### (IntOpt) Unused resized base images younger than this will not be +#### removed + +# remove_unused_original_minimum_age_seconds=86400 +#### (IntOpt) Unused unresized base images younger than this will not be +#### removed + +# checksum_base_images=false +#### (BoolOpt) Write a checksum for files in _base to disk + -######### defined in nova.virt.libvirt.vif ######### +######## defined in nova.virt.libvirt.utils ######## + +# image_info_filename_pattern="$instances_path/$base_dir_name/%(image)s.info" +#### (StrOpt) Allows image information files to be stored in non-standard +#### locations + + +######## defined in nova.virt.libvirt.vif ######## -###### (StrOpt) Name of Integration Bridge used by Open vSwitch # libvirt_ovs_bridge="br-int" +#### (StrOpt) Name of Integration Bridge used by Open vSwitch + +# libvirt_use_virtio_for_bridges=false +#### (BoolOpt) Use virtio for bridge interfaces -######### defined in nova.virt.vmwareapi.vim ######### -###### (StrOpt) VIM Service WSDL Location e.g http://<server>/vimService.wsdl. Due to a bug in vSphere ESX 4.1 default wsdl. Refer readme-vmware to setup +######## defined in nova.virt.vmwareapi.vim ######## + # vmwareapi_wsdl_loc=<None> +#### (StrOpt) VIM Service WSDL Location e.g +#### http://<server>/vimService.wsdl. Due to a bug in vSphere ESX +#### 4.1 default wsdl. Refer readme-vmware to setup + -######### defined in nova.virt.vmwareapi.vmops ######### +######## defined in nova.virt.vmwareapi.vmops ######## -###### (StrOpt) The VMWare VIF driver to configure the VIFs. # vmware_vif_driver="nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver" +#### (StrOpt) The VMWare VIF driver to configure the VIFs. -######### defined in nova.virt.vmwareapi_conn ######### -###### (FloatOpt) The number of times we retry on failures, e.g., socket error, etc. Used only if connection_type is vmwareapi -# vmwareapi_api_retry_count=10 -###### (StrOpt) URL for connection to VMWare ESX host.Required if connection_type is vmwareapi. +######## defined in nova.virt.vmwareapi_conn ######## + # vmwareapi_host_ip=<None> -###### (StrOpt) Password for connection to VMWare ESX host. Used only if connection_type is vmwareapi. -# vmwareapi_host_password=<None> -###### (StrOpt) Username for connection to VMWare ESX host. Used only if connection_type is vmwareapi. +#### (StrOpt) URL for connection to VMWare ESX host.Required if +#### connection_type is vmwareapi. + # vmwareapi_host_username=<None> -###### (FloatOpt) The interval used for polling of remote tasks. Used only if connection_type is vmwareapi +#### (StrOpt) Username for connection to VMWare ESX host. Used only if +#### connection_type is vmwareapi. + +# vmwareapi_host_password=<None> +#### (StrOpt) Password for connection to VMWare ESX host. Used only if +#### connection_type is vmwareapi. + # vmwareapi_task_poll_interval=5.0 -###### (StrOpt) Physical ethernet adapter name for vlan networking +#### (FloatOpt) The interval used for polling of remote tasks. Used only if +#### connection_type is vmwareapi + +# vmwareapi_api_retry_count=10 +#### (IntOpt) The number of times we retry on failures, e.g., socket +#### error, etc. 
Used only if connection_type is vmwareapi + # vmwareapi_vlan_interface="vmnic0" +#### (StrOpt) Physical ethernet adapter name for vlan networking -######### defined in nova.virt.xenapi.pool ######### -###### (IntOpt) time to wait for a block device to be created -# block_device_creation_timeout=10 -###### (StrOpt) Default OS type -# default_os_type="linux" -###### (IntOpt) maximum size in bytes of kernel or ramdisk images -# max_kernel_ramdisk_size=16777216 -###### (StrOpt) Filter for finding the SR to be used to install guest instances on. The default value is the Local Storage in default XenServer/XCP installations. To select an SR with a different matching criteria, you could set it to other-config:my_favorite_sr=true. On the other hand, to fall back on the Default SR, as displayed by XenCenter, set this flag to: default-sr:true -# sr_matching_filter="other-config:i18n-key=local-storage" -###### (BoolOpt) To use for hosts with different CPUs -# use_join_force=true -###### (BoolOpt) Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced -# xenapi_sparse_copy=true +######## defined in nova.virt.xenapi.connection ######## -######### defined in nova.virt.xenapi.vif ######### +# xenapi_connection_url=<None> +#### (StrOpt) URL for connection to XenServer/Xen Cloud Platform. Required +#### if connection_type=xenapi. -###### (StrOpt) Name of Integration Bridge used by Open vSwitch -# xenapi_ovs_integration_bridge="xapi1" +# xenapi_connection_username="root" +#### (StrOpt) Username for connection to XenServer/Xen Cloud Platform. +#### Used only if connection_type=xenapi. + +# xenapi_connection_password=<None> +#### (StrOpt) Password for connection to XenServer/Xen Cloud Platform. +#### Used only if connection_type=xenapi. -######### defined in nova.virt.xenapi.vmops ######### +# xenapi_connection_concurrent=5 +#### (IntOpt) Maximum number of concurrent XenAPI connections. Used only +#### if connection_type=xenapi. -###### (IntOpt) number of seconds to wait for agent to be fully operational -# agent_version_timeout=300 -###### (BoolOpt) Whether to generate swap (False means fetching it from OVA) -# xenapi_generate_swap=false -###### (IntOpt) number of seconds to wait for instance to go to running state -# xenapi_running_timeout=60 -###### (StrOpt) The XenAPI VIF driver using XenServer Network APIs. -# xenapi_vif_driver="nova.virt.xenapi.vif.XenAPIBridgeDriver" +# xenapi_vhd_coalesce_poll_interval=5.0 +#### (FloatOpt) The interval used for polling of coalescing vhds. Used only +#### if connection_type=xenapi. -######### defined in nova.virt.xenapi_conn ######### +# xenapi_check_host=true +#### (BoolOpt) Ensure compute service is running on host XenAPI connects +#### to. + +# xenapi_vhd_coalesce_max_attempts=5 +#### (IntOpt) Max number of times to poll for VHD to coalesce. Used only +#### if connection_type=xenapi. -###### (StrOpt) IQN Prefix -# iqn_prefix="iqn.2010-10.org.openstack" -###### (StrOpt) iSCSI Target Host -# target_host=<None> -###### (StrOpt) iSCSI Target Port, 3260 Default -# target_port="3260" -###### (StrOpt) Specifies the path in which the xenapi guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if connection_type=xenapi and flat_injected=True # xenapi_agent_path="usr/sbin/xe-update-networking" -###### (IntOpt) Maximum number of concurrent XenAPI connections. 
Used only if connection_type=xenapi. -# xenapi_connection_concurrent=5 -###### (StrOpt) Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi. -# xenapi_connection_password=<None> -###### (StrOpt) URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi. -# xenapi_connection_url=<None> -###### (StrOpt) Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi. -# xenapi_connection_username="root" -###### (IntOpt) Timeout in seconds for XenAPI login. -# xenapi_login_timeout=10 -###### (BoolOpt) Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick) -# xenapi_remap_vbd_dev=false -###### (StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb) -# xenapi_remap_vbd_dev_prefix="sd" -###### (StrOpt) Base path to the storage repository +#### (StrOpt) Specifies the path in which the xenapi guest agent should be +#### located. If the agent is present, network configuration is +#### not injected into the image. Used if connection_type=xenapi +#### and flat_injected=True + # xenapi_sr_base_path="/var/run/sr-mount" -###### (IntOpt) Max number of times to poll for VHD to coalesce. Used only if connection_type=xenapi. -# xenapi_vhd_coalesce_max_attempts=5 -###### (FloatOpt) The interval used for polling of coalescing vhds. Used only if connection_type=xenapi. -# xenapi_vhd_coalesce_poll_interval=5.0 +#### (StrOpt) Base path to the storage repository -######### defined in nova.console.manager ######### +# target_host=<None> +#### (StrOpt) iSCSI Target Host -###### (StrOpt) Driver to use for the console proxy -# console_driver="nova.console.xvp.XVPConsoleProxy" -###### (StrOpt) Publicly visible name for this console host -# console_public_hostname="nova" -###### (BoolOpt) Stub calls to compute worker for tests -# stub_compute=false +# target_port="3260" +#### (StrOpt) iSCSI Target Port, 3260 Default -######### defined in nova.console.vmrc ######### +# iqn_prefix="iqn.2010-10.org.openstack" +#### (StrOpt) IQN Prefix -###### (IntOpt) number of retries for retrieving VMRC information -# console_vmrc_error_retries=10 -###### (IntOpt) port for VMware VMRC connections -# console_vmrc_port=443 +# xenapi_remap_vbd_dev=false +#### (BoolOpt) Used to enable the remapping of VBD dev (Works around an +#### issue in Ubuntu Maverick) -######### defined in nova.console.xvp ######### +# xenapi_remap_vbd_dev_prefix="sd" +#### (StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -> +#### /dev/sdb) -###### (StrOpt) generated XVP conf file -# console_xvp_conf="/etc/xvp.conf" -###### (StrOpt) XVP conf template -# console_xvp_conf_template="$pybasedir/nova/console/xvp.conf.template" -###### (StrOpt) XVP log file -# console_xvp_log="/var/log/xvp.log" -###### (IntOpt) port for XVP to multiplex VNC connections on -# console_xvp_multiplex_port=5900 -###### (StrOpt) XVP master process pid file -# console_xvp_pid="/var/run/xvp.pid" +# xenapi_login_timeout=10 +#### (IntOpt) Timeout in seconds for XenAPI login. 
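A hedged sketch of pointing nova-compute at a XenServer host with the connection options above (illustrative values; connection_type is the flag referenced by the "Used only if connection_type=xenapi" notes in these descriptions):

    connection_type=xenapi
    xenapi_connection_url=https://my-xenserver-host
    xenapi_connection_username=root
    xenapi_connection_password=changeme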
-######### defined in nova.consoleauth.manager ######### -###### (IntOpt) How many seconds before deleting tokens -# console_token_ttl=600 -###### (StrOpt) Manager for console auth -# consoleauth_manager="nova.consoleauth.manager.ConsoleAuthManager" +######## defined in nova.virt.xenapi.pool ######## -######### defined in nova.image.s3 ######### +# use_join_force=true +#### (BoolOpt) To use for hosts with different CPUs -###### (StrOpt) parent dir for tempdir used for image decryption -# image_decryption_dir="/tmp" -###### (StrOpt) access key to use for s3 server for images -# s3_access_key="notchecked" -###### (BoolOpt) whether to affix the tenant id to the access key when downloading from s3 -# s3_affix_tenant=false -###### (StrOpt) secret key to use for s3 server for images -# s3_secret_key="notchecked" -###### (BoolOpt) whether to use ssl when talking to s3 -# s3_use_ssl=false -######### defined in nova.cloudpipe.pipelib ######### +######## defined in nova.virt.xenapi.vif ######## -###### (StrOpt) Template for cloudpipe instance boot script -# boot_script_template="$pybasedir/nova/cloudpipe/bootscript.template" -###### (StrOpt) Netmask to push into openvpn config -# dmz_mask="255.255.255.0" -###### (StrOpt) Network to push into openvpn config -# dmz_net="10.0.0.0" -###### (StrOpt) Instance type for vpn instances -# vpn_instance_type="m1.tiny" +# xenapi_ovs_integration_bridge="xapi1" +#### (StrOpt) Name of Integration Bridge used by Open vSwitch -######### defined in nova.notifier.list_notifier ######### -###### (MultiStrOpt) List of drivers to send notifications -# list_notifier_drivers="nova.notifier.no_op_notifier" +######## defined in nova.virt.xenapi.vm_utils ######## -######### defined in nova.notifier.rabbit_notifier ######### +# default_os_type="linux" +#### (StrOpt) Default OS type -###### (ListOpt) AMQP topic used for Nova notifications -# notification_topics="notifications" +# block_device_creation_timeout=10 +#### (IntOpt) Time to wait for a block device to be created -######### defined in nova.objectstore.s3server ######### +# max_kernel_ramdisk_size=16777216 +#### (IntOpt) Maximum size in bytes of kernel or ramdisk images -###### (StrOpt) path to s3 buckets -# buckets_path="$state_path/buckets" -###### (StrOpt) IP address for S3 API to listen -# s3_listen="0.0.0.0" -###### (IntOpt) port for s3 api to listen -# s3_listen_port=3333 +# sr_matching_filter="other-config:i18n-key=local-storage" +#### (StrOpt) Filter for finding the SR to be used to install guest +#### instances on. The default value is the Local Storage in +#### default XenServer/XCP installations. To select an SR with a +#### different matching criteria, you could set it to other- +#### config:my_favorite_sr=true. On the other hand, to fall back +#### on the Default SR, as displayed by XenCenter, set this flag +#### to: default-sr:true -######### defined in nova.rpc.common ######### +# xenapi_sparse_copy=true +#### (BoolOpt) Whether to use sparse_copy for copying data on a resize down +#### (False will use standard dd). 
This speeds up resizes down +#### considerably since large runs of zeros won't have to be +#### rsynced -###### (ListOpt) Modules of exceptions that are permitted to be recreated -# allowed_rpc_exception_modules="nova.exception" +# xenapi_num_vbd_unplug_retries=10 +#### (IntOpt) Maximum number of retries to unplug VBD -######### defined in nova.rpc.impl_kombu ######### -###### (StrOpt) SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs="" -###### (StrOpt) SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile="" -###### (StrOpt) SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile="" -###### (StrOpt) SSL version to use (valid only if SSL enabled) -# kombu_ssl_version="" +######## defined in nova.virt.xenapi.vmops ######## -######### defined in nova.rpc.impl_qpid ######### +# agent_version_timeout=300 +#### (IntOpt) number of seconds to wait for agent to be fully operational -###### (IntOpt) Seconds between connection keepalive heartbeats -# qpid_heartbeat=5 -###### (StrOpt) Qpid broker hostname -# qpid_hostname="localhost" -###### (StrOpt) Password for qpid connection -# qpid_password="" -###### (StrOpt) Qpid broker port -# qpid_port="5672" -###### (StrOpt) Transport to use, either 'tcp' or 'ssl' -# qpid_protocol="tcp" -###### (BoolOpt) Automatically reconnect -# qpid_reconnect=true -###### (IntOpt) Equivalent to setting max and min to the same value -# qpid_reconnect_interval=0 -###### (IntOpt) Maximum seconds between reconnection attempts -# qpid_reconnect_interval_max=0 -###### (IntOpt) Minimum seconds between reconnection attempts -# qpid_reconnect_interval_min=0 -###### (IntOpt) Max reconnections before giving up -# qpid_reconnect_limit=0 -###### (IntOpt) Reconnection timeout in seconds -# qpid_reconnect_timeout=0 -###### (StrOpt) Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms="" -###### (BoolOpt) Disable Nagle algorithm -# qpid_tcp_nodelay=true -###### (StrOpt) Username for qpid connection -# qpid_username="" +# xenapi_running_timeout=60 +#### (IntOpt) number of seconds to wait for instance to go to running +#### state -######### defined in nova.scheduler.driver ######### +# xenapi_vif_driver="nova.virt.xenapi.vif.XenAPIBridgeDriver" +#### (StrOpt) The XenAPI VIF driver using XenServer Network APIs. 
-###### (StrOpt) The scheduler host manager class to use -# scheduler_host_manager="nova.scheduler.host_manager.HostManager" +# xenapi_generate_swap=false +#### (BoolOpt) Whether to generate swap (False means fetching it from OVA) -######### defined in nova.scheduler.filters.core_filter ######### -###### (FloatOpt) Virtual CPU to Physical CPU allocation ratio -# cpu_allocation_ratio=16.0 +######## defined in nova.vnc ######## -######### defined in nova.scheduler.filters.ram_filter ######### +# novncproxy_base_url="http://127.0.0.1:6080/vnc_auto.html" +#### (StrOpt) location of vnc console proxy, in the form +#### "http://127.0.0.1:6080/vnc_auto.html" -###### (FloatOpt) virtual ram to physical ram allocation ratio -# ram_allocation_ratio=1.5 +# xvpvncproxy_base_url="http://127.0.0.1:6081/console" +#### (StrOpt) location of nova xvp vnc console proxy, in the form +#### "http://127.0.0.1:6081/console" -######### defined in nova.scheduler.host_manager ######### +# vncserver_listen="127.0.0.1" +#### (StrOpt) Ip address on which instance vncserversshould listen -###### (IntOpt) Amount of disk in MB to reserve for host/dom0 -# reserved_host_disk_mb=0 -###### (IntOpt) Amount of memory in MB to reserve for host/dom0 -# reserved_host_memory_mb=512 -###### (MultiStrOpt) Filter classes available to the scheduler which may be specified more than once. An entry of "nova.scheduler.filters.standard_filters" maps to all filters included with nova. -# scheduler_available_filters="nova.scheduler.filters.standard_filters" -###### (ListOpt) Which filter class names to use for filtering hosts when not specified in the request. -# scheduler_default_filters="AvailabilityZoneFilter,RamFilter,ComputeFilter" +# vncserver_proxyclient_address="127.0.0.1" +#### (StrOpt) the address to which proxy clients (like nova-xvpvncproxy) +#### should connect -######### defined in nova.scheduler.least_cost ######### +# vnc_enabled=true +#### (BoolOpt) enable vnc related features -###### (FloatOpt) How much weight to give the fill-first cost function. A negative value will reverse behavior: e.g. spread-first -# compute_fill_first_cost_fn_weight=-1.0 -###### (ListOpt) Which cost functions the LeastCostScheduler should use -# least_cost_functions="nova.scheduler.least_cost.compute_fill_first_cost_fn" -###### (FloatOpt) How much weight to give the noop cost function -# noop_cost_fn_weight=1.0 +# vnc_keymap="en-us" +#### (StrOpt) keymap for vnc -######### defined in nova.scheduler.manager ######### -###### (StrOpt) Default driver to use for the scheduler -# scheduler_driver="nova.scheduler.multi.MultiScheduler" +######## defined in nova.vnc.xvp_proxy ######## -######### defined in nova.scheduler.multi ######### +# xvpvncproxy_port=6081 +#### (IntOpt) Port that the XCP VNC proxy should bind to -###### (StrOpt) Driver to use for scheduling compute calls -# compute_scheduler_driver="nova.scheduler.filter_scheduler.FilterScheduler" -###### (StrOpt) Driver to use for scheduling volume calls -# volume_scheduler_driver="nova.scheduler.chance.ChanceScheduler" +# xvpvncproxy_host="0.0.0.0" +#### (StrOpt) Address that the XCP VNC proxy should bind to -######### defined in nova.scheduler.scheduler_options ######### -###### (StrOpt) Absolute path to scheduler configuration JSON file. 
-# scheduler_json_config_location="" +######## defined in nova.volume.driver ######## -######### defined in nova.scheduler.simple ######### +# volume_group="nova-volumes" +#### (StrOpt) Name for the VG that will contain exported volumes -###### (IntOpt) maximum number of instance cores to allow per host -# max_cores=16 -###### (IntOpt) maximum number of volume gigabytes to allow per host -# max_gigabytes=10000 -###### (IntOpt) maximum number of networks to allow per host -# max_networks=1000 -###### (BoolOpt) Allow overcommitting vcpus on isolated hosts -# skip_isolated_core_check=true +# num_shell_tries=3 +#### (IntOpt) number of times to attempt to run flakey shell commands -######### defined in nova.volume.driver ######### +# num_iscsi_scan_tries=3 +#### (IntOpt) number of times to rescan iSCSI target to find volume -###### (StrOpt) iscsi target user-land tool to use -# iscsi_helper="ietadm" -###### (StrOpt) use this ip for iscsi -# iscsi_ip_address="$my_ip" -###### (IntOpt) Number of iscsi target ids per host # iscsi_num_targets=100 -###### (IntOpt) The port that the iSCSI daemon is listening on -# iscsi_port=3260 -###### (StrOpt) prefix for iscsi volumes +#### (IntOpt) Number of iscsi target ids per host + # iscsi_target_prefix="iqn.2010-10.org.openstack:" -###### (StrOpt) number of times to rescan iSCSI target to find volume -# num_iscsi_scan_tries="3" -###### (StrOpt) number of times to attempt to run flakey shell commands -# num_shell_tries="3" -###### (StrOpt) the rbd pool in which volumes are stored +#### (StrOpt) prefix for iscsi volumes + +# iscsi_ip_address="$my_ip" +#### (StrOpt) use this ip for iscsi + +# iscsi_port=3260 +#### (IntOpt) The port that the iSCSI daemon is listening on + # rbd_pool="rbd" -###### (StrOpt) Name for the VG that will contain exported volumes -# volume_group="nova-volumes" +#### (StrOpt) the RADOS pool in which rbd volumes are stored + +# rbd_user=<None> +#### (StrOpt) the RADOS client name for accessing rbd volumes -######### defined in nova.volume.netapp ######### +# rbd_secret_uuid=<None> +#### (StrOpt) the libvirt uuid of the secret for the rbd_uservolumes + + +######## defined in nova.volume.iscsi ######## + +# iscsi_helper="ietadm" +#### (StrOpt) iscsi target user-land tool to use + + +######## defined in nova.volume.manager ######## + +# storage_availability_zone="nova" +#### (StrOpt) availability zone of this service + +# volume_driver="nova.volume.driver.ISCSIDriver" +#### (StrOpt) Driver to use for volume creation + +# use_local_volumes=true +#### (BoolOpt) if True, will not discover local volumes + +# volume_force_update_capabilities=false +#### (BoolOpt) if True will force update capabilities on each check + + +######## defined in nova.volume.netapp ######## + +# netapp_wsdl_url=<None> +#### (StrOpt) URL of the WSDL file for the DFM server -###### (StrOpt) User name for the DFM server # netapp_login=<None> -###### (StrOpt) Password for the DFM server +#### (StrOpt) User name for the DFM server + # netapp_password=<None> -###### (StrOpt) Hostname for the DFM server +#### (StrOpt) Password for the DFM server + # netapp_server_hostname=<None> -###### (IntOpt) Port number for the DFM server +#### (StrOpt) Hostname for the DFM server + # netapp_server_port=8088 -###### (StrOpt) Storage service to use for provisioning +#### (IntOpt) Port number for the DFM server + # netapp_storage_service=<None> -###### (StrOpt) Vfiler to use for provisioning +#### (StrOpt) Storage service to use for provisioning + # netapp_vfiler=<None> -###### (StrOpt) URL 
of the WSDL file for the DFM server -# netapp_wsdl_url=<None> +#### (StrOpt) Vfiler to use for provisioning -######### defined in nova.volume.nexenta.volume ######### -###### (StrOpt) block size for volumes (blank=default,8KB) -# nexenta_blocksize="" -###### (StrOpt) IP address of Nexenta SA +######## defined in nova.volume.nexenta.volume ######## + # nexenta_host="" -###### (IntOpt) Nexenta target portal port -# nexenta_iscsi_target_portal_port=3260 -###### (StrOpt) Password to connect to Nexenta SA -# nexenta_password="nexenta" -###### (IntOpt) HTTP port to connect to Nexenta REST API server +#### (StrOpt) IP address of Nexenta SA + # nexenta_rest_port=2000 -###### (StrOpt) Use http or https for REST connection (default auto) +#### (IntOpt) HTTP port to connect to Nexenta REST API server + # nexenta_rest_protocol="auto" -###### (BoolOpt) flag to create sparse volumes -# nexenta_sparse=false -###### (StrOpt) prefix for iSCSI target groups on SA -# nexenta_target_group_prefix="nova/" -###### (StrOpt) IQN prefix for iSCSI targets -# nexenta_target_prefix="iqn.1986-03.com.sun:02:nova-" -###### (StrOpt) User name to connect to Nexenta SA +#### (StrOpt) Use http or https for REST connection (default auto) + # nexenta_user="admin" -###### (StrOpt) pool on SA that will hold all volumes +#### (StrOpt) User name to connect to Nexenta SA + +# nexenta_password="nexenta" +#### (StrOpt) Password to connect to Nexenta SA + +# nexenta_iscsi_target_portal_port=3260 +#### (IntOpt) Nexenta target portal port + # nexenta_volume="nova" +#### (StrOpt) pool on SA that will hold all volumes -######### defined in nova.volume.san ######### +# nexenta_target_prefix="iqn.1986-03.com.sun:02:nova-" +#### (StrOpt) IQN prefix for iSCSI targets + +# nexenta_target_group_prefix="nova/" +#### (StrOpt) prefix for iSCSI target groups on SA + +# nexenta_blocksize="" +#### (StrOpt) block size for volumes (blank=default,8KB) + +# nexenta_sparse=false +#### (BoolOpt) flag to create sparse volumes + + +######## defined in nova.volume.san ######## + +# san_thin_provision=true +#### (BoolOpt) Use thin provisioning for SAN volumes? -###### (StrOpt) Cluster name to use for creating volumes -# san_clustername="" -###### (StrOpt) IP address of SAN controller # san_ip="" -###### (BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device -# san_is_local=false -###### (StrOpt) Username for SAN controller +#### (StrOpt) IP address of SAN controller + # san_login="admin" -###### (StrOpt) Password for SAN controller +#### (StrOpt) Username for SAN controller + # san_password="" -###### (StrOpt) Filename of private key to use for SSH authentication +#### (StrOpt) Password for SAN controller + # san_private_key="" -###### (IntOpt) SSH port to use with SAN +#### (StrOpt) Filename of private key to use for SSH authentication + +# san_clustername="" +#### (StrOpt) Cluster name to use for creating volumes + # san_ssh_port=22 -###### (BoolOpt) Use thin provisioning for SAN volumes? -# san_thin_provision=true -###### (StrOpt) The ZFS path under which to create zvols for volumes. +#### (IntOpt) SSH port to use with SAN + +# san_is_local=false +#### (BoolOpt) Execute commands locally instead of over SSH; use if the +#### volume service is running on the SAN device + # san_zfs_volume_base="rpool/" +#### (StrOpt) The ZFS path under which to create zvols for volumes. 
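Rounding off the volume options, a small illustrative override of the iSCSI-related settings listed above (example values only; the commented lines in the sample show the built-in defaults):

    volume_driver=nova.volume.driver.ISCSIDriver
    volume_group=nova-volumes
    iscsi_ip_address=192.168.1.10
    iscsi_helper=ietadm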
+ -# Total option count: 467 +# Total option count: 477 diff --git a/etc/nova/policy.json b/etc/nova/policy.json index aa44be0bd..6fa6da5fb 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -43,6 +43,7 @@ "compute_extension:keypairs": [], "compute_extension:multinic": [], "compute_extension:networks": [["rule:admin_api"]], + "compute_extension:networks:view": [], "compute_extension:quotas": [], "compute_extension:quota_classes": [], "compute_extension:rescue": [], diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 33a5c4af4..fcc719969 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -31,7 +31,6 @@ from nova.api.ec2 import apirequest from nova.api.ec2 import ec2utils from nova.api.ec2 import faults from nova.api import validator -from nova.auth import manager from nova import context from nova import exception from nova import flags @@ -188,76 +187,6 @@ class Lockout(wsgi.Middleware): return res -class EC2Token(wsgi.Middleware): - """Deprecated, only here to make merging easier.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - # Read request signature and access id. - try: - signature = req.params['Signature'] - access = req.params['AWSAccessKeyId'] - except KeyError, e: - LOG.exception(e) - raise webob.exc.HTTPBadRequest() - - # Make a copy of args for authentication and signature verification. - auth_params = dict(req.params) - # Not part of authentication args - auth_params.pop('Signature') - - if "ec2" in FLAGS.keystone_ec2_url: - LOG.warning("Configuration setting for keystone_ec2_url needs " - "to be updated to /tokens only. The /ec2 prefix is " - "being deprecated") - # Authenticate the request. - creds = {'ec2Credentials': {'access': access, - 'signature': signature, - 'host': req.host, - 'verb': req.method, - 'path': req.path, - 'params': auth_params, - }} - else: - # Authenticate the request. - creds = {'auth': {'OS-KSEC2:ec2Credentials': {'access': access, - 'signature': signature, - 'host': req.host, - 'verb': req.method, - 'path': req.path, - 'params': auth_params, - }}} - creds_json = jsonutils.dumps(creds) - headers = {'Content-Type': 'application/json'} - - # Disable "has no x member" pylint error - # for httplib and urlparse - # pylint: disable-msg=E1101 - o = urlparse.urlparse(FLAGS.keystone_ec2_url) - if o.scheme == "http": - conn = httplib.HTTPConnection(o.netloc) - else: - conn = httplib.HTTPSConnection(o.netloc) - conn.request('POST', o.path, body=creds_json, headers=headers) - response = conn.getresponse().read() - conn.close() - - # NOTE(vish): We could save a call to keystone by - # having keystone return token, tenant, - # user, and roles from this call. - - result = jsonutils.loads(response) - try: - token_id = result['access']['token']['id'] - except (AttributeError, KeyError), e: - LOG.exception(e) - raise webob.exc.HTTPBadRequest() - - # Authenticated! - req.headers['X-Auth-Token'] = token_id - return self.application - - class EC2KeystoneAuth(wsgi.Middleware): """Authenticate an EC2 request with keystone and convert to context.""" @@ -357,57 +286,6 @@ class NoAuth(wsgi.Middleware): return self.application -class Authenticate(wsgi.Middleware): - """Authenticate an EC2 request and add 'nova.context' to WSGI environ.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - # Read request signature and access id. 
- try: - signature = req.params['Signature'] - access = req.params['AWSAccessKeyId'] - except KeyError: - raise webob.exc.HTTPBadRequest() - - # Make a copy of args for authentication and signature verification. - auth_params = dict(req.params) - # Not part of authentication args - auth_params.pop('Signature') - - # Authenticate the request. - authman = manager.AuthManager() - try: - (user, project) = authman.authenticate( - access, - signature, - auth_params, - req.method, - req.host, - req.path) - # Be explicit for what exceptions are 403, the rest bubble as 500 - except (exception.NotFound, exception.NotAuthorized, - exception.InvalidSignature) as ex: - LOG.audit(_("Authentication Failure: %s"), unicode(ex)) - raise webob.exc.HTTPForbidden() - - # Authenticated! - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - roles = authman.get_active_roles(user, project) - ctxt = context.RequestContext(user_id=user.id, - project_id=project.id, - is_admin=user.is_admin(), - roles=roles, - remote_address=remote_address) - req.environ['nova.context'] = ctxt - uname = user.name - pname = project.name - msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals() - LOG.audit(msg, context=req.environ['nova.context']) - return self.application - - class Requestify(wsgi.Middleware): def __init__(self, app, controller): diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py index 59bed97c2..20e481bc7 100644 --- a/nova/api/openstack/compute/contrib/networks.py +++ b/nova/api/openstack/compute/contrib/networks.py @@ -29,17 +29,23 @@ import nova.network.api FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'networks') +authorize_view = extensions.extension_authorizer('compute', 'networks:view') -def network_dict(network): +def network_dict(context, network): + fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', + 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') + admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', + 'injected', 'bridge', 'vlan', 'vpn_public_address', + 'vpn_public_port', 'vpn_private_address', 'dhcp_start', + 'project_id', 'host', 'bridge_interface', 'multi_host', + 'priority', 'rxtx_base') if network: - fields = ('bridge', 'vpn_public_port', 'dhcp_start', - 'bridge_interface', 'updated_at', 'id', 'cidr_v6', - 'deleted_at', 'gateway', 'label', 'project_id', - 'vpn_private_address', 'deleted', 'vlan', 'broadcast', - 'netmask', 'injected', 'cidr', 'vpn_public_address', - 'multi_host', 'dns1', 'host', 'gateway_v6', 'netmask_v6', - 'created_at') + # NOTE(mnaser): We display a limited set of fields so users can know + # what networks are available, extra system-only fields + # are only visible if they are an admin. 
+ if context.is_admin: + fields += admin_fields result = dict((field, network[field]) for field in fields) if 'uuid' in network: result['id'] = network['uuid'] @@ -79,20 +85,20 @@ class NetworkController(object): def index(self, req): context = req.environ['nova.context'] - authorize(context) + authorize_view(context) networks = self.network_api.get_all(context) - result = [network_dict(net_ref) for net_ref in networks] + result = [network_dict(context, net_ref) for net_ref in networks] return {'networks': result} def show(self, req, id): context = req.environ['nova.context'] - authorize(context) + authorize_view(context) LOG.debug(_("Showing network with id %s") % id) try: network = self.network_api.get(context, id) except exception.NetworkNotFound: raise exc.HTTPNotFound(_("Network not found")) - return {'network': network_dict(network)} + return {'network': network_dict(context, network)} def delete(self, req, id): context = req.environ['nova.context'] diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index eb8f86cc8..5f8765de5 100644 --- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -152,7 +152,7 @@ class SimpleTenantUsageController(object): else: info['state'] = instance['vm_state'] - now = datetime.datetime.utcnow() + now = utils.utcnow() if info['state'] == 'terminated': delta = info['ended_at'] - info['started_at'] @@ -202,10 +202,11 @@ class SimpleTenantUsageController(object): def _get_datetime_range(self, req): qs = req.environ.get('QUERY_STRING', '') env = urlparse.parse_qs(qs) + # NOTE(lzyeval): env.get() always returns a list period_start = self._parse_datetime(env.get('start', [None])[0]) period_stop = self._parse_datetime(env.get('end', [None])[0]) - detailed = bool(env.get('detailed', False)) + detailed = env.get('detailed', ['0'])[0] == '1' return (period_start, period_stop, detailed) @wsgi.serializers(xml=SimpleTenantUsagesTemplate) diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 539a5c42d..c46a7c2ef 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -35,7 +35,7 @@ attributetype ( attributetype ( novaAttrs:4 NAME 'isNovaAdmin' - DESC 'Is user an nova administrator?' + DESC 'Is user a nova administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE diff --git a/nova/compute/api.py b/nova/compute/api.py index 3fd358a34..9f96b8e6b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -387,6 +387,9 @@ class API(base.Base): if reservation_id is None: reservation_id = utils.generate_uid('r') + # grab the architecture from glance + architecture = image['properties'].get('architecture', 'Unknown') + root_device_name = block_device.properties_root_device_name( image['properties']) @@ -421,6 +424,7 @@ class API(base.Base): 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'root_device_name': root_device_name, + 'architecture': architecture, 'progress': 0} options_from_image = self._inherit_properties_from_image( @@ -636,6 +640,8 @@ class API(base.Base): updates['vm_state'] = vm_states.BUILDING updates['task_state'] = task_states.SCHEDULING + updates['architecture'] = image['properties'].get('architecture') + if (image['properties'].get('mappings', []) or image['properties'].get('block_device_mapping', []) or block_device_mapping): @@ -835,7 +841,7 @@ class API(base.Base): in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'refresh_provider_fw_rules', 'args': {}}) def _is_security_group_associated_with_server(self, security_group, @@ -1808,7 +1814,6 @@ class HostAPI(base.Base): """Reboots, shuts down or powers up the host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return self.compute_rpcapi.host_power_action(context, action=action, host=host) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index c4a6f66f9..66c73c624 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -144,5 +144,5 @@ def get_instance_type_by_flavor_id(flavorid): :raises: FlavorNotFound """ - ctxt = context.get_admin_context() + ctxt = context.get_admin_context(read_deleted="yes") return db.instance_type_get_by_flavor_id(ctxt, flavorid) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6a0251a15..7ab27f740 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -315,9 +315,9 @@ class ComputeManager(manager.SchedulerDependentManager): """ #TODO(mdragon): perhaps make this variable by console_type? 
- return self.db.queue_get_for(context, - FLAGS.console_topic, - FLAGS.console_host) + return rpc.queue_get_for(context, + FLAGS.console_topic, + FLAGS.console_host) def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) @@ -361,7 +361,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _legacy_nw_info(self, network_info): """Converts the model nw_info object to legacy style""" if self.driver.legacy_nwinfo(): - network_info = compute_utils.legacy_network_info(network_info) + network_info = network_info.legacy() return network_info def _setup_block_device_mapping(self, context, instance): @@ -1257,7 +1257,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self._get_instance_nw_info(context, instance_ref) self.driver.destroy(instance_ref, self._legacy_nw_info(network_info)) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, + topic = rpc.queue_get_for(context, FLAGS.compute_topic, migration_ref['source_compute']) rpc.cast(context, topic, {'method': 'finish_revert_resize', @@ -1349,7 +1349,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'status': 'pre-migrating'}) LOG.audit(_('Migrating'), context=context, instance=instance_ref) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, + topic = rpc.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, {'method': 'resize_instance', @@ -1378,7 +1378,7 @@ class ComputeManager(manager.SchedulerDependentManager): try: network_info = self._get_instance_nw_info(context, instance_ref) except Exception, error: - with utils.save_and_reraise_exception(): + with excutils.save_and_reraise_exception(): msg = _('%s. Setting instance vm_state to ERROR') LOG.error(msg % error) self._set_instance_error_state(context, instance_uuid) @@ -1412,9 +1412,9 @@ class ComputeManager(manager.SchedulerDependentManager): service = self.db.service_get_by_host_and_topic( context, migration_ref['dest_compute'], FLAGS.compute_topic) - topic = self.db.queue_get_for(context, - FLAGS.compute_topic, - migration_ref['dest_compute']) + topic = rpc.queue_get_for(context, + FLAGS.compute_topic, + migration_ref['dest_compute']) params = {'migration_id': migration_id, 'disk_info': disk_info, 'instance_uuid': instance_ref['uuid'], @@ -2040,7 +2040,7 @@ class ComputeManager(manager.SchedulerDependentManager): disk = None rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, dest), + rpc.queue_get_for(context, FLAGS.compute_topic, dest), {'method': 'pre_live_migration', 'args': {'instance_id': instance_id, 'block_migration': block_migration, @@ -2122,7 +2122,7 @@ class ComputeManager(manager.SchedulerDependentManager): # Define domain at destination host, without doing it, # pause/suspend/terminate do not work. rpc.call(ctxt, - self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + rpc.queue_get_for(ctxt, FLAGS.compute_topic, dest), {"method": "post_live_migration_at_destination", "args": {'instance_id': instance_ref['id'], 'block_migration': block_migration}}) @@ -2227,7 +2227,7 @@ class ComputeManager(manager.SchedulerDependentManager): # any empty images has to be deleted. 
if block_migration: rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, dest), + rpc.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "rollback_live_migration_at_destination", "args": {'instance_id': instance_ref['id']}}) diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index a2a3b281c..5db0282c3 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -18,16 +18,16 @@ Client side of the compute RPC API. """ -from nova.db import base from nova import exception from nova import flags +from nova import rpc import nova.rpc.proxy FLAGS = flags.FLAGS -class ComputeAPI(nova.rpc.proxy.RpcProxy, base.Base): +class ComputeAPI(nova.rpc.proxy.RpcProxy): '''Client side of the compute rpc API. API version history: @@ -58,7 +58,7 @@ class ComputeAPI(nova.rpc.proxy.RpcProxy, base.Base): if not host: raise exception.NovaException(_('Unable to find host for ' 'Instance %s') % instance['uuid']) - return self.db.queue_get_for(ctxt, self.topic, host) + return rpc.queue_get_for(ctxt, self.topic, host) def add_aggregate_host(self, ctxt, aggregate_id, host_param, host): '''Add aggregate host. diff --git a/nova/compute/utils.py b/nova/compute/utils.py index ab5ccfbf0..f5f3c5f5e 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -16,11 +16,8 @@ """Compute-related Utilities and helpers.""" -import netaddr - import nova.context from nova import db -from nova import exception from nova import flags from nova import log from nova import network @@ -79,121 +76,6 @@ def notify_usage_exists(context, instance_ref, current_period=False, system_metadata=system_metadata, extra_usage_info=extra_info) -def legacy_network_info(network_model): - """ - Return the legacy network_info representation of the network_model - """ - def get_ip(ip): - if not ip: - return None - return ip['address'] - - def fixed_ip_dict(ip, subnet): - if ip['version'] == 4: - netmask = str(subnet.as_netaddr().netmask) - else: - netmask = subnet.as_netaddr()._prefixlen - - return {'ip': ip['address'], - 'enabled': '1', - 'netmask': netmask, - 'gateway': get_ip(subnet['gateway'])} - - def get_meta(model, key, default=None): - if 'meta' in model and key in model['meta']: - return model['meta'][key] - return default - - def convert_routes(routes): - routes_list = [] - for route in routes: - r = {'route': str(netaddr.IPNetwork(route['cidr']).network), - 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask), - 'gateway': get_ip(route['gateway'])} - routes_list.append(r) - return routes_list - - network_info = [] - for vif in network_model: - if not vif['network'] or not vif['network']['subnets']: - continue - network = vif['network'] - - # NOTE(jkoelker) The legacy format only supports one subnet per - # network, so we only use the 1st one of each type - # NOTE(tr3buchet): o.O - v4_subnets = [] - v6_subnets = [] - for subnet in vif['network']['subnets']: - if subnet['version'] == 4: - v4_subnets.append(subnet) - else: - v6_subnets.append(subnet) - - subnet_v4 = None - subnet_v6 = None - - if v4_subnets: - subnet_v4 = v4_subnets[0] - - if v6_subnets: - subnet_v6 = v6_subnets[0] - - if not subnet_v4: - raise exception.NovaException( - message=_('v4 subnets are required for legacy nw_info')) - - routes = convert_routes(subnet_v4['routes']) - - should_create_bridge = get_meta(network, 'should_create_bridge', - False) - should_create_vlan = get_meta(network, 'should_create_vlan', False) - gateway = get_ip(subnet_v4['gateway']) - dhcp_server = get_meta(subnet_v4, 'dhcp_server') - network_dict 
= dict(bridge=network['bridge'], - id=network['id'], - cidr=subnet_v4['cidr'], - cidr_v6=subnet_v6['cidr'] if subnet_v6 else None, - vlan=get_meta(network, 'vlan'), - injected=get_meta(network, 'injected', False), - multi_host=get_meta(network, 'multi_host', - False), - bridge_interface=get_meta(network, - 'bridge_interface')) - # NOTE(tr3buchet): the 'ips' bit here is tricky, we support a single - # subnet but we want all the IPs to be there - # so we use the v4_subnets[0] and its IPs are first - # so that eth0 will be from subnet_v4, the rest of the - # IPs will be aliased eth0:1 etc and the gateways from - # their subnets will not be used - info_dict = dict(label=network['label'], - broadcast=str(subnet_v4.as_netaddr().broadcast), - mac=vif['address'], - vif_uuid=vif['id'], - rxtx_cap=get_meta(network, 'rxtx_cap', 0), - dns=[get_ip(ip) for ip in subnet_v4['dns']], - ips=[fixed_ip_dict(ip, subnet) - for subnet in v4_subnets - for ip in subnet['ips']], - should_create_bridge=should_create_bridge, - should_create_vlan=should_create_vlan, - dhcp_server=dhcp_server) - if routes: - info_dict['routes'] = routes - - if gateway: - info_dict['gateway'] = gateway - - if v6_subnets: - if subnet_v6['gateway']: - info_dict['gateway_v6'] = get_ip(subnet_v6['gateway']) - info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6) - for ip in subnet_v6['ips']] - - network_info.append((network_dict, info_dict)) - return network_info - - def notify_about_instance_usage(context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, host=None): diff --git a/nova/console/api.py b/nova/console/api.py index 0feaae488..20f00030e 100644 --- a/nova/console/api.py +++ b/nova/console/api.py @@ -44,8 +44,8 @@ class API(base.Base): def delete_console(self, context, instance_id, console_id): instance_id = self._translate_uuid_if_necessary(context, instance_id) console = self.db.console_get(context, console_id, instance_id) - topic = self.db.queue_get_for(context, FLAGS.console_topic, - pool['host']) + topic = rpc.queue_get_for(context, FLAGS.console_topic, + pool['host']) rpcapi = console_rpcapi.ConsoleAPI(topic=topic) rpcapi.remove_console(context, console['id']) @@ -61,9 +61,9 @@ class API(base.Base): rpcapi.add_console(context, instance['id']) def _get_console_topic(self, context, instance_host): - topic = self.db.queue_get_for(context, - FLAGS.compute_topic, - instance_host) + topic = rpc.queue_get_for(context, + FLAGS.compute_topic, + instance_host) return rpc.call(context, topic, {'method': 'get_console_topic', 'args': {'fake': 1}}) diff --git a/nova/console/manager.py b/nova/console/manager.py index 8a42b449a..96a9ef31c 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -122,7 +122,7 @@ class ConsoleProxyManager(manager.Manager): 'password': '1234pass'} else: pool_info = rpc.call(context, - self.db.queue_get_for(context, + rpc.queue_get_for(context, FLAGS.compute_topic, instance_host), {'method': 'get_console_pool_info', diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index 0968ecd31..2008ff1b2 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -138,9 +138,9 @@ class ConsoleVMRCManager(manager.Manager): console_type) except exception.NotFound: pool_info = rpc.call(context, - self.db.queue_get_for(context, - FLAGS.compute_topic, - instance_host), + rpc.queue_get_for(context, + FLAGS.compute_topic, + instance_host), {'method': 'get_console_pool_info', 'args': {'console_type': console_type}}) pool_info['password'] = 
self.driver.fix_pool_password( diff --git a/nova/db/api.py b/nova/db/api.py index c43a48c19..d33584036 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -141,7 +141,7 @@ def service_get_all_volume_sorted(context): def service_get_by_args(context, host, binary): - """Get the state of an service by node name and binary.""" + """Get the state of a service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) @@ -151,7 +151,7 @@ def service_create(context, values): def service_update(context, service_id, values): - """Set the given properties on an service and update it. + """Set the given properties on a service and update it. Raises NotFound if service does not exist. @@ -163,7 +163,7 @@ def service_update(context, service_id, values): def compute_node_get(context, compute_id): - """Get an computeNode or raise if it does not exist.""" + """Get a computeNode or raise if it does not exist.""" return IMPL.compute_node_get(context, compute_id) @@ -178,7 +178,7 @@ def compute_node_create(context, values): def compute_node_update(context, compute_id, values, auto_adjust=True): - """Set the given properties on an computeNode and update it. + """Set the given properties on a computeNode and update it. Raises NotFound if computeNode does not exist. """ @@ -258,7 +258,7 @@ def floating_ip_count_by_project(context, project_id, session=None): def floating_ip_deallocate(context, address): - """Deallocate an floating ip by address.""" + """Deallocate a floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) @@ -268,7 +268,7 @@ def floating_ip_destroy(context, address): def floating_ip_disassociate(context, address): - """Disassociate an floating ip from a fixed ip by address. + """Disassociate a floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. @@ -278,7 +278,7 @@ def floating_ip_disassociate(context, address): def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): - """Associate an floating ip to a fixed_ip by address.""" + """Associate a floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, @@ -769,7 +769,7 @@ def network_disassociate(context, network_id): def network_get(context, network_id): - """Get an network or raise if it does not exist.""" + """Get a network or raise if it does not exist.""" return IMPL.network_get(context, network_id) @@ -837,7 +837,7 @@ def network_set_host(context, network_id, host_id): def network_update(context, network_id, values): - """Set the given properties on an network and update it. + """Set the given properties on a network and update it. Raises NotFound if network does not exist. 
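The db.queue_get_for() calls converted to rpc.queue_get_for() throughout this diff all follow the same pattern: derive a per-host topic, then cast or call on it. A minimal sketch of that usage under the new helper follows; the notify_host function is illustrative only, while the message body is taken from the compute/api.py hunk above, and the DB-layer implementation being removed (see the queue_get_for removals just below) simply produced "<topic>.<host>".

from nova import flags
from nova import rpc

FLAGS = flags.FLAGS


def notify_host(context, host):
    # rpc.queue_get_for() replaces db.queue_get_for(); both yield the
    # per-host routing key used to address a single service instance.
    topic = rpc.queue_get_for(context, FLAGS.compute_topic, host)
    rpc.cast(context, topic,
             {'method': 'refresh_provider_fw_rules', 'args': {}})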
@@ -848,14 +848,6 @@ def network_update(context, network_id, values): ################### -def queue_get_for(context, topic, physical_node_id): - """Return a channel to send a message to a node with a topic.""" - return IMPL.queue_get_for(context, topic, physical_node_id) - - -################### - - def iscsi_target_count_by_host(context, host): """Return count of export devices.""" return IMPL.iscsi_target_count_by_host(context, host) @@ -1093,7 +1085,7 @@ def volume_get_all_by_host(context, host): def volume_get_all_by_instance_uuid(context, instance_uuid): - """Get all volumes belonging to a instance.""" + """Get all volumes belonging to an instance.""" return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid) @@ -1113,7 +1105,7 @@ def volume_get_iscsi_target_num(context, volume_id): def volume_update(context, volume_id, values): - """Set the given properties on an volume and update it. + """Set the given properties on a volume and update it. Raises NotFound if volume does not exist. @@ -1174,7 +1166,7 @@ def snapshot_get_all_for_volume(context, volume_id): def snapshot_update(context, snapshot_id, values): - """Set the given properties on an snapshot and update it. + """Set the given properties on a snapshot and update it. Raises NotFound if snapshot does not exist. @@ -1202,7 +1194,7 @@ def block_device_mapping_update_or_create(context, values): def block_device_mapping_get_all_by_instance(context, instance_uuid): - """Get all block device mapping belonging to a instance""" + """Get all block device mapping belonging to an instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_uuid) @@ -1532,7 +1524,7 @@ def instance_type_get_by_flavor_id(context, id): def instance_type_destroy(context, name): - """Delete a instance type.""" + """Delete an instance type.""" return IMPL.instance_type_destroy(context, name) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5386b0f41..30c556b4d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1592,8 +1592,7 @@ def instance_get_floating_address(context, instance_id): @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window, session=None): - reboot_window = datetime.datetime.utcnow() - datetime.timedelta( - seconds=reboot_window) + reboot_window = utils.utcnow() - datetime.timedelta(seconds=reboot_window) if not session: session = get_session() @@ -1995,7 +1994,7 @@ def network_get(context, network_id, session=None): return result -@require_admin_context +@require_context def network_get_all(context): result = model_query(context, models.Network, read_deleted="no").all() @@ -2204,14 +2203,6 @@ def network_update(context, network_id, values): ################### -def queue_get_for(context, topic, physical_node_id): - # FIXME(ja): this should be servername? 
- return "%s.%s" % (topic, physical_node_id) - - -################### - - @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ @@ -2811,6 +2802,7 @@ def volume_attached(context, volume_id, instance_uuid, mountpoint): volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref['instance_uuid'] = instance_uuid + volume_ref['attach_time'] = utils.utcnow() volume_ref.save(session=session) @@ -2849,6 +2841,7 @@ def volume_data_get_for_project(context, project_id, session=None): def volume_destroy(context, volume_id): session = get_session() with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': True, @@ -2862,6 +2855,7 @@ def volume_destroy(context, volume_id): update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) + return volume_ref @require_admin_context @@ -2953,6 +2947,7 @@ def volume_get_iscsi_target_num(context, volume_id): @require_context def volume_update(context, volume_id, values): session = get_session() + volume_ref = volume_get(context, volume_id, session=session) metadata = values.get('metadata') if metadata is not None: volume_metadata_update(context, @@ -2960,10 +2955,11 @@ def volume_update(context, volume_id, values): values.pop('metadata'), delete=True) with session.begin(): - volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) + return volume_ref + @require_context def ec2_volume_create(context, volume_uuid, id=None): @@ -3775,8 +3771,8 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): @require_admin_context def migration_get_all_unconfirmed(context, confirm_window, session=None): - confirm_window = datetime.datetime.utcnow() - datetime.timedelta( - seconds=confirm_window) + confirm_window = (utils.utcnow() - + datetime.timedelta(seconds=confirm_window)) return model_query(context, models.Migration, session=session, read_deleted="yes").\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py b/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py new file mode 100644 index 000000000..46f681100 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2012 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
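+# This migration converts volumes.attach_time from a String(255) column to
+# DateTime: a temporary DateTime column is created, each row's attach_time
+# value is copied into it, and the columns are then renamed so attach_time
+# ends up as the DateTime column. downgrade() reverses the conversion back
+# to a String(255) column.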
+
+from sqlalchemy import select, Column, Table, MetaData, String, DateTime
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    attach_datetime = Column('attachtime_datetime', DateTime(timezone=False))
+    attach_datetime.create(volumes)
+
+    old_attachtime = volumes.c.attach_time
+
+    try:
+        volumes_list = list(volumes.select().execute())
+        for v in volumes_list:
+            attach_time = select([volumes.c.attach_time],
+                                 volumes.c.id == v['id'])
+            volumes.update().\
+                where(volumes.c.id == v['id']).\
+                values(attach_datetime=attach_time).execute()
+    except Exception:
+        attach_datetime.drop()
+        raise
+
+    old_attachtime.alter(name='attach_time_old')
+    attach_datetime.alter(name='attach_time')
+    old_attachtime.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    attach_string = Column('attachtime_string', String(255))
+    attach_string.create(volumes)
+
+    old_attachtime = volumes.c.attach_time
+
+    try:
+        volumes_list = list(volumes.select().execute())
+        for v in volumes_list:
+            attach_time = select([volumes.c.attach_time],
+                                 volumes.c.id == v['id'])
+            volumes.update().\
+                where(volumes.c.id == v['id']).\
+                values(attach_string=attach_time).execute()
+    except Exception:
+        attach_string.drop()
+        raise
+
+    old_attachtime.alter(name='attach_time_old')
+    attach_string.alter(name='attach_time')
+    old_attachtime.drop()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 78fcaa7e5..452816bb7 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -247,7 +247,7 @@ class Instance(BASE, NovaBase):
     display_name = Column(String(255))
     display_description = Column(String(255))
 
-    # To remember on which host a instance booted.
+    # To remember on which host an instance booted.
     # An instance may have moved to another host by live migraiton.
     launched_on = Column(Text)
     locked = Column(Boolean)
@@ -342,7 +342,7 @@ class Volume(BASE, NovaBase):
     availability_zone = Column(String(255))  # TODO(vish): foreign key?
     instance_uuid = Column(String(36))
     mountpoint = Column(String(255))
-    attach_time = Column(String(255))  # TODO(vish): datetime
+    attach_time = Column(DateTime)
     status = Column(String(255))  # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum @@ -465,9 +465,11 @@ class Reservation(BASE, NovaBase): uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) + # NOTE(dprince): Force innerjoin below for lockmode update on PostgreSQL usage = relationship(QuotaUsage, backref=backref('reservations'), foreign_keys=usage_id, + innerjoin=True, primaryjoin='and_(' 'Reservation.usage_id == QuotaUsage.id,' 'Reservation.deleted == False)') @@ -716,7 +718,7 @@ class FixedIp(BASE, NovaBase): virtual_interface_id = Column(Integer, nullable=True) instance_id = Column(Integer, nullable=True) # associated means that a fixed_ip has its instance_id column set - # allocated means that a fixed_ip has a its virtual_interface_id column set + # allocated means that a fixed_ip has its virtual_interface_id column set allocated = Column(Boolean, default=False) # leased means dhcp bridge has leased the ip leased = Column(Boolean, default=False) @@ -1049,51 +1051,3 @@ class InstanceFault(BASE, NovaBase): code = Column(Integer(), nullable=False) message = Column(String(255)) details = Column(Text) - - -def register_models(): - """Register Models and create metadata. - - Called from nova.db.sqlalchemy.__init__ as part of loading the driver, - it will never need to be called explicitly elsewhere unless the - connection is lost and needs to be reestablished. - """ - from sqlalchemy import create_engine - models = (AgentBuild, - Aggregate, - AggregateHost, - AggregateMetadata, - AuthToken, - Certificate, - Cell, - Console, - ConsolePool, - FixedIp, - FloatingIp, - Instance, - InstanceFault, - InstanceMetadata, - InstanceTypeExtraSpecs, - InstanceTypes, - IscsiTarget, - Migration, - Network, - Project, - SecurityGroup, - SecurityGroupIngressRule, - SecurityGroupInstanceAssociation, - Service, - SMBackendConf, - SMFlavors, - SMVolume, - User, - Volume, - VolumeMetadata, - VolumeTypeExtraSpecs, - VolumeTypes, - VolumeIdMapping, - SnapshotIdMapping, - ) - engine = create_engine(FLAGS.sql_connection, echo=False) - for model in models: - model.metadata.create_all(engine) diff --git a/nova/flags.py b/nova/flags.py index 337dc979c..34d5fc814 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -33,19 +33,14 @@ import sys from nova.openstack.common import cfg -class NovaConfigOpts(cfg.CommonConfigOpts): +FLAGS = cfg.CONF - def __init__(self, *args, **kwargs): - if 'project' not in kwargs: - kwargs['project'] = 'nova' - super(NovaConfigOpts, self).__init__(*args, **kwargs) - self.disable_interspersed_args() - def __call__(self, argv): - return argv[:1] + super(NovaConfigOpts, self).__call__(argv[1:]) - - -FLAGS = NovaConfigOpts() +def parse_args(argv, default_config_files=None): + FLAGS.disable_interspersed_args() + return argv[:1] + FLAGS(argv[1:], + project='nova', + default_config_files=default_config_files) class UnrecognizedFlag(Exception): @@ -125,9 +120,6 @@ debug_opts = [ cfg.BoolOpt('fake_network', default=False, help='If passed, use fake network devices and addresses'), - cfg.BoolOpt('fake_rabbit', - default=False, - help='If passed, use a fake RabbitMQ provider'), ] FLAGS.register_cli_opts(log_opts) @@ -189,41 +181,6 @@ global_opts = [ cfg.StrOpt('network_topic', default='network', help='the topic network nodes listen on'), - cfg.StrOpt('rabbit_host', - default='localhost', - help='the RabbitMQ host'), - cfg.IntOpt('rabbit_port', - default=5672, - help='the RabbitMQ port'), - cfg.BoolOpt('rabbit_use_ssl', - default=False, - help='connect over SSL for 
RabbitMQ'), - cfg.StrOpt('rabbit_userid', - default='guest', - help='the RabbitMQ userid'), - cfg.StrOpt('rabbit_password', - default='guest', - help='the RabbitMQ password'), - cfg.StrOpt('rabbit_virtual_host', - default='/', - help='the RabbitMQ virtual host'), - cfg.IntOpt('rabbit_retry_interval', - default=1, - help='how frequently to retry connecting with RabbitMQ'), - cfg.IntOpt('rabbit_retry_backoff', - default=2, - help='how long to backoff for between retries when connecting ' - 'to RabbitMQ'), - cfg.IntOpt('rabbit_max_retries', - default=0, - help='maximum retries with trying to connect to RabbitMQ ' - '(the default of 0 implies an infinite retry count)'), - cfg.StrOpt('control_exchange', - default='nova', - help='the main RabbitMQ exchange to connect to'), - cfg.BoolOpt('rabbit_durable_queues', - default=False, - help='use durable queues in RabbitMQ'), cfg.BoolOpt('api_rate_limit', default=True, help='whether to rate limit the api'), @@ -394,7 +351,7 @@ global_opts = [ 'host rebooted'), cfg.StrOpt('default_ephemeral_format', default=None, - help='The default format a ephemeral_volume will be ' + help='The default format an ephemeral_volume will be ' 'formatted with on creation.'), cfg.StrOpt('root_helper', default='sudo', diff --git a/nova/image/fake.py b/nova/image/fake.py index 4f29cdb6e..996459081 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -"""Implementation of an fake image service""" +"""Implementation of a fake image service""" import copy import datetime @@ -210,7 +210,8 @@ class _FakeImageService(object): self._imagedata[image_id] = data.read() return self.images[image_id] - def update(self, context, image_id, metadata, data=None): + def update(self, context, image_id, metadata, data=None, + headers=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. @@ -218,7 +219,20 @@ class _FakeImageService(object): """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) - self.images[image_id] = copy.deepcopy(metadata) + try: + purge = headers['x-glance-registry-purge-props'] + except Exception: + purge = True + if purge: + self.images[image_id] = copy.deepcopy(metadata) + else: + image = self.images[image_id] + try: + image['properties'].update(metadata.pop('properties')) + except Exception: + pass + image.update(metadata) + return self.images[image_id] def delete(self, context, image_id): """Delete the given image. diff --git a/nova/image/glance.py b/nova/image/glance.py index 1e98cbe11..dc7ae89ad 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -290,7 +290,7 @@ class GlanceImageService(object): base_image_meta) return base_image_meta - def update(self, context, image_id, image_meta, data=None): + def update(self, context, image_id, image_meta, data=None, features=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. 
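The extra headers/features argument added to the image service update() methods above exists so callers can change individual image properties without purging the rest. A minimal sketch of the intended call, essentially what the nova/image/s3.py helpers below do (the set_image_state function itself is illustrative):

def set_image_state(image_service, context, image_id, state):
    # Update one property; passing the purge-props header as False keeps the
    # image's other properties intact (purging is the default otherwise).
    metadata = {'properties': {'image_state': state}}
    headers = {'x-glance-registry-purge-props': False}
    return image_service.update(context, image_id, metadata, None, headers)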
@@ -301,7 +301,8 @@ class GlanceImageService(object): image_meta = self._translate_to_glance(image_meta) client = self._get_client(context) try: - image_meta = client.update_image(image_id, image_meta, data) + image_meta = client.update_image(image_id, image_meta, data, + features) except Exception: _reraise_translated_image_exception(image_id) diff --git a/nova/image/s3.py b/nova/image/s3.py index 9ed060464..d0746922f 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -288,8 +288,20 @@ class S3ImageService(object): context.update_store() log_vars = {'image_location': image_location, 'image_path': image_path} - metadata['properties']['image_state'] = 'downloading' - self.service.update(context, image_uuid, metadata) + + def _update_image_state(context, image_uuid, image_state): + metadata = {'properties': {'image_state': image_state}} + headers = {'x-glance-registry-purge-props': False} + self.service.update(context, image_uuid, metadata, None, + headers) + + def _update_image_data(context, image_uuid, image_data): + metadata = {} + headers = {'x-glance-registry-purge-props': False} + self.service.update(context, image_uuid, metadata, image_data, + headers) + + _update_image_state(context, image_uuid, 'downloading') try: parts = [] @@ -310,12 +322,10 @@ class S3ImageService(object): except Exception: LOG.exception(_("Failed to download %(image_location)s " "to %(image_path)s"), log_vars) - metadata['properties']['image_state'] = 'failed_download' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'failed_download') return - metadata['properties']['image_state'] = 'decrypting' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'decrypting') try: hex_key = manifest.find('image/ec2_encrypted_key').text @@ -329,38 +339,33 @@ class S3ImageService(object): except Exception: LOG.exception(_("Failed to decrypt %(image_location)s " "to %(image_path)s"), log_vars) - metadata['properties']['image_state'] = 'failed_decrypt' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'failed_decrypt') return - metadata['properties']['image_state'] = 'untarring' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'untarring') try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: LOG.exception(_("Failed to untar %(image_location)s " "to %(image_path)s"), log_vars) - metadata['properties']['image_state'] = 'failed_untar' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'failed_untar') return - metadata['properties']['image_state'] = 'uploading' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'uploading') try: with open(unz_filename) as image_file: - self.service.update(context, image_uuid, - metadata, image_file) + _update_image_data(context, image_uuid, image_file) except Exception: LOG.exception(_("Failed to upload %(image_location)s " "to %(image_path)s"), log_vars) - metadata['properties']['image_state'] = 'failed_upload' - self.service.update(context, image_uuid, metadata) + _update_image_state(context, image_uuid, 'failed_upload') return - metadata['properties']['image_state'] = 'available' - metadata['status'] = 'active' - self.service.update(context, image_uuid, metadata) + metadata = {'status': 'active', + 'properties': {'image_state': 'available'}} + headers = {'x-glance-registry-purge-props': False} 
+ self.service.update(context, image_uuid, metadata, None, headers) shutil.rmtree(image_path) diff --git a/nova/network/manager.py b/nova/network/manager.py index ac77ebaf7..1f1580634 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -201,9 +201,7 @@ class RPCAllocateFixedIP(object): jsonutils.to_primitive(network)}}) if host != self.host: # need to call allocate_fixed_ip to correct network host - topic = self.db.queue_get_for(context, - FLAGS.network_topic, - host) + topic = rpc.queue_get_for(context, FLAGS.network_topic, host) args = {} args['instance_id'] = instance_id args['network_id'] = network['id'] @@ -241,7 +239,7 @@ class RPCAllocateFixedIP(object): host = network['host'] if host != self.host: # need to call deallocate_fixed_ip on correct network host - topic = self.db.queue_get_for(context, FLAGS.network_topic, host) + topic = rpc.queue_get_for(context, FLAGS.network_topic, host) args = {'address': address, 'host': host} rpc.cast(context, topic, @@ -513,7 +511,7 @@ class FloatingIP(object): else: # send to correct host rpc.cast(context, - self.db.queue_get_for(context, FLAGS.network_topic, host), + rpc.queue_get_for(context, FLAGS.network_topic, host), {'method': '_associate_floating_ip', 'args': {'floating_address': floating_address, 'fixed_address': fixed_address, @@ -583,7 +581,7 @@ class FloatingIP(object): else: # send to correct host rpc.cast(context, - self.db.queue_get_for(context, FLAGS.network_topic, host), + rpc.queue_get_for(context, FLAGS.network_topic, host), {'method': '_disassociate_floating_ip', 'args': {'address': address, 'interface': interface}}) @@ -1013,7 +1011,7 @@ class NetworkManager(manager.SchedulerDependentManager): nw_info = self.build_network_info_model(context, vifs, networks, rxtx_factor, host) self.db.instance_info_cache_update(context, instance_uuid, - {'network_info': nw_info.as_cache()}) + {'network_info': nw_info.json()}) return nw_info def build_network_info_model(self, context, vifs, networks, @@ -1177,8 +1175,11 @@ class NetworkManager(manager.SchedulerDependentManager): @wrap_check_policy def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" - networks = [self._get_network_by_id(context, network_id)] - self._allocate_fixed_ips(context, instance_id, host, networks) + if utils.is_uuid_like(network_id): + network = self.get_network(context, network_id) + else: + network = self._get_network_by_id(context, network_id) + self._allocate_fixed_ips(context, instance_id, host, [network]) @wrap_check_policy def remove_fixed_ip_from_instance(self, context, instance_id, host, @@ -1542,8 +1543,7 @@ class NetworkManager(manager.SchedulerDependentManager): call_func(context, network) else: # i'm not the right host, run call on correct host - topic = self.db.queue_get_for(context, FLAGS.network_topic, - host) + topic = rpc.queue_get_for(context, FLAGS.network_topic, host) args = {'network_id': network['id'], 'teardown': teardown} # NOTE(tr3buchet): the call is just to wait for completion green_pool.spawn_n(rpc.call, context, topic, diff --git a/nova/network/model.py b/nova/network/model.py index 843416591..84a8ab35e 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -31,12 +31,16 @@ class Model(dict): def __repr__(self): return self.__class__.__name__ + '(' + dict.__repr__(self) + ')' - def set_meta(self, kwargs): + def _set_meta(self, kwargs): # pull meta out of kwargs if it's there self['meta'] = kwargs.pop('meta', {}) # update meta with 
any additional kwargs that may exist self['meta'].update(kwargs) + def get_meta(self, key, default=None): + """calls get(key, default) on self['meta']""" + return self['meta'].get(key, default) + class IP(Model): """Represents an IP address in Nova""" @@ -47,7 +51,7 @@ class IP(Model): self['type'] = type self['version'] = kwargs.pop('version', None) - self.set_meta(kwargs) + self._set_meta(kwargs) # determine version from address if not passed in if self['address'] and not self['version']: @@ -106,7 +110,7 @@ class Route(Model): self['gateway'] = gateway self['interface'] = interface - self.set_meta(kwargs) + self._set_meta(kwargs) @classmethod def hydrate(cls, route): @@ -128,7 +132,7 @@ class Subnet(Model): self['routes'] = routes or [] self['version'] = kwargs.pop('version', None) - self.set_meta(kwargs) + self._set_meta(kwargs) if self['cidr'] and not self['version']: self['version'] = netaddr.IPNetwork(self['cidr']).version @@ -173,7 +177,7 @@ class Network(Model): self['label'] = label self['subnets'] = subnets or [] - self.set_meta(kwargs) + self._set_meta(kwargs) def add_subnet(self, subnet): if subnet not in self['subnets']: @@ -197,7 +201,7 @@ class VIF(Model): self['address'] = address self['network'] = network or None - self.set_meta(kwargs) + self._set_meta(kwargs) def __eq__(self, other): return self['id'] == other['id'] @@ -270,5 +274,115 @@ class NetworkInfo(list): network_info = json.loads(network_info) return NetworkInfo([VIF.hydrate(vif) for vif in network_info]) - def as_cache(self): + def json(self): return json.dumps(self) + + def legacy(self): + """ + Return the legacy network_info representation of self + """ + def get_ip(ip): + if not ip: + return None + return ip['address'] + + def fixed_ip_dict(ip, subnet): + if ip['version'] == 4: + netmask = str(subnet.as_netaddr().netmask) + else: + netmask = subnet.as_netaddr()._prefixlen + + return {'ip': ip['address'], + 'enabled': '1', + 'netmask': netmask, + 'gateway': get_ip(subnet['gateway'])} + + def convert_routes(routes): + routes_list = [] + for route in routes: + r = {'route': str(netaddr.IPNetwork(route['cidr']).network), + 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask), + 'gateway': get_ip(route['gateway'])} + routes_list.append(r) + return routes_list + + network_info = [] + for vif in self: + # if vif doesn't have network or that network has no subnets, quit + if not vif['network'] or not vif['network']['subnets']: + continue + network = vif['network'] + + # NOTE(jkoelker) The legacy format only supports one subnet per + # network, so we only use the 1st one of each type + # NOTE(tr3buchet): o.O + v4_subnets = [] + v6_subnets = [] + for subnet in vif['network']['subnets']: + if subnet['version'] == 4: + v4_subnets.append(subnet) + else: + v6_subnets.append(subnet) + + subnet_v4 = None + subnet_v6 = None + + if v4_subnets: + subnet_v4 = v4_subnets[0] + + if v6_subnets: + subnet_v6 = v6_subnets[0] + + if not subnet_v4: + msg = _('v4 subnets are required for legacy nw_info') + raise exception.NovaException(message=msg) + + routes = convert_routes(subnet_v4['routes']) + should_create_bridge = network.get_meta('should_create_bridge', + False) + should_create_vlan = network.get_meta('should_create_vlan', False) + gateway = get_ip(subnet_v4['gateway']) + dhcp_server = subnet_v4.get_meta('dhcp_server', gateway) + + network_dict = \ + {'bridge': network['bridge'], + 'id': network['id'], + 'cidr': subnet_v4['cidr'], + 'cidr_v6': subnet_v6['cidr'] if subnet_v6 else None, + 'vlan': network.get_meta('vlan'), + 
'injected': network.get_meta('injected', False), + 'multi_host': network.get_meta('multi_host', False), + 'bridge_interface': network.get_meta('bridge_interface')} + # NOTE(tr3buchet): 'ips' bit here is tricky, we support a single + # subnet but we want all the IPs to be there + # so use the v4_subnets[0] and its IPs are first + # so that eth0 will be from subnet_v4, the rest of + # the IPs will be aliased eth0:1 etc and the + # gateways from their subnets will not be used + info_dict = {'label': network['label'], + 'broadcast': str(subnet_v4.as_netaddr().broadcast), + 'mac': vif['address'], + 'vif_uuid': vif['id'], + 'rxtx_cap': vif.get_meta('rxtx_cap', 0), + 'dns': [get_ip(ip) for ip in subnet_v4['dns']], + 'ips': [fixed_ip_dict(ip, subnet) + for subnet in v4_subnets + for ip in subnet['ips']], + 'should_create_bridge': should_create_bridge, + 'should_create_vlan': should_create_vlan, + 'dhcp_server': dhcp_server} + if routes: + info_dict['routes'] = routes + + if gateway: + info_dict['gateway'] = gateway + + if v6_subnets: + if subnet_v6['gateway']: + info_dict['gateway_v6'] = get_ip(subnet_v6['gateway']) + # NOTE(tr3buchet): only supporting single v6 subnet here + info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6) + for ip in subnet_v6['ips']] + + network_info.append((network_dict, info_dict)) + return network_info diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index c034b5e2a..50bc8cf14 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -291,7 +291,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager): if net_ref['host'] == self.host: self.kill_dhcp(net_ref) else: - topic = self.db.queue_get_for(context, + topic = rpc.queue_get_for(context, FLAGS.network_topic, net_ref['host']) @@ -389,7 +389,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager): self.enable_dhcp(context, network['quantum_net_id'], network, vif_rec, network['net_tenant_id']) else: - topic = self.db.queue_get_for(context, + topic = rpc.queue_get_for(context, FLAGS.network_topic, network['host']) rpc.call(context, topic, {'method': 'enable_dhcp', 'args': {'quantum_net_id': network['quantum_net_id'], @@ -577,7 +577,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager): nw_info = self.build_network_info_model(context, vifs, networks, rxtx_factor, host) db.instance_info_cache_update(context, instance_uuid, - {'network_info': nw_info.as_cache()}) + {'network_info': nw_info.json()}) return nw_info @@ -608,7 +608,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager): self.update_dhcp(context, ipam_tenant_id, network, vif, project_id) else: - topic = self.db.queue_get_for(context, + topic = rpc.queue_get_for(context, FLAGS.network_topic, network['host']) rpc.call(context, topic, {'method': 'update_dhcp', 'args': {'ipam_tenant_id': ipam_tenant_id, diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index fbb1dfad6..6ceabc8a4 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -50,7 +50,7 @@ class QuantumMelangeIPAMLib(object): """Contact Melange and create a subnet for any non-NULL IPv4 or IPv6 subnets. 
- Also create a entry in the Nova networks DB, but only + Also create an entry in the Nova networks DB, but only to store values not represented in Melange or to temporarily provide compatibility with Nova code that accesses IPAM data directly via the DB (e.g., nova-api) diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py index 1b1b45d63..dd367aeb6 100644 --- a/nova/openstack/common/cfg.py +++ b/nova/openstack/common/cfg.py @@ -95,7 +95,7 @@ and --config-dir:: class ConfigOpts(object): - def __init__(self, ...): + def __call__(self, ...): opts = [ MultiStrOpt('config-file', @@ -233,6 +233,22 @@ log files: ... ] +This module also contains a global instance of the CommonConfigOpts class +in order to support a common usage pattern in OpenStack: + + from openstack.common import cfg + + opts = [ + cfg.StrOpt('bind_host' default='0.0.0.0'), + cfg.IntOpt('bind_port', default=9292), + ] + + CONF = cfg.CONF + CONF.register_opts(opts) + + def start(server, app): + server.start(app, CONF.bind_port, CONF.bind_host) + """ import collections @@ -473,7 +489,7 @@ class Opt(object): metavar: the name shown as the argument to a CLI option in --help output help: - an string explaining how the options value is used + a string explaining how the options value is used """ multi = False @@ -655,7 +671,7 @@ class IntOpt(Opt): """Int opt values are converted to integers using the int() builtin.""" def _get_from_config_parser(self, cparser, section): - """Retrieve the opt value as a integer from ConfigParser.""" + """Retrieve the opt value as an integer from ConfigParser.""" return [int(v) for v in cparser.get(section, self.dest)] def _get_optparse_kwargs(self, group, **kwargs): @@ -768,6 +784,14 @@ class OptGroup(object): return True + def _unregister_opt(self, opt): + """Remove an opt from this group. + + :param opt: an Opt object + """ + if opt.dest in self._opts: + del self._opts[opt.dest] + def _get_optparse_group(self, parser): """Build an optparse.OptionGroup for this group.""" if self._optparse_group is None: @@ -775,6 +799,10 @@ class OptGroup(object): self.help) return self._optparse_group + def _clear(self): + """Clear this group's option parsing state.""" + self._optparse_group = None + class ParseError(iniparser.ParseError): def __init__(self, msg, lineno, line, filename): @@ -849,57 +877,41 @@ class ConfigOpts(collections.Mapping): the values of options. """ - def __init__(self, - project=None, - prog=None, - version=None, - usage=None, - default_config_files=None): - """Construct a ConfigOpts object. + def __init__(self): + """Construct a ConfigOpts object.""" + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._groups = {} - Automatically registers the --config-file option with either a supplied - list of default config files, or a list from find_config_files(). 
+ self._args = None + self._oparser = None + self._cparser = None + self._cli_values = {} + self.__cache = {} + self._config_opts = [] + self._disable_interspersed_args = False - :param project: the toplevel project name, used to locate config files - :param prog: the name of the program (defaults to sys.argv[0] basename) - :param version: the program version (for --version) - :param usage: a usage string (%prog will be expanded) - :param default_config_files: config files to use by default - """ + def _setup(self, project, prog, version, usage, default_config_files): + """Initialize a ConfigOpts object for option parsing.""" if prog is None: prog = os.path.basename(sys.argv[0]) if default_config_files is None: default_config_files = find_config_files(project, prog) - self.project = project - self.prog = prog - self.version = version - self.usage = usage - self.default_config_files = default_config_files - - self._opts = {} # dict of dicts of (opt:, override:, default:) - self._groups = {} - - self._args = None - self._cli_values = {} - - self._oparser = optparse.OptionParser(prog=self.prog, - version=self.version, - usage=self.usage) - self._cparser = None + self._oparser = optparse.OptionParser(prog=prog, + version=version, + usage=usage) + if self._disable_interspersed_args: + self._oparser.disable_interspersed_args() - self.__cache = {} - - opts = [ + self._config_opts = [ MultiStrOpt('config-file', - default=self.default_config_files, + default=default_config_files, metavar='PATH', help='Path to a config file to use. Multiple config ' 'files can be specified, with values in later ' 'files taking precedence. The default files ' - ' used are: %s' % - (self.default_config_files, )), + ' used are: %s' % (default_config_files, )), StrOpt('config-dir', metavar='DIR', help='Path to a config directory to pull *.conf ' @@ -910,7 +922,13 @@ class ConfigOpts(collections.Mapping): 'hence over-ridden options in the directory take ' 'precedence.'), ] - self.register_cli_opts(opts) + self.register_cli_opts(self._config_opts) + + self.project = project + self.prog = prog + self.version = version + self.usage = usage + self.default_config_files = default_config_files def __clear_cache(f): @functools.wraps(f) @@ -921,7 +939,13 @@ class ConfigOpts(collections.Mapping): return __inner - def __call__(self, args=None): + def __call__(self, + args=None, + project=None, + prog=None, + version=None, + usage=None, + default_config_files=None): """Parse command line arguments and config files. Calling a ConfigOpts object causes the supplied command line arguments @@ -931,35 +955,34 @@ class ConfigOpts(collections.Mapping): The object may be called multiple times, each time causing the previous set of values to be overwritten. + Automatically registers the --config-file option with either a supplied + list of default config files, or a list from find_config_files(). + If the --config-dir option is set, any *.conf files from this directory are pulled in, after all the file(s) specified by the --config-file option. 
- :params args: command line arguments (defaults to sys.argv[1:]) + :param args: command line arguments (defaults to sys.argv[1:]) + :param project: the toplevel project name, used to locate config files + :param prog: the name of the program (defaults to sys.argv[0] basename) + :param version: the program version (for --version) + :param usage: a usage string (%prog will be expanded) + :param default_config_files: config files to use by default :returns: the list of arguments left over after parsing options :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError, - RequiredOptError + RequiredOptError, DuplicateOptError """ self.clear() - self._args = args + self._setup(project, prog, version, usage, default_config_files) - (values, args) = self._oparser.parse_args(self._args) + self._cli_values, leftovers = self._parse_cli_opts(args) - self._cli_values = vars(values) - - def _list_config_dir(): - return sorted(glob.glob(os.path.join(self.config_dir, '*.conf'))) - - from_file = list(self.config_file) - - from_dir = _list_config_dir() if self.config_dir else [] - - self._parse_config_files(from_file + from_dir) + self._parse_config_files() self._check_required_opts() - return args + return leftovers def __getattr__(self, name): """Look up an option value and perform string substitution. @@ -996,8 +1019,12 @@ class ConfigOpts(collections.Mapping): def clear(self): """Clear the state of the object to before it was called.""" self._args = None - self._cli_values = {} + self._cli_values.clear() + self._oparser = None self._cparser = None + self.unregister_opts(self._config_opts) + for group in self._groups.values(): + group._clear() @__clear_cache def register_opt(self, opt, group=None): @@ -1044,15 +1071,7 @@ class ConfigOpts(collections.Mapping): if self._args is not None: raise ArgsAlreadyParsedError("cannot register CLI option") - if not self.register_opt(opt, group, clear_cache=False): - return False - - if group is not None: - group = self._get_group(group, autocreate=True) - - opt._add_to_cli(self._oparser, group) - - return True + return self.register_opt(opt, group, clear_cache=False) @__clear_cache def register_cli_opts(self, opts, group=None): @@ -1074,6 +1093,28 @@ class ConfigOpts(collections.Mapping): self._groups[group.name] = copy.copy(group) @__clear_cache + def unregister_opt(self, opt, group=None): + """Unregister an option. + + :param opt: an Opt object + :param group: an optional OptGroup object or group name + :raises: ArgsAlreadyParsedError, NoSuchGroupError + """ + if self._args is not None: + raise ArgsAlreadyParsedError("reset before unregistering options") + + if group is not None: + self._get_group(group)._unregister_opt(opt) + elif opt.dest in self._opts: + del self._opts[opt.dest] + + @__clear_cache + def unregister_opts(self, opts, group=None): + """Unregister multiple CLI option schemas at once.""" + for opt in opts: + self.unregister_opt(opt, group, clear_cache=False) + + @__clear_cache def set_override(self, name, override, group=None): """Override an opt value. 
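Because __call__ now accepts project, prog, version, usage and default_config_files, entry points supply those at parse time against the shared CONF instance. A minimal sketch of that pattern, mirroring the nova.flags.parse_args change earlier in this diff (the parse helper name is illustrative):

import sys

from nova.openstack.common import cfg

CONF = cfg.CONF


def parse(argv=None):
    # Project name and default config files are supplied when the global
    # CONF object is called, not when it is constructed, so any entry point
    # can (re)parse the same instance.
    argv = sys.argv if argv is None else argv
    return CONF(argv[1:], project='nova')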
@@ -1103,16 +1144,24 @@ class ConfigOpts(collections.Mapping): opt_info = self._get_opt_info(name, group) opt_info['default'] = default + def _all_opt_infos(self): + """A generator function for iteration opt infos.""" + for info in self._opts.values(): + yield info, None + for group in self._groups.values(): + for info in group._opts.values(): + yield info, group + + def _all_opts(self): + """A generator function for iteration opts.""" + for info, group in self._all_opt_infos(): + yield info['opt'], group + def _unset_defaults_and_overrides(self): """Unset any default or override on all options.""" - def unset(opts): - for info in opts.values(): - info['default'] = None - info['override'] = None - - unset(self._opts) - for group in self._groups.values(): - unset(group._opts) + for info, group in self._all_opt_infos(): + info['default'] = None + info['override'] = None def disable_interspersed_args(self): """Set parsing to stop on the first non-option. @@ -1131,13 +1180,13 @@ class ConfigOpts(collections.Mapping): i.e. argument parsing is stopped at the first non-option argument. """ - self._oparser.disable_interspersed_args() + self._disable_interspersed_args = True def enable_interspersed_args(self): """Set parsing to not stop on the first non-option. This it the default behaviour.""" - self._oparser.enable_interspersed_args() + self._disable_interspersed_args = False def find_file(self, name): """Locate a file located alongside the config files. @@ -1289,7 +1338,7 @@ class ConfigOpts(collections.Mapping): return value def _get_group(self, group_or_name, autocreate=False): - """Looks up a OptGroup object. + """Looks up an OptGroup object. Helper function to return an OptGroup given a parameter which can either be the group's name or an OptGroup object. @@ -1331,11 +1380,17 @@ class ConfigOpts(collections.Mapping): return opts[opt_name] - def _parse_config_files(self, config_files): - """Parse the supplied configuration files. + def _parse_config_files(self): + """Parse the config files from --config-file and --config-dir. :raises: ConfigFilesNotFoundError, ConfigFileParseError """ + config_files = list(self.config_file) + + if self.config_dir: + config_dir_glob = os.path.join(self.config_dir, '*.conf') + config_files += sorted(glob.glob(config_dir_glob)) + self._cparser = MultiConfigParser() try: @@ -1347,8 +1402,12 @@ class ConfigOpts(collections.Mapping): not_read_ok = filter(lambda f: f not in read_ok, config_files) raise ConfigFilesNotFoundError(not_read_ok) - def _do_check_required_opts(self, opts, group=None): - for info in opts.values(): + def _check_required_opts(self): + """Check that all opts marked as required have values specified. + + :raises: RequiredOptError + """ + for info, group in self._all_opt_infos(): default, opt, override = [info[k] for k in sorted(info.keys())] if opt.required: @@ -1359,15 +1418,25 @@ class ConfigOpts(collections.Mapping): if self._get(opt.name, group) is None: raise RequiredOptError(opt.name, group) - def _check_required_opts(self): - """Check that all opts marked as required have values specified. + def _parse_cli_opts(self, args): + """Parse command line options. + + Initializes the command line option parser and parses the supplied + command line arguments. 
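disable_interspersed_args() no longer touches the parser directly; it just records a flag that _setup() applies when the optparse parser is rebuilt on each call. The optparse behaviour being toggled can be seen in a stdlib-only sketch:

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--verbose', action='store_true')

    # Default behaviour: options after a positional argument are still parsed.
    print(parser.parse_args(['serve', '--verbose', 'extra']))

    # With interspersed args disabled, parsing stops at the first non-option,
    # leaving '--verbose' in the leftovers for a sub-command to handle.
    parser.disable_interspersed_args()
    print(parser.parse_args(['serve', '--verbose', 'extra']))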
+ + :param args: the command line arguments + :returns: a dict of parsed option values + :raises: SystemExit, DuplicateOptError - :raises: RequiredOptError """ - self._do_check_required_opts(self._opts) + self._args = args - for group in self._groups.values(): - self._do_check_required_opts(group._opts, group) + for opt, group in self._all_opts(): + opt._add_to_cli(self._oparser, group) + + values, leftovers = self._oparser.parse_args(args) + + return vars(values), leftovers class GroupAttr(collections.Mapping): @@ -1483,7 +1552,10 @@ class CommonConfigOpts(ConfigOpts): help='syslog facility to receive log lines') ] - def __init__(self, **kwargs): - super(CommonConfigOpts, self).__init__(**kwargs) + def __init__(self): + super(CommonConfigOpts, self).__init__() self.register_cli_opts(self.common_cli_opts) self.register_cli_opts(self.logging_cli_opts) + + +CONF = CommonConfigOpts() diff --git a/nova/quota.py b/nova/quota.py index a5c751bec..eb769d5e4 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -89,189 +89,6 @@ FLAGS = flags.FLAGS FLAGS.register_opts(quota_opts) -quota_resources = ['metadata_items', 'injected_file_content_bytes', - 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', - 'injected_files', 'cores', 'security_groups', 'security_group_rules', - 'key_pairs'] - - -def _get_default_quotas(): - defaults = { - 'instances': FLAGS.quota_instances, - 'cores': FLAGS.quota_cores, - 'ram': FLAGS.quota_ram, - 'volumes': FLAGS.quota_volumes, - 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips, - 'metadata_items': FLAGS.quota_metadata_items, - 'injected_files': FLAGS.quota_injected_files, - 'injected_file_content_bytes': - FLAGS.quota_injected_file_content_bytes, - 'security_groups': FLAGS.quota_security_groups, - 'security_group_rules': FLAGS.quota_security_group_rules, - 'key_pairs': FLAGS.quota_key_pairs, - } - # -1 in the quota flags means unlimited - return defaults - - -def get_class_quotas(context, quota_class, defaults=None): - """Update defaults with the quota class values.""" - - if not defaults: - defaults = _get_default_quotas() - - quota = db.quota_class_get_all_by_name(context, quota_class) - for key in defaults.keys(): - if key in quota: - defaults[key] = quota[key] - - return defaults - - -def get_project_quotas(context, project_id): - defaults = _get_default_quotas() - if context.quota_class: - get_class_quotas(context, context.quota_class, defaults) - quota = db.quota_get_all_by_project(context, project_id) - for key in defaults.keys(): - if key in quota: - defaults[key] = quota[key] - return defaults - - -def _get_request_allotment(requested, used, quota): - if quota == -1: - return requested - return quota - used - - -def allowed_instances(context, requested_instances, instance_type): - """Check quota and return min(requested_instances, allowed_instances).""" - project_id = context.project_id - context = context.elevated() - requested_cores = requested_instances * instance_type['vcpus'] - requested_ram = requested_instances * instance_type['memory_mb'] - usage = db.instance_data_get_for_project(context, project_id) - used_instances, used_cores, used_ram = usage - quota = get_project_quotas(context, project_id) - allowed_instances = _get_request_allotment(requested_instances, - used_instances, - quota['instances']) - allowed_cores = _get_request_allotment(requested_cores, used_cores, - quota['cores']) - allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) - if instance_type['vcpus']: - allowed_instances = 
min(allowed_instances, - allowed_cores // instance_type['vcpus']) - if instance_type['memory_mb']: - allowed_instances = min(allowed_instances, - allowed_ram // instance_type['memory_mb']) - - return min(requested_instances, allowed_instances) - - -def allowed_volumes(context, requested_volumes, size): - """Check quota and return min(requested_volumes, allowed_volumes).""" - project_id = context.project_id - context = context.elevated() - size = int(size) - requested_gigabytes = requested_volumes * size - used_volumes, used_gigabytes = db.volume_data_get_for_project(context, - project_id) - quota = get_project_quotas(context, project_id) - allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, - quota['volumes']) - allowed_gigabytes = _get_request_allotment(requested_gigabytes, - used_gigabytes, - quota['gigabytes']) - if size != 0: - allowed_volumes = min(allowed_volumes, - int(allowed_gigabytes // size)) - return min(requested_volumes, allowed_volumes) - - -def allowed_floating_ips(context, requested_floating_ips): - """Check quota and return min(requested, allowed) floating ips.""" - project_id = context.project_id - context = context.elevated() - used_floating_ips = db.floating_ip_count_by_project(context, project_id) - quota = get_project_quotas(context, project_id) - allowed_floating_ips = _get_request_allotment(requested_floating_ips, - used_floating_ips, - quota['floating_ips']) - return min(requested_floating_ips, allowed_floating_ips) - - -def allowed_security_groups(context, requested_security_groups): - """Check quota and return min(requested, allowed) security groups.""" - project_id = context.project_id - context = context.elevated() - used_sec_groups = db.security_group_count_by_project(context, project_id) - quota = get_project_quotas(context, project_id) - allowed_sec_groups = _get_request_allotment(requested_security_groups, - used_sec_groups, - quota['security_groups']) - return min(requested_security_groups, allowed_sec_groups) - - -def allowed_security_group_rules(context, security_group_id, - requested_rules): - """Check quota and return min(requested, allowed) sec group rules.""" - project_id = context.project_id - context = context.elevated() - used_rules = db.security_group_rule_count_by_group(context, - security_group_id) - quota = get_project_quotas(context, project_id) - allowed_rules = _get_request_allotment(requested_rules, - used_rules, - quota['security_group_rules']) - return min(requested_rules, allowed_rules) - - -def allowed_key_pairs(context, requested_key_pairs): - """Check quota and return min(requested, allowed) key pairs.""" - user_id = context.user_id - project_id = context.project_id - context = context.elevated() - used_key_pairs = db.key_pair_count_by_user(context, user_id) - quota = get_project_quotas(context, project_id) - allowed_key_pairs = _get_request_allotment(requested_key_pairs, - used_key_pairs, - quota['key_pairs']) - return min(requested_key_pairs, allowed_key_pairs) - - -def _calculate_simple_quota(context, resource, requested): - """Check quota for resource; return min(requested, allowed).""" - quota = get_project_quotas(context, context.project_id) - allowed = _get_request_allotment(requested, 0, quota[resource]) - return min(requested, allowed) - - -def allowed_metadata_items(context, requested_metadata_items): - """Return the number of metadata items allowed.""" - return _calculate_simple_quota(context, 'metadata_items', - requested_metadata_items) - - -def allowed_injected_files(context, 
requested_injected_files): - """Return the number of injected files allowed.""" - return _calculate_simple_quota(context, 'injected_files', - requested_injected_files) - - -def allowed_injected_file_content_bytes(context, requested_bytes): - """Return the number of bytes allowed per injected file content.""" - resource = 'injected_file_content_bytes' - return _calculate_simple_quota(context, resource, requested_bytes) - - -def allowed_injected_file_path_bytes(context): - """Return the number of bytes allowed in an injected file path.""" - return FLAGS.quota_injected_file_path_bytes - - class DbQuotaDriver(object): """ Driver to perform necessary checks to enforce quotas and obtain diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index b48e47610..1980f9679 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -46,6 +46,12 @@ rpc_opts = [ default=['nova.exception'], help='Modules of exceptions that are permitted to be recreated' 'upon receiving exception data from an rpc call.'), + cfg.StrOpt('control_exchange', + default='nova', + help='AMQP exchange to connect to if using RabbitMQ or Qpid'), + cfg.BoolOpt('fake_rabbit', + default=False, + help='If passed, use a fake RabbitMQ provider'), ] _CONF = None @@ -220,6 +226,11 @@ def fanout_cast_to_server(context, server_params, topic, msg): topic, msg) +def queue_get_for(context, topic, host): + """Get a queue name for a given topic + host.""" + return '%s.%s' % (topic, host) + + _RPCIMPL = None diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py index 0e079f533..8df16ff9d 100644 --- a/nova/rpc/amqp.py +++ b/nova/rpc/amqp.py @@ -26,6 +26,7 @@ AMQP, but is deprecated and predates this code. """ import inspect +import logging import sys import uuid @@ -33,8 +34,6 @@ from eventlet import greenpool from eventlet import pools from eventlet import semaphore -from nova import context -from nova import log as logging from nova.openstack.common import excutils from nova.openstack.common import local import nova.rpc.common as rpc_common @@ -75,13 +74,14 @@ def get_connection_pool(conf, connection_cls): class ConnectionContext(rpc_common.Connection): """The class that is actually returned to the caller of - create_connection(). This is a essentially a wrapper around - Connection that supports 'with' and can return a new Connection or - one from a pool. It will also catch when an instance of this class - is to be deleted so that we can return Connections to the pool on - exceptions and so forth without making the caller be responsible for - catching all exceptions and making sure to return a connection to - the pool. + create_connection(). This is essentially a wrapper around + Connection that supports 'with'. It can also return a new + Connection, or one from a pool. The function will also catch + when an instance of this class is to be deleted. With that + we can return Connections to the pool on exceptions and so + forth without making the caller be responsible for catching + them. If possible the function makes sure to return a + connection to the pool. 
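The queue_get_for() helper added to nova.rpc above simply joins topic and host into a per-host queue name, which is what lets the scheduler changes later in this patch drop their db.queue_get_for() calls. A rough sketch of a caller, assuming a configured rpc backend (the method name and arguments are placeholders, not a real compute RPC):

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()

    # 'compute' + 'node1' -> 'compute.node1'; no database round-trip needed.
    topic = rpc.queue_get_for(ctxt, 'compute', 'node1')

    rpc.cast(ctxt, topic,
             {'method': 'example_method',   # placeholder method name
              'args': {'instance_id': 1}})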
""" def __init__(self, conf, connection_pool, pooled=True, server_params=None): @@ -132,6 +132,9 @@ class ConnectionContext(rpc_common.Connection): def create_consumer(self, topic, proxy, fanout=False): self.connection.create_consumer(topic, proxy, fanout) + def create_worker(self, topic, proxy, pool_name): + self.connection.create_worker(topic, proxy, pool_name) + def consume_in_thread(self): self.connection.consume_in_thread() @@ -165,12 +168,12 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, conn.direct_send(msg_id, msg) -class RpcContext(context.RequestContext): +class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call""" - def __init__(self, *args, **kwargs): + def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.conf = kwargs.pop('conf') - super(RpcContext, self).__init__(*args, **kwargs) + super(RpcContext, self).__init__(**kwargs) def reply(self, reply=None, failure=None, ending=False, connection_pool=None): diff --git a/nova/rpc/common.py b/nova/rpc/common.py index 886917733..c5f88f90b 100644 --- a/nova/rpc/common.py +++ b/nova/rpc/common.py @@ -18,13 +18,14 @@ # under the License. import copy +import logging import sys import traceback -from nova import log as logging from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import jsonutils +from nova.openstack.common import local LOG = logging.getLogger(__name__) @@ -131,6 +132,25 @@ class Connection(object): """ raise NotImplementedError() + def create_worker(self, conf, topic, proxy, pool_name): + """Create a worker on this connection. + + A worker is like a regular consumer of messages directed to a + topic, except that it is part of a set of such consumers (the + "pool") which may run in parallel. Every pool of workers will + receive a given message, but only one worker in the pool will + be asked to process it. Load is distributed across the members + of the pool in round-robin fashion. + + :param conf: An openstack.common.cfg configuration object. + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. + :param proxy: The object that will handle all incoming messages. + :param pool_name: String containing the name of the pool of workers + """ + raise NotImplementedError() + def consume_in_thread(self): """Spawn a thread to handle incoming messages. @@ -248,3 +268,46 @@ def deserialize_remote_exception(conf, data): # first exception argument. failure.args = (message,) + failure.args[1:] return failure + + +class CommonRpcContext(object): + def __init__(self, **kwargs): + self.values = kwargs + + def __getattr__(self, key): + try: + return self.values[key] + except KeyError: + raise AttributeError(key) + + def to_dict(self): + return copy.deepcopy(self.values) + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def update_store(self): + local.store.context = self + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + # TODO(russellb) This method is a bit of a nova-ism. It makes + # some assumptions about the data in the request context sent + # across rpc, while the rest of this class does not. 
We could get + # rid of this if we changed the nova code that uses this to + # convert the RpcContext back to its native RequestContext doing + # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) + + context = copy.deepcopy(self) + context.values['is_admin'] = True + + context.values.setdefault('roles', []) + + if 'admin' not in context.values['roles']: + context.values['roles'].append('admin') + + if read_deleted is not None: + context.values['read_deleted'] = read_deleted + + return context diff --git a/nova/rpc/impl_fake.py b/nova/rpc/impl_fake.py index 70a8ca5f7..54bd2497b 100644 --- a/nova/rpc/impl_fake.py +++ b/nova/rpc/impl_fake.py @@ -23,15 +23,14 @@ import time import eventlet -from nova import context from nova.rpc import common as rpc_common CONSUMERS = {} -class RpcContext(context.RequestContext): - def __init__(self, *args, **kwargs): - super(RpcContext, self).__init__(*args, **kwargs) +class RpcContext(rpc_common.CommonRpcContext): + def __init__(self, **kwargs): + super(RpcContext, self).__init__(**kwargs) self._response = [] self._done = False @@ -169,10 +168,11 @@ def fanout_cast(conf, context, topic, msg): if not method: return args = msg.get('args', {}) + version = msg.get('version', None) for consumer in CONSUMERS.get(topic, []): try: - consumer.call(context, method, args, None) + consumer.call(context, version, method, args, None) except Exception: pass diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 7285acbe3..b9fb081bd 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import functools import itertools import socket import ssl @@ -46,6 +47,39 @@ kombu_opts = [ default='', help=('SSL certification authority file ' '(valid only if SSL enabled)')), + cfg.StrOpt('rabbit_host', + default='localhost', + help='the RabbitMQ host'), + cfg.IntOpt('rabbit_port', + default=5672, + help='the RabbitMQ port'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password'), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the default of 0 implies an infinite retry count)'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + ] LOG = rpc_common.LOG @@ -156,15 +190,19 @@ class DirectConsumer(ConsumerBase): class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" - def __init__(self, conf, channel, topic, callback, tag, **kwargs): + def __init__(self, conf, channel, topic, callback, tag, name=None, + **kwargs): """Init a 'topic' queue. 
- 'channel' is the amqp channel to use - 'topic' is the topic to listen on - 'callback' is the callback to call when messages are received - 'tag' is a unique ID for the consumer on the channel + :param channel: the amqp channel to use + :param topic: the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param tag: a unique ID for the consumer on the channel + :param name: optional queue name, defaults to topic + :paramtype name: str - Other kombu options may be passed + Other kombu options may be passed as keyword arguments """ # Default options options = {'durable': conf.rabbit_durable_queues, @@ -180,7 +218,7 @@ class TopicConsumer(ConsumerBase): channel, callback, tag, - name=topic, + name=name or topic, exchange=exchange, routing_key=topic, **options) @@ -602,9 +640,12 @@ class Connection(object): """ self.declare_consumer(DirectConsumer, topic, callback) - def declare_topic_consumer(self, topic, callback=None): + def declare_topic_consumer(self, topic, callback=None, queue_name=None): """Create a 'topic' consumer.""" - self.declare_consumer(TopicConsumer, topic, callback) + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + ), + topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer""" @@ -649,13 +690,19 @@ class Connection(object): def create_consumer(self, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy, - rpc_amqp.get_connection_pool(self, Connection)) + rpc_amqp.get_connection_pool(self.conf, Connection)) if fanout: self.declare_fanout_consumer(topic, proxy_cb) else: self.declare_topic_consumer(topic, proxy_cb) + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.declare_topic_consumer(topic, proxy_cb, pool_name) + def create_connection(conf, new=True): """Create a connection""" diff --git a/nova/rpc/impl_qpid.py b/nova/rpc/impl_qpid.py index f2f669a54..388b99542 100644 --- a/nova/rpc/impl_qpid.py +++ b/nova/rpc/impl_qpid.py @@ -15,8 +15,10 @@ # License for the specific language governing permissions and limitations # under the License. +import functools import itertools import json +import logging import time import uuid @@ -25,7 +27,6 @@ import greenlet import qpid.messaging import qpid.messaging.exceptions -from nova import log as logging from nova.openstack.common import cfg from nova.rpc import amqp as rpc_amqp from nova.rpc import common as rpc_common @@ -161,17 +162,19 @@ class DirectConsumer(ConsumerBase): class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" - def __init__(self, conf, session, topic, callback): + def __init__(self, conf, session, topic, callback, name=None): """Init a 'topic' queue. 
- 'session' is the amqp session to use - 'topic' is the topic to listen on - 'callback' is the callback to call when messages are received + :param session: the amqp session to use + :param topic: is the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param name: optional queue name, defaults to topic """ super(TopicConsumer, self).__init__(session, callback, "%s/%s" % (conf.control_exchange, topic), {}, - topic, {}) + name or topic, {}) class FanoutConsumer(ConsumerBase): @@ -448,9 +451,12 @@ class Connection(object): """ self.declare_consumer(DirectConsumer, topic, callback) - def declare_topic_consumer(self, topic, callback=None): + def declare_topic_consumer(self, topic, callback=None, queue_name=None): """Create a 'topic' consumer.""" - self.declare_consumer(TopicConsumer, topic, callback) + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + ), + topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer""" @@ -495,7 +501,7 @@ class Connection(object): def create_consumer(self, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy, - rpc_amqp.get_connection_pool(self, Connection)) + rpc_amqp.get_connection_pool(self.conf, Connection)) if fanout: consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) @@ -506,6 +512,18 @@ class Connection(object): return consumer + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, + name=pool_name) + + self._register_consumer(consumer) + + return consumer + def create_connection(conf, new=True): """Create a connection""" diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 8e49e5aa4..364278584 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -62,8 +62,8 @@ def cast_to_volume_host(context, host, method, update_db=True, **kwargs): db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) rpc.cast(context, - db.queue_get_for(context, 'volume', host), - {"method": method, "args": kwargs}) + rpc.queue_get_for(context, 'volume', host), + {"method": method, "args": kwargs}) LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals()) @@ -79,8 +79,8 @@ def cast_to_compute_host(context, host, method, update_db=True, **kwargs): db.instance_update(context, instance_uuid, {'host': host, 'scheduled_at': now}) rpc.cast(context, - db.queue_get_for(context, 'compute', host), - {"method": method, "args": kwargs}) + rpc.queue_get_for(context, 'compute', host), + {"method": method, "args": kwargs}) LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals()) @@ -88,8 +88,8 @@ def cast_to_network_host(context, host, method, update_db=False, **kwargs): """Cast request to a network host queue""" rpc.cast(context, - db.queue_get_for(context, 'network', host), - {"method": method, "args": kwargs}) + rpc.queue_get_for(context, 'network', host), + {"method": method, "args": kwargs}) LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals()) @@ -106,8 +106,8 @@ def cast_to_host(context, topic, host, method, update_db=True, **kwargs): func(context, host, method, update_db=update_db, **kwargs) else: rpc.cast(context, - 
db.queue_get_for(context, topic, host), - {"method": method, "args": kwargs}) + rpc.queue_get_for(context, topic, host), + {"method": method, "args": kwargs}) LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'") % locals()) @@ -355,7 +355,7 @@ class Scheduler(object): # Checking cpuinfo. try: rpc.call(context, - db.queue_get_for(context, FLAGS.compute_topic, dest), + rpc.queue_get_for(context, FLAGS.compute_topic, dest), {"method": 'compare_cpu', "args": {'cpu_info': oservice_ref['cpu_info']}}) @@ -443,7 +443,7 @@ class Scheduler(object): available = available_gb * (1024 ** 3) # Getting necessary disk size - topic = db.queue_get_for(context, FLAGS.compute_topic, + topic = rpc.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) ret = rpc.call(context, topic, {"method": 'get_instance_disk_info', @@ -492,8 +492,8 @@ class Scheduler(object): """ src = instance_ref['host'] - dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest) - src_t = db.queue_get_for(context, FLAGS.compute_topic, src) + dst_t = rpc.queue_get_for(context, FLAGS.compute_topic, dest) + src_t = rpc.queue_get_for(context, FLAGS.compute_topic, src) filename = rpc.call(context, dst_t, {"method": 'create_shared_storage_test_file'}) diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py new file mode 100644 index 000000000..e081daf37 --- /dev/null +++ b/nova/scheduler/filters/trusted_filter.py @@ -0,0 +1,207 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel, Inc. +# Copyright (c) 2011-2012 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Filter to add support for Trusted Computing Pools. + +Filter that only schedules tasks on a host if the integrity (trust) +of that host matches the trust requested in the `extra_specs' for the +flavor. The `extra_specs' will contain a key/value pair where the +key is `trust'. The value of this pair (`trusted'/`untrusted') must +match the integrity of that host (obtained from the Attestation +service) before the task can be scheduled on that host. + +Note that the parameters to control access to the Attestation Service +are in the `nova.conf' file in a separate `trust' section. For example, +the config file will look something like: + + [DEFAULT] + verbose=True + ... + [trust] + server=attester.mynetwork.com + +Details on the specific parameters can be found in the file `trust_attest.py'. 
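The filter's settings are registered below under a 'trusted_computing' option group, and the attestation call itself is a JSON POST to the PollHosts action of the API URL above. A small standalone sketch of the request and response shapes the filter works with (the host name and trust level are illustrative):

    import json

    def build_poll_hosts_request(host, auth_blob=None):
        # Mirrors the body and headers AttestationService._request() sends
        # for POST <api_url>/PollHosts.
        body = json.dumps({'count': 1, 'hosts': host})
        headers = {'content-type': 'application/json',
                   'Accept': 'application/json'}
        if auth_blob:
            headers['x-auth-blob'] = auth_blob
        return body, headers

    # The service answers with per-host trust levels; _check_trust() picks the
    # level for the queried host and host_passes() compares it against the
    # flavor's trusted_host extra spec.
    sample_response = [{'hosts': [{'host_name': 'node1',
                                   'trust_lvl': 'trusted'}]}]
    body, headers = build_poll_hosts_request('node1')
    levels = [h['trust_lvl'] for item in sample_response
              for h in item['hosts'] if h['host_name'] == 'node1']
    print(levels[0])  # 'trusted'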
+ +Details on setting up and using an Attestation Service can be found at +the Open Attestation project at: + + https://github.com/OpenAttestation/OpenAttestation +""" + +import httplib +import json +import socket +import ssl + +from nova import flags +from nova import log as logging +from nova.openstack.common import cfg +from nova.scheduler import filters +from nova import utils + + +LOG = logging.getLogger(__name__) + +trusted_opts = [ + cfg.StrOpt('server', + default=None, + help='attestation server http'), + cfg.StrOpt('server_ca_file', + default=None, + help='attestation server Cert file for Identity verification'), + cfg.StrOpt('port', + default='8443', + help='attestation server port'), + cfg.StrOpt('api_url', + default='/OpenAttestationWebServices/V1.0', + help='attestation web API URL'), + cfg.StrOpt('auth_blob', + default=None, + help='attestation authorization blob - must change'), +] + +FLAGS = flags.FLAGS +trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters') +FLAGS.register_group(trust_group) +FLAGS.register_opts(trusted_opts, group='trusted_computing') + + +class HTTPSClientAuthConnection(httplib.HTTPSConnection): + """ + Class to make a HTTPS connection, with support for full client-based + SSL Authentication + """ + + def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None): + httplib.HTTPSConnection.__init__(self, host, + key_file=key_file, + cert_file=cert_file) + self.host = host + self.port = port + self.key_file = key_file + self.cert_file = cert_file + self.ca_file = ca_file + self.timeout = timeout + + def connect(self): + """ + Connect to a host on a given (SSL) port. + If ca_file is pointing somewhere, use it to check Server Certificate. + + Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). + This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to + ssl.wrap_socket(), which forces SSL to check server certificate + against our client certificate. + """ + sock = socket.create_connection((self.host, self.port), self.timeout) + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + ca_certs=self.ca_file, + cert_reqs=ssl.CERT_REQUIRED) + + +class AttestationService(httplib.HTTPSConnection): + # Provide access wrapper to attestation server to get integrity report. + + def __init__(self): + self.api_url = FLAGS.trusted_computing.api_url + self.host = FLAGS.trusted_computing.server + self.port = FLAGS.trusted_computing.port + self.auth_blob = FLAGS.trusted_computing.auth_blob + self.key_file = None + self.cert_file = None + self.ca_file = FLAGS.trusted_computing.server_ca_file + self.request_count = 100 + + def _do_request(self, method, action_url, body, headers): + # Connects to the server and issues a request. 
+ # :returns: result data + # :raises: IOError if the request fails + + action_url = "%s/%s" % (self.api_url, action_url) + try: + c = HTTPSClientAuthConnection(self.host, self.port, + key_file=self.key_file, + cert_file=self.cert_file, + ca_file=self.ca_file) + c.request(method, action_url, body, headers) + res = c.getresponse() + status_code = res.status + if status_code in (httplib.OK, + httplib.CREATED, + httplib.ACCEPTED, + httplib.NO_CONTENT): + return httplib.OK, res + return status_code, None + + except (socket.error, IOError) as e: + return IOError, None + + def _request(self, cmd, subcmd, host): + body = {} + body['count'] = 1 + body['hosts'] = host + cooked = json.dumps(body) + headers = {} + headers['content-type'] = 'application/json' + headers['Accept'] = 'application/json' + if self.auth_blob: + headers['x-auth-blob'] = self.auth_blob + status, res = self._do_request(cmd, subcmd, cooked, headers) + if status == httplib.OK: + data = res.read() + return status, json.loads(data) + else: + return status, None + + def _check_trust(self, data, host): + for item in data: + for state in item['hosts']: + if state['host_name'] == host: + return state['trust_lvl'] + return "" + + def do_attestation(self, host): + state = [] + status, data = self._request("POST", "PollHosts", host) + if status != httplib.OK: + return {} + state.append(data) + return self._check_trust(state, host) + + +class TrustedFilter(filters.BaseHostFilter): + """Trusted filter to support Trusted Compute Pools.""" + + def __init__(self): + self.attestation_service = AttestationService() + + def _is_trusted(self, host, trust): + level = self.attestation_service.do_attestation(host) + LOG.debug(_("TCP: trust state of " + "%(host)s:%(level)s(%(trust)s)") % locals()) + return trust == level + + def host_passes(self, host_state, filter_properties): + instance = filter_properties.get('instance_type', {}) + extra = instance.get('extra_specs', {}) + trust = extra.get('trusted_host') + host = host_state.host + if trust: + return self._is_trusted(host, trust) + return True diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py index 5ae2f1f62..8bb8b6525 100644 --- a/nova/scheduler/scheduler_options.py +++ b/nova/scheduler/scheduler_options.py @@ -1,3 +1,5 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # @@ -27,6 +29,7 @@ import os from nova import flags from nova import log as logging from nova.openstack.common import cfg +from nova import utils scheduler_json_config_location_opt = cfg.StrOpt( @@ -78,7 +81,7 @@ class SchedulerOptions(object): def _get_time_now(self): """Get current UTC. 
Broken out for testing.""" - return datetime.datetime.utcnow() + return utils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" diff --git a/nova/test.py b/nova/test.py index 4f9c607cb..478eddded 100644 --- a/nova/test.py +++ b/nova/test.py @@ -39,6 +39,7 @@ from nova import log as logging from nova.openstack.common import cfg from nova import service from nova import tests +from nova.tests import fake_flags from nova import utils from nova.virt import fake @@ -125,6 +126,10 @@ class TestCase(unittest.TestCase): def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() + + fake_flags.set_defaults(FLAGS) + flags.parse_args([], default_config_files=[]) + # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. @@ -137,7 +142,6 @@ class TestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.injected = [] self._services = [] - self._overridden_opts = [] def tearDown(self): """Runs after each test method to tear down test environment.""" @@ -156,7 +160,7 @@ class TestCase(unittest.TestCase): nova.image.fake.FakeImageService_reset() # Reset any overridden flags - self.reset_flags() + FLAGS.reset() # Stop any timers for x in self.injected: @@ -182,17 +186,6 @@ class TestCase(unittest.TestCase): """Override flag variables for a test.""" for k, v in kw.iteritems(): FLAGS.set_override(k, v) - self._overridden_opts.append(k) - - def reset_flags(self): - """Resets all flag variables for the test. - - Runs after each test. - - """ - for k in self._overridden_opts: - FLAGS.set_override(k, None) - self._overridden_opts = [] def start_service(self, name, host=None, **kwargs): host = host and host or uuid.uuid4().hex diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 448e260b5..d04681935 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -68,6 +68,7 @@ def setup(): from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags + fake_flags.set_defaults(FLAGS) rpc.register_opts(FLAGS) if FLAGS.sql_connection == "sqlite://": diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 08fad0988..c9080c0e3 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -623,7 +623,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(result['volumeSet'][0]['availabilityZone'], availabilityZone) - db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.volume_destroy(self.context, ec2utils.ec2_vol_id_to_uuid(volume_id)) def test_create_volume_from_snapshot(self): """Makes sure create_volume works when we specify a snapshot.""" @@ -640,7 +640,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(len(result['volumeSet']), 2) self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) - db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.volume_destroy(self.context, ec2utils.ec2_vol_id_to_uuid(volume_id)) db.snapshot_destroy(self.context, snap['id']) db.volume_destroy(self.context, vol['id']) @@ -1130,7 +1130,7 @@ class CloudTestCase(test.TestCase): result3 = describe_images(self.context, ['ami-00000001', 'ami-00000002']) self.assertEqual(2, len(result3['imagesSet'])) - # provide an non-existing image_id + # provide a non-existing image_id self.stubs.UnsetAll() self.stubs.Set(fake._FakeImageService, 'show', 
fake_show_none) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none) @@ -2083,7 +2083,7 @@ class CloudTestCase(test.TestCase): 'snapshot_id': snapshot2_id, 'delete_on_termination': True}]} ec2_instance_id = self._run_instance(**kwargs) - instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + instance_id = ec2utils.ec2_vol_id_to_uuid(ec2_instance_id) instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context, ec2_instance_id) @@ -2122,8 +2122,6 @@ class CloudTestCase(test.TestCase): for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): self.cloud.delete_snapshot(self.context, snapshot_id) - db.volume_destroy(self.context, vol['id']) - def test_create_image(self): """Make sure that CreateImage works""" # enforce periodic tasks run in short time to avoid wait for 60s. diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py index e2a249a2f..f6e1fbcb6 100644 --- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py +++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import datetime - from lxml import etree from nova.api.openstack import common @@ -156,7 +154,7 @@ class CloudpipesXMLSerializerTest(test.TestCase): public_ip='1.2.3.4', public_port='321', instance_id='1234-1234-1234-1234', - created_at=utils.isotime(datetime.datetime.utcnow()), + created_at=utils.isotime(utils.utcnow()), state='running')), dict(cloudpipe=dict( project_id='4321', diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py index c3f44fb70..91858f156 100644 --- a/nova/tests/api/openstack/compute/contrib/test_networks.py +++ b/nova/tests/api/openstack/compute/contrib/test_networks.py @@ -32,14 +32,14 @@ FAKE_NETWORKS = [ 'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047', 'cidr_v6': None, 'deleted_at': None, 'gateway': '10.0.0.1', 'label': 'mynet_0', - 'project_id': '1234', + 'project_id': '1234', 'rxtx_base': None, 'vpn_private_address': '10.0.0.2', 'deleted': False, 'vlan': 100, 'broadcast': '10.0.0.7', 'netmask': '255.255.255.248', 'injected': False, 'cidr': '10.0.0.0/29', 'vpn_public_address': '127.0.0.1', 'multi_host': False, - 'dns1': None, 'host': 'nsokolov-desktop', - 'gateway_v6': None, 'netmask_v6': None, + 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop', + 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': '2011-08-15 06:19:19.387525', }, { @@ -49,16 +49,32 @@ FAKE_NETWORKS = [ 'deleted_at': None, 'gateway': '10.0.0.9', 'label': 'mynet_1', 'project_id': None, 'vpn_private_address': '10.0.0.10', 'deleted': False, - 'vlan': 101, 'broadcast': '10.0.0.15', + 'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None, 'netmask': '255.255.255.248', 'injected': False, 'cidr': '10.0.0.10/29', 'vpn_public_address': None, - 'multi_host': False, 'dns1': None, 'host': None, - 'gateway_v6': None, 'netmask_v6': None, + 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None, + 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': '2011-08-15 06:19:19.885495', }, ] +FAKE_USER_NETWORKS = [ + { + 'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248', + 'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None, + 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0', + 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047', + }, + { + 
'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248', + 'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None, + 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1', + 'netmask_v6': None, + }, +] + + class FakeNetworkAPI(object): def __init__(self): @@ -97,9 +113,19 @@ class NetworksTest(test.TestCase): fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - def test_network_list_all(self): + def test_network_list_all_as_user(self): + self.maxDiff = None req = fakes.HTTPRequest.blank('/v2/1234/os-networks') res_dict = self.controller.index(req) + expected = copy.deepcopy(FAKE_USER_NETWORKS) + expected[0]['id'] = expected[0]['uuid'] + del expected[0]['uuid'] + self.assertEquals(res_dict, {'networks': expected}) + + def test_network_list_all_as_admin(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + req.environ["nova.context"].is_admin = True + res_dict = self.controller.index(req) expected = copy.deepcopy(FAKE_NETWORKS) expected[0]['id'] = expected[0]['uuid'] del expected[0]['uuid'] @@ -117,9 +143,19 @@ class NetworksTest(test.TestCase): self.controller.action, req, 100, {'disassociate': None}) - def test_network_get(self): + def test_network_get_as_user(self): + uuid = FAKE_USER_NETWORKS[0]['uuid'] + req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid) + res_dict = self.controller.show(req, uuid) + expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])} + expected['network']['id'] = expected['network']['uuid'] + del expected['network']['uuid'] + self.assertEqual(res_dict, expected) + + def test_network_get_as_admin(self): uuid = FAKE_NETWORKS[0]['uuid'] req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid) + req.environ["nova.context"].is_admin = True res_dict = self.controller.show(req, uuid) expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])} expected['network']['id'] = expected['network']['uuid'] diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py index 09b59a137..c3c28e9e4 100644 --- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py @@ -29,6 +29,7 @@ from nova import flags from nova import policy from nova import test from nova.tests.api.openstack import fakes +from nova import utils FLAGS = flags.FLAGS @@ -40,7 +41,7 @@ ROOT_GB = 10 EPHEMERAL_GB = 20 MEMORY_MB = 1024 VCPUS = 2 -STOP = datetime.datetime.utcnow() +STOP = utils.utcnow() START = STOP - datetime.timedelta(hours=HOURS) @@ -115,11 +116,11 @@ class SimpleTenantUsageTest(test.TestCase): SERVERS * VCPUS * HOURS) self.assertFalse(usages[i].get('server_usages')) - def test_verify_detailed_index(self): + def _get_tenant_usages(self, detailed=''): req = webob.Request.blank( '/v2/faketenant_0/os-simple-tenant-usage?' 
- 'detailed=1&start=%s&end=%s' % - (START.isoformat(), STOP.isoformat())) + 'detailed=%s&start=%s&end=%s' % + (detailed, START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -127,12 +128,26 @@ class SimpleTenantUsageTest(test.TestCase): fake_auth_context=self.admin_context)) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - usages = res_dict['tenant_usages'] + return res_dict['tenant_usages'] + + def test_verify_detailed_index(self): + usages = self._get_tenant_usages('1') for i in xrange(TENANTS): servers = usages[i]['server_usages'] for j in xrange(SERVERS): self.assertEqual(int(servers[j]['hours']), HOURS) + def test_verify_simple_index(self): + usages = self._get_tenant_usages(detailed='0') + for i in xrange(TENANTS): + self.assertEqual(usages[i].get('server_usages'), None) + + def test_verify_simple_index_empty_param(self): + # NOTE(lzyeval): 'detailed=&start=..&end=..' + usages = self._get_tenant_usages() + for i in xrange(TENANTS): + self.assertEqual(usages[i].get('server_usages'), None) + def test_verify_show(self): req = webob.Request.blank( '/v2/faketenant_0/os-simple-tenant-usage/' @@ -211,7 +226,7 @@ class SimpleTenantUsageSerializerTest(test.TestCase): def test_serializer_show(self): serializer = simple_tenant_usage.SimpleTenantUsageTemplate() - today = datetime.datetime.now() + today = utils.utcnow() yesterday = today - datetime.timedelta(days=1) raw_usage = dict( tenant_id='tenant', @@ -257,7 +272,7 @@ class SimpleTenantUsageSerializerTest(test.TestCase): def test_serializer_index(self): serializer = simple_tenant_usage.SimpleTenantUsagesTemplate() - today = datetime.datetime.now() + today = utils.utcnow() yesterday = today - datetime.timedelta(days=1) raw_usages = [dict( tenant_id='tenant1', diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py index a62ea84c1..f0a735ccd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py +++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
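The serializer and usage tests in this and the neighbouring files all switch from datetime.datetime.utcnow()/now() to utils.utcnow(), so timestamps flow through a single function that a test can stub. A minimal sketch using the stubout helper the test base class already creates (the frozen date is arbitrary):

    import datetime

    import stubout

    from nova import utils

    stubs = stubout.StubOutForTesting()
    stubs.Set(utils, 'utcnow', lambda: datetime.datetime(2012, 1, 1))

    # Code under test that calls utils.utcnow() now sees the frozen clock,
    # which direct datetime.datetime.utcnow() calls cannot provide.
    assert utils.utcnow() == datetime.datetime(2012, 1, 1)

    stubs.UnsetAll()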
-import datetime import json from lxml import etree @@ -26,6 +25,7 @@ from nova import flags from nova import log as logging from nova import test from nova.tests.api.openstack import fakes +from nova import utils from nova import volume FLAGS = flags.FLAGS @@ -250,7 +250,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap_id', status='snap_status', size=1024, - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), displayName='snap_name', displayDescription='snap_desc', volumeId='vol_id', @@ -268,7 +268,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap1_id', status='snap1_status', size=1024, - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), displayName='snap1_name', displayDescription='snap1_desc', volumeId='vol1_id', @@ -277,7 +277,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap2_id', status='snap2_status', size=1024, - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), displayName='snap2_name', displayDescription='snap2_desc', volumeId='vol2_id', diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py index 92382edc7..aee7260ef 100644 --- a/nova/tests/api/openstack/compute/contrib/test_volumes.py +++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py @@ -27,6 +27,7 @@ import nova.db from nova import flags from nova import test from nova.tests.api.openstack import fakes +from nova import utils from nova import volume from webob import exc @@ -363,7 +364,7 @@ class VolumeSerializerTest(test.TestCase): status='vol_status', size=1024, availabilityZone='vol_availability', - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), attachments=[dict( id='vol_id', volumeId='vol_id', @@ -392,7 +393,7 @@ class VolumeSerializerTest(test.TestCase): status='vol1_status', size=1024, availabilityZone='vol1_availability', - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), attachments=[dict( id='vol1_id', volumeId='vol1_id', @@ -412,7 +413,7 @@ class VolumeSerializerTest(test.TestCase): status='vol2_status', size=1024, availabilityZone='vol2_availability', - createdAt=datetime.datetime.now(), + createdAt=utils.utcnow(), attachments=[dict( id='vol2_id', volumeId='vol2_id', diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index aefe19581..83a8963a5 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -1491,7 +1491,7 @@ class ServersControllerCreateTest(test.TestCase): self.stubs.Set(nova.rpc, 'call', rpc_call_wrapper) self.stubs.Set(nova.db, 'instance_update_and_get_original', server_update) - self.stubs.Set(nova.db, 'queue_get_for', queue_get_for) + self.stubs.Set(nova.rpc, 'queue_get_for', queue_get_for) self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip', fake_method) diff --git a/nova/tests/api/openstack/volume/test_snapshots.py b/nova/tests/api/openstack/volume/test_snapshots.py index ac5e97cfa..ada6c42a1 100644 --- a/nova/tests/api/openstack/volume/test_snapshots.py +++ b/nova/tests/api/openstack/volume/test_snapshots.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import datetime - from lxml import etree import webob @@ -24,6 +22,7 @@ from nova import flags from nova import log as logging from nova import test from nova.tests.api.openstack import fakes +from nova import utils from nova import volume FLAGS = flags.FLAGS @@ -171,7 +170,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap_id', status='snap_status', size=1024, - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), display_name='snap_name', display_description='snap_desc', volume_id='vol_id', @@ -189,7 +188,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap1_id', status='snap1_status', size=1024, - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), display_name='snap1_name', display_description='snap1_desc', volume_id='vol1_id', @@ -198,7 +197,7 @@ class SnapshotSerializerTest(test.TestCase): id='snap2_id', status='snap2_status', size=1024, - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), display_name='snap2_name', display_description='snap2_desc', volume_id='vol2_id', diff --git a/nova/tests/api/openstack/volume/test_volumes.py b/nova/tests/api/openstack/volume/test_volumes.py index 2b96d15a3..8798d5274 100644 --- a/nova/tests/api/openstack/volume/test_volumes.py +++ b/nova/tests/api/openstack/volume/test_volumes.py @@ -22,6 +22,7 @@ from nova.api.openstack.volume import volumes from nova import flags from nova import test from nova.tests.api.openstack import fakes +from nova import utils from nova.volume import api as volume_api @@ -214,7 +215,7 @@ class VolumeSerializerTest(test.TestCase): status='vol_status', size=1024, availability_zone='vol_availability', - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), attachments=[dict( id='vol_id', volume_id='vol_id', @@ -243,7 +244,7 @@ class VolumeSerializerTest(test.TestCase): status='vol1_status', size=1024, availability_zone='vol1_availability', - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), attachments=[dict( id='vol1_id', volume_id='vol1_id', @@ -263,7 +264,7 @@ class VolumeSerializerTest(test.TestCase): status='vol2_status', size=1024, availability_zone='vol2_availability', - created_at=datetime.datetime.now(), + created_at=utils.utcnow(), attachments=[dict( id='vol2_id', volume_id='vol2_id', diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 51e36daab..f1903bd4c 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -154,6 +154,7 @@ class BaseTestCase(test.TestCase): inst['vcpus'] = 0 inst['root_gb'] = 0 inst['ephemeral_gb'] = 0 + inst['architecture'] = 'x86_64' inst.update(params) return db.instance_create(self.context, inst) @@ -1425,7 +1426,7 @@ class ComputeTestCase(BaseTestCase): inst_ref = self._create_fake_instance({'host': 'dummy'}) c = context.get_admin_context() - topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host']) + topic = rpc.queue_get_for(c, FLAGS.compute_topic, inst_ref['host']) # creating volume testdata volume_id = db.volume_create(c, {'size': 1})['id'] @@ -1480,7 +1481,7 @@ class ComputeTestCase(BaseTestCase): instance_id = instance['id'] c = context.get_admin_context() inst_ref = db.instance_get(c, instance_id) - topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host']) + topic = rpc.queue_get_for(c, FLAGS.compute_topic, inst_ref['host']) # create self.mox.StubOutWithMock(rpc, 'call') @@ -1522,7 +1523,7 @@ class ComputeTestCase(BaseTestCase): self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') 
self.compute.driver.unfilter_instance(i_ref, []) self.mox.StubOutWithMock(rpc, 'call') - rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + rpc.call(c, rpc.queue_get_for(c, FLAGS.compute_topic, dest), {"method": "post_live_migration_at_destination", "args": {'instance_id': i_ref['id'], 'block_migration': False}}) self.mox.StubOutWithMock(self.compute.driver, 'unplug_vifs') @@ -3439,6 +3440,24 @@ class ComputeAPITestCase(BaseTestCase): db.instance_destroy(self.context, refs[0]['id']) + def test_instance_architecture(self): + """Test the instance architecture""" + i_ref = self._create_fake_instance() + self.assertEqual(i_ref['architecture'], 'x86_64') + db.instance_destroy(self.context, i_ref['id']) + + def test_instance_unknown_architecture(self): + """Test if the architecture is unknown.""" + instance = self._create_fake_instance( + params={'architecture': ''}) + try: + self.compute.run_instance(self.context, instance['uuid']) + instances = db.instance_get_all(context.get_admin_context()) + instance = instances[0] + self.assertNotEqual(instance['architecture'], 'Unknown') + finally: + db.instance_destroy(self.context, instance['id']) + def test_instance_name_template(self): """Test the instance_name template""" self.flags(instance_name_template='instance-%d') diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index f97ddb730..a78fd2e12 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -304,9 +304,6 @@ def stub_out_db_network_api(stubs): return [FakeModel(n) for n in networks if n['project_id'] == project_id] - def fake_queue_get_for(context, topic, node): - return "%s.%s" % (topic, node) - funcs = [fake_floating_ip_allocate_address, fake_floating_ip_deallocate, fake_floating_ip_disassociate, @@ -335,8 +332,7 @@ def stub_out_db_network_api(stubs): fake_network_get_all_by_instance, fake_network_set_host, fake_network_update, - fake_project_get_networks, - fake_queue_get_for] + fake_project_get_networks] stub_out(stubs, funcs) diff --git a/nova/tests/db/nova.austin.sqlite b/nova/tests/db/nova.austin.sqlite Binary files differdeleted file mode 100644 index ad1326bce..000000000 --- a/nova/tests/db/nova.austin.sqlite +++ /dev/null diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 9f0c9983d..f25a9be45 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -20,27 +20,30 @@ from nova import flags FLAGS = flags.FLAGS -flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.set_default('volume_driver', 'nova.volume.driver.FakeISCSIDriver') -FLAGS.set_default('connection_type', 'fake') -FLAGS.set_default('fake_rabbit', True) -FLAGS.set_default('rpc_backend', 'nova.rpc.impl_fake') flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.set_default('auth_driver', 'nova.auth.dbdriver.DbDriver') -flags.DECLARE('network_size', 'nova.network.manager') -flags.DECLARE('num_networks', 'nova.network.manager') +flags.DECLARE('compute_scheduler_driver', 'nova.scheduler.multi') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.set_default('network_size', 8) -FLAGS.set_default('num_networks', 2) -FLAGS.set_default('fake_network', True) -FLAGS.set_default('image_service', 'nova.image.fake.FakeImageService') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.set_default('iscsi_num_targets', 8) -FLAGS.set_default('verbose', True) -FLAGS.set_default('sql_connection', "sqlite://") -FLAGS.set_default('use_ipv6', True) -FLAGS.set_default('flat_network_bridge', 'br100') -FLAGS.set_default('sqlite_synchronous', False) 
+flags.DECLARE('network_size', 'nova.network.manager') +flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('policy_file', 'nova.policy') -flags.DECLARE('compute_scheduler_driver', 'nova.scheduler.multi') -FLAGS.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini') +flags.DECLARE('volume_driver', 'nova.volume.manager') + + +def set_defaults(conf): + conf.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini') + conf.set_default('auth_driver', 'nova.auth.dbdriver.DbDriver') + conf.set_default('connection_type', 'fake') + conf.set_default('fake_network', True) + conf.set_default('fake_rabbit', True) + conf.set_default('flat_network_bridge', 'br100') + conf.set_default('image_service', 'nova.image.fake.FakeImageService') + conf.set_default('iscsi_num_targets', 8) + conf.set_default('network_size', 8) + conf.set_default('num_networks', 2) + conf.set_default('rpc_backend', 'nova.rpc.impl_fake') + conf.set_default('sql_connection', "sqlite://") + conf.set_default('sqlite_synchronous', False) + conf.set_default('use_ipv6', True) + conf.set_default('verbose', True) + conf.set_default('volume_driver', 'nova.volume.driver.FakeISCSIDriver') diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index d2d9ba017..ef98d3337 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import nova.compute.utils import nova.context from nova import db from nova import exception @@ -180,11 +179,13 @@ def fake_network(network_id, ipv6=None): 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.%d.2' % network_id, - 'rxtx_base': '%d' % network_id * 10} + 'rxtx_base': network_id * 10} if ipv6: fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id fake_network['netmask_v6'] = '64' + if FLAGS.flat_injected: + fake_network['injected'] = True return fake_network @@ -353,7 +354,7 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2, 0, 0, 3, None) if spectacular: return nw_model - return nova.compute.utils.legacy_network_info(nw_model) + return nw_model.legacy() def stub_out_nw_api_get_instance_nw_info(stubs, func=None, diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index ccf431b69..e3bdf9dab 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -140,7 +140,7 @@ class StubGlanceClient(object): return self.images[-1] - def update_image(self, image_id, metadata, data): + def update_image(self, image_id, metadata, data, features): for i, image in enumerate(self.images): if image['id'] == str(image_id): if 'id' in metadata: diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py index 124666df9..9f078fc49 100644 --- a/nova/tests/image/test_s3.py +++ b/nova/tests/image/test_s3.py @@ -15,7 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
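# A usage sketch for the set_defaults() helper added to fake_flags.py above;
# where the test suite actually calls it is an assumption (most likely the
# shared test bootstrap), not something shown in this patch. Rather than
# mutating FLAGS as an import side effect, the fake defaults are applied
# explicitly to a config object; set_default() only changes an option's
# default, so explicit overrides and command-line values still win.
# parse_args() with default_config_files=[] is the same entry point used by
# test_flags.py further down.

import sys

from nova import flags
from nova.tests import fake_flags

flags.parse_args(sys.argv, default_config_files=[])
fake_flags.set_defaults(flags.FLAGS)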
+import binascii +import eventlet +import mox import os +import tempfile from nova import context import nova.db.api @@ -58,6 +62,21 @@ ami_manifest_xml = """<?xml version="1.0" ?> </manifest> """ +file_manifest_xml = """<?xml version="1.0" ?> +<manifest> + <image> + <ec2_encrypted_key>foo</ec2_encrypted_key> + <user_encrypted_key>foo</user_encrypted_key> + <ec2_encrypted_iv>foo</ec2_encrypted_iv> + <parts count="1"> + <part index="0"> + <filename>foo</filename> + </part> + </parts> + </image> +</manifest> +""" + class TestS3ImageService(test.TestCase): def setUp(self): @@ -133,6 +152,46 @@ class TestS3ImageService(test.TestCase): 'no_device': True}] self.assertEqual(block_device_mapping, expected_bdm) + def test_s3_create_is_public(self): + metadata = {'properties': { + 'image_location': 'mybucket/my.img.manifest.xml'}, + 'name': 'mybucket/my.img'} + handle, tempf = tempfile.mkstemp(dir='/tmp') + + ignore = mox.IgnoreArg() + mockobj = self.mox.CreateMockAnything() + self.stubs.Set(self.image_service, '_conn', mockobj) + mockobj(ignore).AndReturn(mockobj) + self.stubs.Set(mockobj, 'get_bucket', mockobj) + mockobj(ignore).AndReturn(mockobj) + self.stubs.Set(mockobj, 'get_key', mockobj) + mockobj(ignore).AndReturn(mockobj) + self.stubs.Set(mockobj, 'get_contents_as_string', mockobj) + mockobj().AndReturn(file_manifest_xml) + self.stubs.Set(self.image_service, '_download_file', mockobj) + mockobj(ignore, ignore, ignore).AndReturn(tempf) + self.stubs.Set(binascii, 'a2b_hex', mockobj) + mockobj(ignore).AndReturn('foo') + mockobj(ignore).AndReturn('foo') + self.stubs.Set(self.image_service, '_decrypt_image', mockobj) + mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj) + self.stubs.Set(self.image_service, '_untarzip_image', mockobj) + mockobj(ignore, ignore).AndReturn(tempf) + self.mox.ReplayAll() + + img = self.image_service._s3_create(self.context, metadata) + eventlet.sleep() + translated = self.image_service._translate_id_to_uuid(context, img) + uuid = translated['id'] + self.glance_service = nova.image.get_default_image_service() + updated_image = self.glance_service.update(self.context, uuid, + {'is_public': True}, None, + {'x-glance-registry-purge-props': False}) + self.assertTrue(updated_image['is_public']) + self.assertEqual(updated_image['status'], 'active') + self.assertEqual(updated_image['properties']['image_state'], + 'available') + def test_s3_malicious_tarballs(self): self.assertRaises(exception.NovaException, self.image_service._test_for_malicious_tarball, diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index f6931a876..ff38af492 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -166,7 +166,7 @@ class FlatNetworkTestCase(test.TestCase): self.assertDictMatch(nw, check) check = {'broadcast': '192.168.%d.255' % nid, - 'dhcp_server': None, + 'dhcp_server': '192.168.1.1', 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid], 'gateway': '192.168.%d.1' % nid, 'gateway_v6': 'fe80::def', @@ -174,7 +174,7 @@ class FlatNetworkTestCase(test.TestCase): 'ips': 'DONTCARE', 'label': 'test%d' % nid, 'mac': 'DE:AD:BE:EF:00:%02x' % nid, - 'rxtx_cap': 0, + 'rxtx_cap': 30, 'vif_uuid': '00000000-0000-0000-0000-00000000000000%02d' % nid, 'should_create_vlan': False, @@ -268,7 +268,7 @@ class FlatNetworkTestCase(test.TestCase): self.network.validate_networks(self.context, requested_networks) - def test_add_fixed_ip_instance_without_vpn_requested_networks(self): + def 
test_add_fixed_ip_instance_using_id_without_vpn(self): self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'network_update') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') @@ -301,6 +301,39 @@ class FlatNetworkTestCase(test.TestCase): self.network.add_fixed_ip_to_instance(self.context, 1, HOST, networks[0]['id']) + def test_add_fixed_ip_instance_using_uuid_without_vpn(self): + self.mox.StubOutWithMock(db, 'network_get_by_uuid') + self.mox.StubOutWithMock(db, 'network_update') + self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') + self.mox.StubOutWithMock(db, 'instance_get') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({'security_groups': + [{'id': 0}]}) + db.instance_get(self.context, + 1).AndReturn({'display_name': HOST, + 'uuid': 'test-00001'}) + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({'availability_zone': ''}) + db.fixed_ip_associate_pool(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.101') + db.network_get_by_uuid(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(networks[0]) + db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + self.mox.ReplayAll() + self.network.add_fixed_ip_to_instance(self.context, 1, HOST, + networks[0]['uuid']) + def test_mini_dns_driver(self): zone1 = "example.org" zone2 = "example.com" diff --git a/nova/tests/policy.json b/nova/tests/policy.json index 83929d0bf..aa9c79749 100644 --- a/nova/tests/policy.json +++ b/nova/tests/policy.json @@ -99,6 +99,7 @@ "compute_extension:keypairs": [], "compute_extension:multinic": [], "compute_extension:networks": [], + "compute_extension:networks:view": [], "compute_extension:quotas": [], "compute_extension:quota_classes": [], "compute_extension:rescue": [], diff --git a/nova/tests/rpc/common.py b/nova/tests/rpc/common.py index c07ddfa1a..84dd79890 100644 --- a/nova/tests/rpc/common.py +++ b/nova/tests/rpc/common.py @@ -21,6 +21,7 @@ Unit Tests for remote procedure calls shared between all implementations import time +import eventlet from eventlet import greenthread import nose @@ -39,28 +40,36 @@ LOG = logging.getLogger(__name__) class BaseRpcTestCase(test.TestCase): - def setUp(self, supports_timeouts=True): + def setUp(self, supports_timeouts=True, topic='test', + topic_nested='nested'): super(BaseRpcTestCase, self).setUp() + self.topic = topic or self.topic + self.topic_nested = topic_nested or self.topic_nested self.supports_timeouts = supports_timeouts self.context = context.get_admin_context() + if self.rpc: - self.conn = self.rpc.create_connection(FLAGS, True) receiver = TestReceiver() - self.dispatcher = rpc_dispatcher.RpcDispatcher([receiver]) - self.conn.create_consumer('test', self.dispatcher, False) - self.conn.consume_in_thread() + self.conn = self._create_consumer(receiver, self.topic) def tearDown(self): if self.rpc: self.conn.close() super(BaseRpcTestCase, self).tearDown() + def _create_consumer(self, proxy, topic, fanout=False): + dispatcher = rpc_dispatcher.RpcDispatcher([proxy]) + conn = self.rpc.create_connection(FLAGS, True) + conn.create_consumer(topic, dispatcher, fanout) + conn.consume_in_thread() + return conn + def test_call_succeed(self): if not 
self.rpc: raise nose.SkipTest('rpc driver not available.') value = 42 - result = self.rpc.call(FLAGS, self.context, 'test', + result = self.rpc.call(FLAGS, self.context, self.topic, {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) @@ -69,7 +78,7 @@ class BaseRpcTestCase(test.TestCase): raise nose.SkipTest('rpc driver not available.') value = 42 - result = self.rpc.call(FLAGS, self.context, 'test', + result = self.rpc.call(FLAGS, self.context, self.topic, {"method": "echo_three_times_yield", "args": {"value": value}}) self.assertEqual(value + 2, result) @@ -80,7 +89,7 @@ class BaseRpcTestCase(test.TestCase): value = 42 result = self.rpc.multicall(FLAGS, self.context, - 'test', + self.topic, {"method": "echo", "args": {"value": value}}) for i, x in enumerate(result): @@ -94,7 +103,7 @@ class BaseRpcTestCase(test.TestCase): value = 42 result = self.rpc.multicall(FLAGS, self.context, - 'test', + self.topic, {"method": "multicall_three_nones", "args": {"value": value}}) for i, x in enumerate(result): @@ -108,7 +117,7 @@ class BaseRpcTestCase(test.TestCase): value = 42 result = self.rpc.multicall(FLAGS, self.context, - 'test', + self.topic, {"method": "echo_three_times_yield", "args": {"value": value}}) for i, x in enumerate(result): @@ -121,10 +130,54 @@ class BaseRpcTestCase(test.TestCase): """Makes sure a context is passed through rpc call.""" value = 42 result = self.rpc.call(FLAGS, self.context, - 'test', {"method": "context", + self.topic, {"method": "context", "args": {"value": value}}) self.assertEqual(self.context.to_dict(), result) + def _test_cast(self, fanout=False): + """Test casts by pushing items through a channeled queue.""" + + # Not a true global, but capitalized so + # it is clear it is leaking scope into Nested() + QUEUE = eventlet.queue.Queue() + + if not self.rpc: + raise nose.SkipTest('rpc driver not available.') + + # We use the nested topic so we don't need QUEUE to be a proper + # global, and do not keep state outside this test. + class Nested(object): + @staticmethod + def put_queue(context, value): + LOG.debug("Got value in put_queue: %s", value) + QUEUE.put(value) + + nested = Nested() + conn = self._create_consumer(nested, self.topic_nested, fanout) + value = 42 + + method = (self.rpc.cast, self.rpc.fanout_cast)[fanout] + method(FLAGS, self.context, + self.topic_nested, + {"method": "put_queue", + "args": {"value": value}}) + + try: + # If it does not succeed in 2 seconds, give up and assume + # failure. 
+ result = QUEUE.get(True, 2) + except Exception: + self.assertEqual(value, None) + + conn.close() + self.assertEqual(value, result) + + def test_cast_success(self): + self._test_cast(False) + + def test_fanout_success(self): + self._test_cast(True) + def test_nested_calls(self): if not self.rpc: raise nose.SkipTest('rpc driver not available.') @@ -133,7 +186,7 @@ class BaseRpcTestCase(test.TestCase): class Nested(object): @staticmethod def echo(context, queue, value): - """Calls echo in the passed queue""" + """Calls echo in the passed queue.""" LOG.debug(_("Nested received %(queue)s, %(value)s") % locals()) # TODO(comstud): @@ -147,15 +200,13 @@ class BaseRpcTestCase(test.TestCase): return value nested = Nested() - dispatcher = rpc_dispatcher.RpcDispatcher([nested]) - conn = self.rpc.create_connection(FLAGS, True) - conn.create_consumer('nested', dispatcher, False) - conn.consume_in_thread() + conn = self._create_consumer(nested, self.topic_nested) + value = 42 result = self.rpc.call(FLAGS, self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) + self.topic_nested, + {"method": "echo", + "args": {"queue": "test", "value": value}}) conn.close() self.assertEqual(value, result) @@ -163,7 +214,7 @@ class BaseRpcTestCase(test.TestCase): if not self.rpc: raise nose.SkipTest('rpc driver not available.') - """Make sure rpc.call will time out""" + """Make sure rpc.call will time out.""" if not self.supports_timeouts: raise nose.SkipTest(_("RPC backend does not support timeouts")) @@ -171,12 +222,12 @@ class BaseRpcTestCase(test.TestCase): self.assertRaises(rpc_common.Timeout, self.rpc.call, FLAGS, self.context, - 'test', + self.topic, {"method": "block", "args": {"value": value}}, timeout=1) try: self.rpc.call(FLAGS, self.context, - 'test', + self.topic, {"method": "block", "args": {"value": value}}, timeout=1) @@ -186,7 +237,7 @@ class BaseRpcTestCase(test.TestCase): class BaseRpcAMQPTestCase(BaseRpcTestCase): - """Base test class for all AMQP-based RPC tests""" + """Base test class for all AMQP-based RPC tests.""" def test_proxycallback_handles_exceptions(self): """Make sure exceptions unpacking messages don't cause hangs.""" if not self.rpc: @@ -203,7 +254,7 @@ class BaseRpcAMQPTestCase(BaseRpcTestCase): self.stubs.Set(rpc_amqp, 'unpack_context', fake_unpack_context) value = 41 - self.rpc.cast(FLAGS, self.context, 'test', + self.rpc.cast(FLAGS, self.context, self.topic, {"method": "echo", "args": {"value": value}}) # Wait for the cast to complete. @@ -212,14 +263,14 @@ class BaseRpcAMQPTestCase(BaseRpcTestCase): break greenthread.sleep(0.1) else: - self.fail("Timeout waiting for message to be consued") + self.fail("Timeout waiting for message to be consumed") # Now see if we get a response even though we raised an # exception for the cast above. 
self.stubs.Set(rpc_amqp, 'unpack_context', orig_unpack) value = 42 - result = self.rpc.call(FLAGS, self.context, 'test', + result = self.rpc.call(FLAGS, self.context, self.topic, {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) diff --git a/nova/tests/rpc/test_kombu.py b/nova/tests/rpc/test_kombu.py index 837495273..a69dcdfe9 100644 --- a/nova/tests/rpc/test_kombu.py +++ b/nova/tests/rpc/test_kombu.py @@ -102,6 +102,31 @@ class RpcKombuTestCase(common.BaseRpcAMQPTestCase): self.assertEqual(self.received_message, message) @test.skip_if(kombu is None, "Test requires kombu") + def test_topic_multiple_queues(self): + """Test sending to a topic exchange with multiple queues""" + + conn = self.rpc.create_connection(FLAGS) + message = 'topic test message' + + self.received_message_1 = None + self.received_message_2 = None + + def _callback1(message): + self.received_message_1 = message + + def _callback2(message): + self.received_message_2 = message + + conn.declare_topic_consumer('a_topic', _callback1, queue_name='queue1') + conn.declare_topic_consumer('a_topic', _callback2, queue_name='queue2') + conn.topic_send('a_topic', message) + conn.consume(limit=2) + conn.close() + + self.assertEqual(self.received_message_1, message) + self.assertEqual(self.received_message_2, message) + + @test.skip_if(kombu is None, "Test requires kombu") def test_direct_send_receive(self): """Test sending to a direct exchange/queue""" conn = self.rpc.create_connection(FLAGS) diff --git a/nova/tests/rpc/test_qpid.py b/nova/tests/rpc/test_qpid.py index f01c8a25b..c523f3fe3 100644 --- a/nova/tests/rpc/test_qpid.py +++ b/nova/tests/rpc/test_qpid.py @@ -147,6 +147,35 @@ class RpcQpidTestCase(test.TestCase): def test_create_consumer_fanout(self): self._test_create_consumer(fanout=True) + @test.skip_if(qpid is None, "Test requires qpid") + def test_create_worker(self): + self.mock_connection = self.mox.CreateMock(self.orig_connection) + self.mock_session = self.mox.CreateMock(self.orig_session) + self.mock_receiver = self.mox.CreateMock(self.orig_receiver) + + self.mock_connection.opened().AndReturn(False) + self.mock_connection.open() + self.mock_connection.session().AndReturn(self.mock_session) + expected_address = ( + 'nova/impl_qpid_test ; {"node": {"x-declare": ' + '{"auto-delete": true, "durable": true}, "type": "topic"}, ' + '"create": "always", "link": {"x-declare": {"auto-delete": ' + 'true, "exclusive": false, "durable": false}, "durable": ' + 'true, "name": "impl.qpid.test.workers"}}') + self.mock_session.receiver(expected_address).AndReturn( + self.mock_receiver) + self.mock_receiver.capacity = 1 + self.mock_connection.close() + + self.mox.ReplayAll() + + connection = impl_qpid.create_connection(FLAGS) + connection.create_worker("impl_qpid_test", + lambda *_x, **_y: None, + 'impl.qpid.test.workers', + ) + connection.close() + def _test_cast(self, fanout, server_params=None): self.mock_connection = self.mox.CreateMock(self.orig_connection) self.mock_session = self.mox.CreateMock(self.orig_session) diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 22a162aa2..b7a5402c8 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -15,17 +15,45 @@ Tests For Scheduler Host Filters. 
""" +import httplib import json +import stubout from nova import context from nova import exception from nova import flags from nova.scheduler import filters +from nova.scheduler.filters.trusted_filter import AttestationService from nova import test from nova.tests.scheduler import fakes from nova import utils +DATA = '' + + +def stub_out_https_backend(stubs): + """ + Stubs out the httplib.HTTPRequest.getresponse to return + faked-out data instead of grabbing actual contents of a resource + + The stubbed getresponse() returns an iterator over + the data "I am a teapot, short and stout\n" + + :param stubs: Set of stubout stubs + """ + + class FakeHTTPResponse(object): + + def read(self): + return DATA + + def fake_do_request(self, *args, **kwargs): + return httplib.OK, FakeHTTPResponse() + + stubs.Set(AttestationService, '_do_request', fake_do_request) + + class TestFilter(filters.BaseHostFilter): pass @@ -40,6 +68,8 @@ class HostFiltersTestCase(test.TestCase): def setUp(self): super(HostFiltersTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + stub_out_https_backend(self.stubs) self.context = context.RequestContext('fake', 'fake') self.json_query = json.dumps( ['and', ['>=', '$free_ram_mb', 1024], @@ -586,6 +616,57 @@ class HostFiltersTestCase(test.TestCase): filter_properties = {'scheduler_hints': {'query': json.dumps(raw)}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_trusted_filter_default_passes(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + filter_properties = {'instance_type': {'memory_mb': 1024}} + host = fakes.FakeHostState('host1', 'compute', {}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + def test_trusted_filter_trusted_and_trusted_passes(self): + global DATA + DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}' + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trusted_host': 'trusted'} + filter_properties = {'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'compute', {}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + def test_trusted_filter_trusted_and_untrusted_fails(self): + global DATA + DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}' + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trusted_host': 'trusted'} + filter_properties = {'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'compute', {}) + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + + def test_trusted_filter_untrusted_and_trusted_fails(self): + global DATA + DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}' + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trusted_host': 'untrusted'} + filter_properties = {'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'compute', {}) + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + + def test_trusted_filter_untrusted_and_untrusted_passes(self): + global DATA + DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}' + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trusted_host': 'untrusted'} + filter_properties = {'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = 
fakes.FakeHostState('host1', 'compute', {}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_core_filter_passes(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index b7915ed8e..302f22939 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -228,7 +228,7 @@ class SchedulerManagerTestCase(test.TestCase): *self.fake_args, **self.fake_kwargs) def test_run_instance_exception_puts_instance_in_error_state(self): - """Test that an NoValidHost exception for run_instance puts + """Test that a NoValidHost exception for run_instance puts the instance in ERROR state and eats the exception. """ @@ -476,7 +476,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') self.mox.StubOutWithMock(self.driver, '_get_compute_info') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') @@ -504,7 +504,7 @@ class SchedulerTestCase(test.TestCase): # assert_compute_node_has_enough_disk() self.driver._get_compute_info(self.context, dest, 'disk_available_least').AndReturn(1025) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue1') rpc.call(self.context, 'src_queue1', {'method': 'get_instance_disk_info', @@ -512,9 +512,9 @@ class SchedulerTestCase(test.TestCase): json.dumps([{'disk_size': 1024 * (1024 ** 3)}])) # Common checks (shared storage ok, same hypervisor,e tc) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -535,7 +535,7 @@ class SchedulerTestCase(test.TestCase): [{'compute_node': [{'hypervisor_type': 'xen', 'hypervisor_version': 1, 'cpu_info': 'fake_cpu_info'}]}]) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') rpc.call(self.context, 'dest_queue', {'method': 'compare_cpu', @@ -696,7 +696,7 @@ class SchedulerTestCase(test.TestCase): 'assert_compute_node_has_enough_memory') self.mox.StubOutWithMock(self.driver, '_get_compute_info') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') dest = 'fake_host2' @@ -717,7 +717,7 @@ class SchedulerTestCase(test.TestCase): # Not enough disk self.driver._get_compute_info(self.context, dest, 'disk_available_least').AndReturn(1023) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') rpc.call(self.context, 'src_queue', {'method': 'get_instance_disk_info', @@ -737,7 +737,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') 
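# The scheduler tests around this hunk lean heavily on mox's record/replay
# cycle, so a compressed, self-contained reminder of the pattern. That
# self.mox is created by the nova.test.TestCase base class, and that
# verification runs in its teardown, are assumptions about the base class
# rather than something shown in this patch:

import mox

from nova import context
from nova import flags
from nova import rpc
from nova import test

FLAGS = flags.FLAGS


class RecordReplayExampleTestCase(test.TestCase):
    def test_record_replay(self):
        ctxt = context.get_admin_context()
        # Record phase: declare the stubs, then every expected call in
        # order, each with its canned return value.
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        rpc.queue_get_for(ctxt, FLAGS.compute_topic,
                          'dest_host').AndReturn('compute.dest_host')
        rpc.cast(ctxt, 'compute.dest_host', mox.IgnoreArg())
        # Replay phase: the code under test must now make exactly the
        # recorded calls, or verification fails the test.
        self.mox.ReplayAll()
        rpc.cast(ctxt,
                 rpc.queue_get_for(ctxt, FLAGS.compute_topic, 'dest_host'),
                 {"method": "noop", "args": {}})  # arbitrary payload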
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') @@ -751,9 +751,9 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_dest_check(self.context, instance, dest, block_migration, disk_over_commit) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -779,7 +779,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') @@ -793,9 +793,9 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_dest_check(self.context, instance, dest, block_migration, disk_over_commit) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -819,7 +819,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') @@ -834,9 +834,9 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_dest_check(self.context, instance, dest, block_migration, disk_over_commit) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -868,7 +868,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') @@ -883,9 +883,9 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_dest_check(self.context, instance, dest, block_migration, disk_over_commit) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + 
rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -916,7 +916,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') @@ -931,9 +931,9 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_dest_check(self.context, instance, dest, block_migration, disk_over_commit) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, instance['host']).AndReturn('src_queue') tmp_filename = 'test-filename' rpc.call(self.context, 'dest_queue', @@ -953,7 +953,7 @@ class SchedulerTestCase(test.TestCase): [{'compute_node': [{'hypervisor_type': 'xen', 'hypervisor_version': 1, 'cpu_info': 'fake_cpu_info'}]}]) - db.queue_get_for(self.context, FLAGS.compute_topic, + rpc.queue_get_for(self.context, FLAGS.compute_topic, dest).AndReturn('dest_queue') rpc.call(self.context, 'dest_queue', {'method': 'compare_cpu', @@ -1018,13 +1018,13 @@ class SchedulerDriverModuleTestCase(test.TestCase): self.mox.StubOutWithMock(utils, 'utcnow') self.mox.StubOutWithMock(db, 'volume_update') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') utils.utcnow().AndReturn('fake-now') db.volume_update(self.context, 31337, {'host': host, 'scheduled_at': 'fake-now'}) - db.queue_get_for(self.context, 'volume', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1039,10 +1039,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): fake_kwargs = {'extra_arg': 'meow'} queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, 'volume', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1057,10 +1057,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): fake_kwargs = {'extra_arg': 'meow'} queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, 'volume', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1078,13 +1078,13 @@ class SchedulerDriverModuleTestCase(test.TestCase): self.mox.StubOutWithMock(utils, 'utcnow') self.mox.StubOutWithMock(db, 'instance_update') - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') utils.utcnow().AndReturn('fake-now') db.instance_update(self.context, 31337, {'host': host, 'scheduled_at': 'fake-now'}) - db.queue_get_for(self.context, 
'compute', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1099,10 +1099,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): fake_kwargs = {'extra_arg': 'meow'} queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, 'compute', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1117,10 +1117,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): fake_kwargs = {'extra_arg': 'meow'} queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, 'compute', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1135,10 +1135,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): fake_kwargs = {'extra_arg': 'meow'} queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, 'network', host).AndReturn(queue) + rpc.queue_get_for(self.context, 'network', host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) @@ -1193,10 +1193,10 @@ class SchedulerDriverModuleTestCase(test.TestCase): topic = 'unknown' queue = 'fake_queue' - self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'cast') - db.queue_get_for(self.context, topic, host).AndReturn(queue) + rpc.queue_get_for(self.context, topic, host).AndReturn(queue) rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs}) diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index b0367dd8f..37fdec74a 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -32,7 +32,6 @@ from nova.api import ec2 from nova.api.ec2 import apirequest from nova.api.ec2 import ec2utils from nova import block_device -from nova.compute import api as compute_api from nova import context from nova import exception from nova import test @@ -214,9 +213,9 @@ class ApiEc2TestCase(test.TestCase): # NOTE(vish): skipping the Authorizer roles = ['sysadmin', 'netadmin'] ctxt = context.RequestContext('fake', 'fake', roles=roles) - self.app = auth.InjectContext(ctxt, - ec2.Requestify(ec2.Authorizer(ec2.Executor()), - 'nova.api.ec2.cloud.CloudController')) + self.app = auth.InjectContext(ctxt, ec2.FaultWrapper( + ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor() + ), 'nova.api.ec2.cloud.CloudController')))) def expect_http(self, host=None, is_secure=False, api_version=None): """Returns a new EC2 connection""" diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index b16ffd335..8ce2ab6ee 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -96,7 +96,7 @@ class DbApiTestCase(test.TestCase): db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"}) # Ensure the new migration is not returned. 
- updated_at = datetime.datetime.utcnow() + updated_at = utils.utcnow() values = {"status": "finished", "updated_at": updated_at} migration = db.migration_create(ctxt, values) results = db.migration_get_all_unconfirmed(ctxt, 10) @@ -120,7 +120,7 @@ class DbApiTestCase(test.TestCase): db.instance_update(ctxt, instance.id, {"task_state": None}) # Ensure the newly rebooted instance is not returned. - updated_at = datetime.datetime.utcnow() + updated_at = utils.utcnow() values = {"task_state": "rebooting", "updated_at": updated_at} instance = db.instance_create(ctxt, values) results = db.instance_get_all_hung_in_rebooting(ctxt, 10) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 925de1f5f..f1baffa0a 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -34,54 +34,50 @@ class FlagsTestCase(test.TestCase): def setUp(self): super(FlagsTestCase, self).setUp() - self.FLAGS = flags.NovaConfigOpts() - self.global_FLAGS = flags.FLAGS - self.flags(config_file=[]) def test_declare(self): - self.assert_('answer' not in self.global_FLAGS) + self.assert_('answer' not in FLAGS) flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assert_('answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.answer, 42) + self.assert_('answer' in FLAGS) + self.assertEqual(FLAGS.answer, 42) # Make sure we don't overwrite anything - self.global_FLAGS.set_override('answer', 256) - self.assertEqual(self.global_FLAGS.answer, 256) + FLAGS.set_override('answer', 256) + self.assertEqual(FLAGS.answer, 256) flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assertEqual(self.global_FLAGS.answer, 256) + self.assertEqual(FLAGS.answer, 256) def test_getopt_non_interspersed_args(self): - self.assert_('runtime_answer' not in self.global_FLAGS) + self.assert_('runtime_answer' not in FLAGS) argv = ['flags_test', 'extra_arg', '--runtime_answer=60'] - args = self.global_FLAGS(argv) + args = flags.parse_args(argv, default_config_files=[]) self.assertEqual(len(args), 3) self.assertEqual(argv, args) def test_runtime_and_unknown_flags(self): - self.assert_('runtime_answer' not in self.global_FLAGS) + self.assert_('runtime_answer' not in FLAGS) import nova.tests.runtime_flags - self.assert_('runtime_answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.runtime_answer, 54) + self.assert_('runtime_answer' in FLAGS) + self.assertEqual(FLAGS.runtime_answer, 54) def test_long_vs_short_flags(self): - self.global_FLAGS.clear() - self.global_FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long', - default='val', - help='desc')) + FLAGS.clear() + FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long', + default='val', + help='desc')) argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] - args = self.global_FLAGS(argv) + args = flags.parse_args(argv, default_config_files=[]) - self.assert_('duplicate_answer' not in self.global_FLAGS) - self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + self.assert_('duplicate_answer' not in FLAGS) + self.assert_(FLAGS.duplicate_answer_long, 60) - self.global_FLAGS.clear() - self.global_FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer', - default=60, - help='desc')) - args = self.global_FLAGS(argv) - self.assertEqual(self.global_FLAGS.duplicate_answer, 60) - self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + FLAGS.clear() + FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer', + default=60, help='desc')) + args = flags.parse_args(argv, default_config_files=[]) + self.assertEqual(FLAGS.duplicate_answer, 
60) + self.assertEqual(FLAGS.duplicate_answer_long, 'val') def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') @@ -97,19 +93,5 @@ class FlagsTestCase(test.TestCase): self.assertEqual(FLAGS.flags_unittest, 'foo') self.flags(flags_unittest='bar') self.assertEqual(FLAGS.flags_unittest, 'bar') - self.reset_flags() + FLAGS.reset() self.assertEqual(FLAGS.flags_unittest, 'foo') - - def test_defaults(self): - self.FLAGS.register_opt(cfg.StrOpt('foo', default='bar', help='desc')) - self.assertEqual(self.FLAGS.foo, 'bar') - - self.FLAGS.set_default('foo', 'blaa') - self.assertEqual(self.FLAGS.foo, 'blaa') - - def test_templated_values(self): - self.FLAGS.register_opt(cfg.StrOpt('foo', default='foo', help='desc')) - self.FLAGS.register_opt(cfg.StrOpt('bar', default='bar', help='desc')) - self.FLAGS.register_opt(cfg.StrOpt('blaa', - default='$foo$bar', help='desc')) - self.assertEqual(self.FLAGS.blaa, 'foobar') diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 0baeee770..ab4af3bc1 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -201,6 +201,22 @@ class InstanceTypeTestCase(test.TestCase): fetched = instance_types.get_instance_type_by_flavor_id(flavorid) self.assertEqual(default_instance_type, fetched) + def test_can_read_deleted_types_using_flavor_id(self): + """Ensure deleted instance types can be read when querying flavor_id""" + inst_type_name = "test" + inst_type_flavor_id = "test1" + + inst_type = instance_types.create(inst_type_name, 256, 1, 120, 100, + inst_type_flavor_id) + self.assertEqual(inst_type_name, inst_type["name"]) + + # NOTE(jk0): The deleted flavor will show up here because the context + # in get_instance_type_by_flavor_id() is set to use read_deleted. 
+ instance_types.destroy(inst_type["name"]) + deleted_inst_type = instance_types.get_instance_type_by_flavor_id( + inst_type_flavor_id) + self.assertEqual(inst_type_name, deleted_inst_type["name"]) + def test_will_list_deleted_type_for_active_instance(self): """Ensure deleted instance types with active instances can be read""" ctxt = context.get_admin_context() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 9ba45481c..329f81ab4 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -30,7 +30,6 @@ from xml.dom import minidom from nova.api.ec2 import cloud from nova.compute import instance_types from nova.compute import power_state -from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import context from nova import db @@ -1892,7 +1891,7 @@ class IptablesFirewallTestCase(test.TestCase): _fake_stub_out_get_nw_info(self.stubs, lambda *a, **kw: network_model) - network_info = compute_utils.legacy_network_info(network_model) + network_info = network_model.legacy() self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py index dfa18325a..81b44e861 100644 --- a/nova/tests/test_libvirt_vif.py +++ b/nova/tests/test_libvirt_vif.py @@ -75,7 +75,7 @@ class LibvirtVifTestCase(test.TestCase): conf.memory = 100 * 1024 conf.vcpus = 4 - nic = driver.plug(self.instance, self.net, self.mapping) + nic = driver.plug(self.instance, (self.net, self.mapping)) conf.add_device(nic) return conf.to_xml() @@ -93,7 +93,7 @@ class LibvirtVifTestCase(test.TestCase): mac = node.find("mac").get("address") self.assertEqual(mac, self.mapping['mac']) - d.unplug(None, self.net, self.mapping) + d.unplug(None, (self.net, self.mapping)) def test_ovs_ethernet_driver(self): d = vif.LibvirtOpenVswitchDriver() @@ -111,7 +111,7 @@ class LibvirtVifTestCase(test.TestCase): script = node.find("script").get("path") self.assertEquals(script, "") - d.unplug(None, self.net, self.mapping) + d.unplug(None, (self.net, self.mapping)) def test_ovs_virtualport_driver(self): d = vif.LibvirtOpenVswitchVirtualPortDriver() @@ -137,7 +137,7 @@ class LibvirtVifTestCase(test.TestCase): iface_id_found = True self.assertTrue(iface_id_found) - d.unplug(None, self.net, self.mapping) + d.unplug(None, (self.net, self.mapping)) def test_quantum_bridge_ethernet_driver(self): d = vif.QuantumLinuxBridgeVIFDriver() @@ -155,4 +155,4 @@ class LibvirtVifTestCase(test.TestCase): script = node.find("script").get("path") self.assertEquals(script, "") - d.unplug(None, self.net, self.mapping) + d.unplug(None, (self.net, self.mapping)) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 4bb0e8b2e..f17243cdc 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -37,233 +37,7 @@ from nova import volume FLAGS = flags.FLAGS -class GetQuotaTestCase(test.TestCase): - def setUp(self): - super(GetQuotaTestCase, self).setUp() - self.flags(quota_instances=10, - quota_cores=20, - quota_ram=50 * 1024, - quota_volumes=10, - quota_gigabytes=1000, - quota_floating_ips=10, - quota_security_groups=10, - quota_security_group_rules=20, - quota_key_pairs=10, - quota_metadata_items=128, - quota_injected_files=5, - quota_injected_file_content_bytes=10 * 1024) - self.context = context.RequestContext('admin', 'admin', is_admin=True) - - def _stub_class(self): - def fake_quota_class_get_all_by_name(context, quota_class): - result = 
dict(class_name=quota_class) - if quota_class == 'test_class': - result.update( - instances=5, - cores=10, - ram=25 * 1024, - volumes=5, - gigabytes=500, - floating_ips=5, - quota_security_groups=10, - quota_security_group_rules=20, - quota_key_pairs=10, - metadata_items=64, - injected_files=2, - injected_file_content_bytes=5 * 1024, - invalid_quota=100, - ) - return result - - self.stubs.Set(db, 'quota_class_get_all_by_name', - fake_quota_class_get_all_by_name) - - def _stub_project(self, override=False): - def fake_quota_get_all_by_project(context, project_id): - result = dict(project_id=project_id) - if override: - result.update( - instances=2, - cores=5, - ram=12 * 1024, - volumes=2, - gigabytes=250, - floating_ips=2, - security_groups=5, - security_group_rules=10, - key_pairs=5, - metadata_items=32, - injected_files=1, - injected_file_content_bytes=2 * 1024, - invalid_quota=50, - ) - return result - - self.stubs.Set(db, 'quota_get_all_by_project', - fake_quota_get_all_by_project) - - def test_default_quotas(self): - result = quota._get_default_quotas() - self.assertEqual(result, dict( - instances=10, - cores=20, - ram=50 * 1024, - volumes=10, - gigabytes=1000, - floating_ips=10, - security_groups=10, - security_group_rules=20, - key_pairs=10, - metadata_items=128, - injected_files=5, - injected_file_content_bytes=10 * 1024, - )) - - def test_default_quotas_unlimited(self): - self.flags(quota_instances=-1, - quota_cores=-1, - quota_ram=-1, - quota_volumes=-1, - quota_gigabytes=-1, - quota_floating_ips=-1, - quota_security_groups=-1, - quota_security_group_rules=-1, - quota_key_pairs=-1, - quota_metadata_items=-1, - quota_injected_files=-1, - quota_injected_file_content_bytes=-1) - result = quota._get_default_quotas() - self.assertEqual(result, dict( - instances=-1, - cores=-1, - ram=-1, - volumes=-1, - gigabytes=-1, - floating_ips=-1, - security_groups=-1, - security_group_rules=-1, - key_pairs=-1, - metadata_items=-1, - injected_files=-1, - injected_file_content_bytes=-1, - )) - - def test_class_quotas_noclass(self): - self._stub_class() - result = quota.get_class_quotas(self.context, 'noclass') - self.assertEqual(result, dict( - instances=10, - cores=20, - ram=50 * 1024, - volumes=10, - gigabytes=1000, - floating_ips=10, - security_groups=10, - security_group_rules=20, - key_pairs=10, - metadata_items=128, - injected_files=5, - injected_file_content_bytes=10 * 1024, - )) - - def test_class_quotas(self): - self._stub_class() - result = quota.get_class_quotas(self.context, 'test_class') - self.assertEqual(result, dict( - instances=5, - cores=10, - ram=25 * 1024, - volumes=5, - gigabytes=500, - floating_ips=5, - security_groups=10, - security_group_rules=20, - key_pairs=10, - metadata_items=64, - injected_files=2, - injected_file_content_bytes=5 * 1024, - )) - - def test_project_quotas_defaults_noclass(self): - self._stub_class() - self._stub_project() - result = quota.get_project_quotas(self.context, 'admin') - self.assertEqual(result, dict( - instances=10, - cores=20, - ram=50 * 1024, - volumes=10, - gigabytes=1000, - floating_ips=10, - security_groups=10, - security_group_rules=20, - key_pairs=10, - metadata_items=128, - injected_files=5, - injected_file_content_bytes=10 * 1024, - )) - - def test_project_quotas_overrides_noclass(self): - self._stub_class() - self._stub_project(True) - result = quota.get_project_quotas(self.context, 'admin') - self.assertEqual(result, dict( - instances=2, - cores=5, - ram=12 * 1024, - volumes=2, - gigabytes=250, - floating_ips=2, - security_groups=5, 
- security_group_rules=10, - key_pairs=5, - metadata_items=32, - injected_files=1, - injected_file_content_bytes=2 * 1024, - )) - - def test_project_quotas_defaults_withclass(self): - self._stub_class() - self._stub_project() - self.context.quota_class = 'test_class' - result = quota.get_project_quotas(self.context, 'admin') - self.assertEqual(result, dict( - instances=5, - cores=10, - ram=25 * 1024, - volumes=5, - gigabytes=500, - floating_ips=5, - security_groups=10, - security_group_rules=20, - key_pairs=10, - metadata_items=64, - injected_files=2, - injected_file_content_bytes=5 * 1024, - )) - - def test_project_quotas_overrides_withclass(self): - self._stub_class() - self._stub_project(True) - self.context.quota_class = 'test_class' - result = quota.get_project_quotas(self.context, 'admin') - self.assertEqual(result, dict( - instances=2, - cores=5, - ram=12 * 1024, - volumes=2, - gigabytes=250, - floating_ips=2, - security_groups=5, - security_group_rules=10, - key_pairs=5, - metadata_items=32, - injected_files=1, - injected_file_content_bytes=2 * 1024, - )) - - -class OldQuotaTestCase(test.TestCase): +class QuotaIntegrationTestCase(test.TestCase): class StubImageService(object): @@ -271,7 +45,7 @@ class OldQuotaTestCase(test.TestCase): return {"properties": {}} def setUp(self): - super(OldQuotaTestCase, self).setUp() + super(QuotaIntegrationTestCase, self).setUp() self.flags(connection_type='fake', quota_instances=2, quota_cores=4, @@ -324,178 +98,6 @@ class OldQuotaTestCase(test.TestCase): vol['size'] = size return db.volume_create(self.context, vol)['id'] - def _get_instance_type(self, name): - instance_types = { - 'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1), - 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2), - 'm1.medium': - dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3), - 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4), - 'm1.xlarge': - dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5), - 'm1.nocpu': dict(memory_mb=512, vcpus=0, root_gb=0, flavorid=6), - 'm1.nomem': dict(memory_mb=0, vcpus=1, root_gb=0, flavorid=7)} - return instance_types[name] - - def test_quota_no_mem_no_cpu(self): - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.nocpu')) - self.assertEqual(num_instances, 2) - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.nomem')) - self.assertEqual(num_instances, 2) - - def test_quota_overrides(self): - """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.small')) - self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project_id, 'instances', 10) - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.small')) - self.assertEqual(num_instances, 4) - db.quota_create(self.context, self.project_id, 'cores', 100) - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.small')) - self.assertEqual(num_instances, 10) - db.quota_create(self.context, self.project_id, 'ram', 3 * 2048) - num_instances = quota.allowed_instances(self.context, 100, - self._get_instance_type('m1.small')) - self.assertEqual(num_instances, 3) - - # metadata_items - too_many_items = FLAGS.quota_metadata_items + 1000 - num_metadata_items = quota.allowed_metadata_items(self.context, - too_many_items) - self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) - 
db.quota_create(self.context, self.project_id, 'metadata_items', 5) - num_metadata_items = quota.allowed_metadata_items(self.context, - too_many_items) - self.assertEqual(num_metadata_items, 5) - - # Cleanup - db.quota_destroy_all_by_project(self.context, self.project_id) - - def test_unlimited_instances(self): - self.flags(quota_instances=2, quota_ram=-1, quota_cores=-1) - instance_type = self._get_instance_type('m1.small') - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project_id, 'instances', -1) - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 100) - num_instances = quota.allowed_instances(self.context, 101, - instance_type) - self.assertEqual(num_instances, 101) - - def test_unlimited_ram(self): - self.flags(quota_instances=-1, quota_ram=2 * 2048, quota_cores=-1) - instance_type = self._get_instance_type('m1.small') - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project_id, 'ram', -1) - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 100) - num_instances = quota.allowed_instances(self.context, 101, - instance_type) - self.assertEqual(num_instances, 101) - - def test_unlimited_cores(self): - self.flags(quota_instances=-1, quota_ram=-1, quota_cores=2) - instance_type = self._get_instance_type('m1.small') - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project_id, 'cores', -1) - num_instances = quota.allowed_instances(self.context, 100, - instance_type) - self.assertEqual(num_instances, 100) - num_instances = quota.allowed_instances(self.context, 101, - instance_type) - self.assertEqual(num_instances, 101) - - def test_unlimited_volumes(self): - self.flags(quota_volumes=10, quota_gigabytes=-1) - volumes = quota.allowed_volumes(self.context, 100, 1) - self.assertEqual(volumes, 10) - db.quota_create(self.context, self.project_id, 'volumes', -1) - volumes = quota.allowed_volumes(self.context, 100, 1) - self.assertEqual(volumes, 100) - volumes = quota.allowed_volumes(self.context, 101, 1) - self.assertEqual(volumes, 101) - - def test_unlimited_gigabytes(self): - self.flags(quota_volumes=-1, quota_gigabytes=10) - volumes = quota.allowed_volumes(self.context, 100, 1) - self.assertEqual(volumes, 10) - db.quota_create(self.context, self.project_id, 'gigabytes', -1) - volumes = quota.allowed_volumes(self.context, 100, 1) - self.assertEqual(volumes, 100) - volumes = quota.allowed_volumes(self.context, 101, 1) - self.assertEqual(volumes, 101) - - def test_unlimited_floating_ips(self): - self.flags(quota_floating_ips=10) - floating_ips = quota.allowed_floating_ips(self.context, 100) - self.assertEqual(floating_ips, 10) - db.quota_create(self.context, self.project_id, 'floating_ips', -1) - floating_ips = quota.allowed_floating_ips(self.context, 100) - self.assertEqual(floating_ips, 100) - floating_ips = quota.allowed_floating_ips(self.context, 101) - self.assertEqual(floating_ips, 101) - - def test_unlimited_security_groups(self): - self.flags(quota_security_groups=10) - security_groups = quota.allowed_security_groups(self.context, 100) - self.assertEqual(security_groups, 10) - db.quota_create(self.context, self.project_id, 'security_groups', -1) - security_groups = 
quota.allowed_security_groups(self.context, 100) - self.assertEqual(security_groups, 100) - security_groups = quota.allowed_security_groups(self.context, 101) - self.assertEqual(security_groups, 101) - - def test_unlimited_key_pairs(self): - self.flags(quota_key_pairs=10) - key_pairs = quota.allowed_key_pairs(self.context, 100) - self.assertEqual(key_pairs, 10) - db.quota_create(self.context, self.project_id, 'key_pairs', -1) - key_pairs = quota.allowed_key_pairs(self.context, 100) - self.assertEqual(key_pairs, 100) - key_pairs = quota.allowed_key_pairs(self.context, 101) - self.assertEqual(key_pairs, 101) - - def test_unlimited_security_group_rules(self): - - def fake_security_group_rule_count_by_group(context, sec_group_id): - return 0 - - self.stubs.Set(db, 'security_group_rule_count_by_group', - fake_security_group_rule_count_by_group) - - self.flags(quota_security_group_rules=20) - rules = quota.allowed_security_group_rules(self.context, 1234, 100) - self.assertEqual(rules, 20) - db.quota_create(self.context, self.project_id, 'security_group_rules', - -1) - rules = quota.allowed_security_group_rules(self.context, 1234, 100) - self.assertEqual(rules, 100) - rules = quota.allowed_security_group_rules(self.context, 1234, 101) - self.assertEqual(rules, 101) - - def test_unlimited_metadata_items(self): - self.flags(quota_metadata_items=10) - items = quota.allowed_metadata_items(self.context, 100) - self.assertEqual(items, 10) - db.quota_create(self.context, self.project_id, 'metadata_items', -1) - items = quota.allowed_metadata_items(self.context, 100) - self.assertEqual(items, 100) - items = quota.allowed_metadata_items(self.context, 101) - self.assertEqual(items, 101) - def test_too_many_instances(self): instance_ids = [] for i in range(FLAGS.quota_instances): @@ -573,48 +175,6 @@ class OldQuotaTestCase(test.TestCase): image_href=image_uuid, metadata=metadata) - def test_default_allowed_injected_files(self): - self.flags(quota_injected_files=55) - self.assertEqual(quota.allowed_injected_files(self.context, 100), 55) - - def test_overridden_allowed_injected_files(self): - self.flags(quota_injected_files=5) - db.quota_create(self.context, self.project_id, 'injected_files', 77) - self.assertEqual(quota.allowed_injected_files(self.context, 100), 77) - - def test_unlimited_default_allowed_injected_files(self): - self.flags(quota_injected_files=-1) - self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) - - def test_unlimited_db_allowed_injected_files(self): - self.flags(quota_injected_files=5) - db.quota_create(self.context, self.project_id, 'injected_files', -1) - self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) - - def test_default_allowed_injected_file_content_bytes(self): - self.flags(quota_injected_file_content_bytes=12345) - limit = quota.allowed_injected_file_content_bytes(self.context, 23456) - self.assertEqual(limit, 12345) - - def test_overridden_allowed_injected_file_content_bytes(self): - self.flags(quota_injected_file_content_bytes=12345) - db.quota_create(self.context, self.project_id, - 'injected_file_content_bytes', 5678) - limit = quota.allowed_injected_file_content_bytes(self.context, 23456) - self.assertEqual(limit, 5678) - - def test_unlimited_default_allowed_injected_file_content_bytes(self): - self.flags(quota_injected_file_content_bytes=-1) - limit = quota.allowed_injected_file_content_bytes(self.context, 23456) - self.assertEqual(limit, 23456) - - def test_unlimited_db_allowed_injected_file_content_bytes(self): - 
self.flags(quota_injected_file_content_bytes=12345) - db.quota_create(self.context, self.project_id, - 'injected_file_content_bytes', -1) - limit = quota.allowed_injected_file_content_bytes(self.context, 23456) - self.assertEqual(limit, 23456) - def _create_with_injected_files(self, files): self.flags(image_service='nova.image.fake.FakeImageService') api = compute.API(image_service=self.StubImageService()) @@ -659,11 +219,6 @@ class OldQuotaTestCase(test.TestCase): self.assertRaises(exception.QuotaError, self._create_with_injected_files, files) - def test_allowed_injected_file_path_bytes(self): - self.assertEqual( - quota.allowed_injected_file_path_bytes(self.context), - FLAGS.quota_injected_file_path_bytes) - def test_max_injected_file_path_bytes(self): max = FLAGS.quota_injected_file_path_bytes path = ''.join(['a' for i in xrange(max)]) @@ -677,15 +232,6 @@ class OldQuotaTestCase(test.TestCase): self.assertRaises(exception.QuotaError, self._create_with_injected_files, files) - def test_quota_class_unlimited(self): - self.flags(quota_floating_ips=10) - items = quota.allowed_floating_ips(self.context, 10) - self.assertEqual(items, 10) - self.context.quota_class = 'foo' - db.quota_class_create(self.context, 'foo', 'floating_ips', -1) - items = quota.allowed_floating_ips(self.context, 100) - self.assertEqual(items, 100) - class FakeContext(object): def __init__(self, project_id, quota_class): diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index f9b54badc..2909b51f7 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -395,6 +395,8 @@ class VolumeTestCase(test.TestCase): self.assertEquals(len(test_notifier.NOTIFICATIONS), 2) msg = test_notifier.NOTIFICATIONS[0] self.assertEquals(msg['event_type'], 'volume.create.start') + payload = msg['payload'] + self.assertEquals(payload['status'], 'creating') msg = test_notifier.NOTIFICATIONS[1] self.assertEquals(msg['priority'], 'INFO') self.assertEquals(msg['event_type'], 'volume.create.end') @@ -402,7 +404,7 @@ class VolumeTestCase(test.TestCase): self.assertEquals(payload['tenant_id'], volume['project_id']) self.assertEquals(payload['user_id'], volume['user_id']) self.assertEquals(payload['volume_id'], volume['id']) - self.assertEquals(payload['status'], 'creating') + self.assertEquals(payload['status'], 'available') self.assertEquals(payload['size'], volume['size']) self.assertTrue('display_name' in payload) self.assertTrue('snapshot_id' in payload) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3056661b4..1e2ef964a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -18,7 +18,6 @@ import ast import contextlib -import datetime import functools import os import re @@ -29,7 +28,6 @@ from nova.compute import aggregate_states from nova.compute import instance_types from nova.compute import power_state from nova.compute import task_states -from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import context from nova import db @@ -43,6 +41,7 @@ from nova.tests import fake_network from nova.tests import fake_utils from nova.tests.glance import stubs as glance_stubs from nova.tests.xenapi import stubs +from nova import utils from nova.virt.xenapi import connection as xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import vm_utils @@ -272,14 +271,18 @@ class XenAPIVMTestCase(test.TestCase): fake_diagnostics = { 'vbd_xvdb_write': '0.0', - 'memory_target': '10961792000.0000', - 
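Note: the nova/tests/test_volume.py hunk above tightens the notification assertions so each end of the create flow is checked against its own status: volume.create.start is emitted while the volume is still 'creating', and volume.create.end only once it has flipped to 'available'. A condensed form of those assertions, reusing the test_notifier fixture the test module already relies on:

    # NOTIFICATIONS is the in-memory list populated by the test notifier.
    start, end = test_notifier.NOTIFICATIONS[0], test_notifier.NOTIFICATIONS[1]
    assert start['event_type'] == 'volume.create.start'
    assert start['payload']['status'] == 'creating'
    assert end['event_type'] == 'volume.create.end'
    assert end['payload']['status'] == 'available'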
'memory_internal_free': '3612860.6020', - 'memory': '10961792000.0000', + 'memory_target': '4294967296.0000', + 'memory_internal_free': '1415564.0000', + 'memory': '4294967296.0000', 'vbd_xvda_write': '0.0', - 'cpu0': '0.0110', - 'vif_0_tx': '752.4007', + 'cpu0': '0.0042', + 'vif_0_tx': '287.4134', 'vbd_xvda_read': '0.0', - 'vif_0_rx': '4837.8805' + 'vif_0_rx': '1816.0144', + 'vif_2_rx': '0.0', + 'vif_2_tx': '0.0', + 'vbd_xvdb_read': '0.0', + 'last_update': '1328795567', } instance = self._create_instance() expected = self.conn.get_diagnostics(instance) @@ -377,24 +380,28 @@ class XenAPIVMTestCase(test.TestCase): if check_injection: xenstore_data = self.vm['xenstore_data'] self.assertEquals(xenstore_data['vm-data/hostname'], 'test') - key = 'vm-data/networking/DEADBEEF0000' + key = 'vm-data/networking/DEADBEEF0001' xenstore_value = xenstore_data[key] tcpip_data = ast.literal_eval(xenstore_value) self.assertEquals(tcpip_data, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', + {'broadcast': '192.168.1.255', + 'dns': ['192.168.1.3', '192.168.1.4'], + 'gateway': '192.168.1.1', + 'gateway_v6': 'fe80::def', 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], + 'ip': '2001:db8:0:1::1', + 'netmask': 64, + 'gateway': 'fe80::def'}], 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'dhcp_server': '192.168.0.1', - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3}) + 'ip': '192.168.1.100', + 'netmask': '255.255.255.0', + 'gateway': '192.168.1.1'}, + {'enabled': '1', + 'ip': '192.168.1.101', + 'netmask': '255.255.255.0', + 'gateway': '192.168.1.1'}], + 'label': 'test1', + 'mac': 'DE:AD:BE:EF:00:01'}) def check_vm_params_for_windows(self): self.assertEquals(self.vm['platform']['nx'], 'true') @@ -442,7 +449,7 @@ class XenAPIVMTestCase(test.TestCase): # If the cache is turned on then the base disk will be # there even after the cleanup if 'other_config' in vdi_rec: - if vdi_rec['other_config']['image-id'] is None: + if 'image-id' not in vdi_rec['other_config']: self.fail('Found unexpected VDI:%s' % vdi_ref) else: self.fail('Found unexpected VDI:%s' % vdi_ref) @@ -467,27 +474,12 @@ class XenAPIVMTestCase(test.TestCase): instance = db.instance_create(self.context, instance_values) else: instance = db.instance_get(self.context, instance_id) - network_info = [({'bridge': 'fa0', 'id': 0, - 'injected': True, - 'cidr': '192.168.0.0/24', - 'cidr_v6': 'dead:beef::1/120', - }, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'dhcp_server': '192.168.0.1', - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] + + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) if empty_dns: - network_info[0][1]['dns'] = [] + # NOTE(tr3buchet): this is a terrible way to do this... 
+ network_info[0]['network']['subnets'][0]['dns'] = [] # admin_pass isn't part of the DB model, but it does get set as # an attribute for spawn to use @@ -605,11 +597,11 @@ class XenAPIVMTestCase(test.TestCase): index = config.index('auto eth0') self.assertEquals(config[index + 1:index + 8], [ 'iface eth0 inet static', - 'address 192.168.0.100', + 'address 192.168.1.100', 'netmask 255.255.255.0', - 'broadcast 192.168.0.255', - 'gateway 192.168.0.1', - 'dns-nameservers 192.168.0.1', + 'broadcast 192.168.1.255', + 'gateway 192.168.1.1', + 'dns-nameservers 192.168.1.3 192.168.1.4', '']) self._tee_executed = True return '', '' @@ -708,7 +700,7 @@ class XenAPIVMTestCase(test.TestCase): vif_rec = xenapi_fake.get_record('VIF', vif_ref) self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit') self.assertEquals(vif_rec['qos_algorithm_params']['kbps'], - str(3 * 1024)) + str(3 * 10 * 1024)) def test_rescue(self): instance = self._create_instance() @@ -778,25 +770,8 @@ class XenAPIVMTestCase(test.TestCase): 'os_type': 'linux', 'architecture': 'x86-64'} instance = db.instance_create(self.context, instance_values) - network_info = [({'bridge': 'fa0', 'id': 0, - 'injected': False, - 'cidr': '192.168.0.0/24', - 'cidr_v6': 'dead:beef::1/120', - }, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'dhcp_server': '192.168.0.1', - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD, 'disk_format': 'vhd'} if spawn: @@ -951,20 +926,8 @@ class XenAPIMigrateInstance(test.TestCase): fake_finish_revert_migration) conn = xenapi_conn.get_connection(False) - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} base = xenapi_fake.create_vdi('hurr', 'fake') base_uuid = xenapi_fake.get_record('VDI', base)['uuid'] @@ -995,20 +958,8 @@ class XenAPIMigrateInstance(test.TestCase): "VDI_resize_online", fake_vdi_resize) conn = xenapi_conn.get_connection(False) - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), @@ -1029,20 +980,8 @@ class XenAPIMigrateInstance(test.TestCase): 
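Note: the xenapi test hunks above stop hand-building (network, mapping) tuples and instead ask the fake network module for a full network-info model; assertions then read values off that model (for example the injected dns-nameservers line now carries both 192.168.1.3 and 192.168.1.4). The pattern, using only the calls that appear in the hunks and running inside a test case as they do:

    from nova.tests import fake_network

    network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                          spectacular=True)
    # The model is still indexable much like the old tuples, e.g. to
    # blank out DNS for the empty_dns case:
    network_info[0]['network']['subnets'][0]['dns'] = []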
self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) conn = xenapi_conn.get_connection(False) - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), @@ -1057,21 +996,8 @@ class XenAPIMigrateInstance(test.TestCase): self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) conn = xenapi_conn.get_connection(False) - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway_v6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] - + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) # Resize instance would be determined by the compute call image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, @@ -1398,8 +1324,7 @@ class XenAPIBWUsageTestCase(test.TestCase): self.name = "instance-0001" self.uuid = "1-2-3-4-5" - result = self.conn.get_all_bw_usage([testinstance()], - datetime.datetime.utcnow()) + result = self.conn.get_all_bw_usage([testinstance()], utils.utcnow()) self.assertEqual(result, []) @@ -1569,7 +1494,7 @@ class XenAPIDom0IptablesFirewallTestCase(test.TestCase): fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs, lambda *a, **kw: network_model) - network_info = compute_utils.legacy_network_info(network_model) + network_info = network_model.legacy() self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) diff --git a/nova/utils.py b/nova/utils.py index af55bdb2e..ae99212e9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -284,27 +284,6 @@ def novadir(): return os.path.abspath(nova.__file__).split('nova/__init__.py')[0] -def default_cfgfile(filename='nova.conf', args=None): - if args is None: - args = sys.argv - for arg in args: - if arg.find('config-file') != -1: - return arg[arg.index('config-file') + len('config-file') + 1:] - else: - if not os.path.isabs(filename): - # turn relative filename into an absolute path - script_dir = os.path.dirname(inspect.stack()[-1][1]) - filename = os.path.abspath(os.path.join(script_dir, filename)) - if not os.path.exists(filename): - filename = "./nova.conf" - if not os.path.exists(filename): - filename = '/etc/nova/nova.conf' - if os.path.exists(filename): - cfgfile = '--config-file=%s' % filename - args.insert(1, cfgfile) - return filename - - def debug(arg): LOG.debug(_('debug in callback: %s'), arg) return arg @@ -488,7 +467,7 @@ def utcnow_ts(): return time.mktime(utcnow().timetuple()) -def set_time_override(override_time=datetime.datetime.utcnow()): +def set_time_override(override_time=utcnow()): """Override 
utils.utcnow to return a constant time.""" utcnow.override_time = override_time @@ -524,7 +503,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): def isotime(at=None): """Stringify time in ISO 8601 format""" if not at: - at = datetime.datetime.utcnow() + at = utcnow() str = at.strftime(ISO_TIME_FORMAT) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' str += ('Z' if tz == 'UTC' else tz) @@ -1137,7 +1116,7 @@ def generate_glance_url(): def generate_image_url(image_ref): - """Generate a image URL from an image_ref.""" + """Generate an image URL from an image_ref.""" return "%s/images/%s" % (generate_glance_url(), image_ref) diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py index de027be2e..0890de5d4 100644 --- a/nova/virt/baremetal/dom.py +++ b/nova/virt/baremetal/dom.py @@ -160,8 +160,6 @@ class BareMetalDom(object): self.domains.remove(fd) msg = _("Domains: %s") LOG.debug(msg % (self.domains)) - msg = _("Nodes: %s") - LOG.debug(msg % (self.baremetal_nodes.nodes)) self.store_domain() msg = _("After storing domains: %s") LOG.debug(msg % (self.domains)) diff --git a/nova/virt/baremetal/proxy.py b/nova/virt/baremetal/proxy.py index ba8296127..1ee927c03 100644 --- a/nova/virt/baremetal/proxy.py +++ b/nova/virt/baremetal/proxy.py @@ -32,7 +32,6 @@ A connection to a hypervisor through baremetal. import hashlib import os import shutil -import time from nova.compute import instance_types from nova.compute import power_state @@ -47,7 +46,6 @@ from nova.openstack.common import cfg from nova import utils from nova.virt.baremetal import dom from nova.virt.baremetal import nodes -from nova.virt import disk from nova.virt.disk import api as disk from nova.virt import driver from nova.virt.libvirt import utils as libvirt_utils @@ -88,16 +86,17 @@ def _late_load_cheetah(): class ProxyConnection(driver.ComputeDriver): def __init__(self, read_only): + # Note that baremetal doesn't have a read-only connection + # mode, so the read_only parameter is ignored super(ProxyConnection, self).__init__() self.baremetal_nodes = nodes.get_baremetal_nodes() self._wrapped_conn = None - self.read_only = read_only self._host_state = None @property def HostState(self): if not self._host_state: - self._host_state = HostState(self.read_only) + self._host_state = HostState(self) return self._host_state def init_host(self, host): @@ -130,8 +129,6 @@ class ProxyConnection(driver.ComputeDriver): def destroy(self, instance, network_info, block_device_info=None, cleanup=True): - timer = utils.LoopingCall(f=None) - while True: try: self._conn.destroy_domain(instance['name']) @@ -160,15 +157,15 @@ class ProxyConnection(driver.ComputeDriver): @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - raise exception.APIError("attach_volume not supported for baremetal.") + raise exception.Invalid("attach_volume not supported for baremetal.") @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): - raise exception.APIError("detach_volume not supported for baremetal.") + raise exception.Invalid("detach_volume not supported for baremetal.") @exception.wrap_exception def snapshot(self, instance, image_id): - raise exception.APIError("snapshot not supported for baremetal.") + raise exception.Invalid("snapshot not supported for baremetal.") @exception.wrap_exception def reboot(self, instance): @@ -199,7 +196,6 @@ class ProxyConnection(driver.ComputeDriver): """ self.destroy(instance, False) - xml_dict = self.to_xml_dict(instance, rescue=True) rescue_images = 
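Note: the nova/utils.py hunks above route isotime() and the set_time_override() default through utils.utcnow() instead of calling datetime.datetime.utcnow() directly, which is what makes the override hook effective everywhere. A small usage sketch; clear_time_override is assumed to be the matching cleanup helper in the same module:

    from nova import utils

    frozen = utils.utcnow()
    utils.set_time_override(frozen)   # sets utcnow.override_time = frozen
    assert utils.utcnow() == frozen   # isotime(), utcnow_ts() now agree
    utils.clear_time_override()       # assumed counterpart; restores real time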
{'image_id': FLAGS.baremetal_rescue_image_id, 'kernel_id': FLAGS.baremetal_rescue_kernel_id, 'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id} @@ -218,7 +214,7 @@ class ProxyConnection(driver.ComputeDriver): except Exception: LOG.exception(_('_wait_for_rescue failed'), instance=instance) timer.stop() - timer.f = _wait_for_reboot + timer.f = _wait_for_rescue return timer.start(interval=0.5) @exception.wrap_exception @@ -275,13 +271,13 @@ class ProxyConnection(driver.ComputeDriver): else: LOG.debug(_('instance %s:not booted'), instance['name'], instance=instance) - except Exception as Exn: - LOG.debug(_("Bremetal assignment is overcommitted."), + except Exception: + LOG.exception(_("Baremetal assignment is overcommitted."), instance=instance) (old_ref, new_ref) = db.instance_update_and_get_original( context, instance['id'], - {'vm_state': vm_states.OVERCOMMIT, - 'power_state': power_state.SUSPENDED}) + {'vm_state': vm_states.ERROR, + 'power_state': power_state.FAILED}) notifications.send_update(context, old_ref, new_ref) timer.stop() @@ -414,8 +410,7 @@ class ProxyConnection(driver.ComputeDriver): cow=False, # FLAGS.use_cow_images, image_id=disk_images['image_id'], user_id=inst['user_id'], - project_id=inst['project_id'], - size=size) + project_id=inst['project_id']) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -499,9 +494,9 @@ class ProxyConnection(driver.ComputeDriver): block_device_info=None): # block_device_mapping = driver.block_device_info_get_mapping( # block_device_info) - map = 0 - for (network, mapping) in network_info: - map += 1 + _map = 0 + for (_, mapping) in network_info: + _map += 1 nics = [] # FIXME(vish): stick this in db @@ -614,11 +609,7 @@ class ProxyConnection(driver.ComputeDriver): :returns: The total number of vcpu that currently used. """ - - total = 0 - for dom_id in self._conn.list_domains(): - total += 1 - return total + return len(self._conn.list_domains()) def get_memory_mb_used(self): """Get the free memory size(MB) of physical computer. @@ -765,9 +756,9 @@ class HostState(object): node is running on. """ - def __init__(self, read_only): + def __init__(self, connection): super(HostState, self).__init__() - self.read_only = read_only + self.connection = connection self._stats = {} self.update_status() @@ -784,11 +775,10 @@ class HostState(object): We can get host status information. 
""" LOG.debug(_("Updating host stats")) - connection = get_connection(self.read_only) data = {} - data["vcpus"] = connection.get_vcpu_total() - data["vcpus_used"] = connection.get_vcpu_used() - data["cpu_info"] = connection.get_cpu_info() + data["vcpus"] = self.connection.get_vcpu_total() + data["vcpus_used"] = self.connection.get_vcpu_used() + data["cpu_info"] = self.connection.get_cpu_info() data["cpu_arch"] = FLAGS.cpu_arch data["xpus"] = FLAGS.xpus data["xpu_arch"] = FLAGS.xpu_arch @@ -796,12 +786,12 @@ class HostState(object): data["net_arch"] = FLAGS.net_arch data["net_info"] = FLAGS.net_info data["net_mbps"] = FLAGS.net_mbps - data["disk_total"] = connection.get_local_gb_total() - data["disk_used"] = connection.get_local_gb_used() + data["disk_total"] = self.connection.get_local_gb_total() + data["disk_used"] = self.connection.get_local_gb_used() data["disk_available"] = data["disk_total"] - data["disk_used"] - data["host_memory_total"] = connection.get_memory_mb_total() + data["host_memory_total"] = self.connection.get_memory_mb_total() data["host_memory_free"] = (data["host_memory_total"] - - connection.get_memory_mb_used()) - data["hypervisor_type"] = connection.get_hypervisor_type() - data["hypervisor_version"] = connection.get_hypervisor_version() + self.connection.get_memory_mb_used()) + data["hypervisor_type"] = self.connection.get_hypervisor_type() + data["hypervisor_version"] = self.connection.get_hypervisor_version() self._stats = data diff --git a/nova/virt/connection.py b/nova/virt/connection.py index b8a8c26cc..84f84744b 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -45,18 +45,19 @@ if FLAGS.connection_type == 'baremetal': def get_connection(read_only=False): """ Returns an object representing the connection to a virtualization - platform. + platform, or to an on-demand bare-metal provisioning platform. This could be :mod:`nova.virt.fake.FakeConnection` in test mode, a connection to KVM, QEMU, or UML via :mod:`libvirt_conn`, or a connection - to XenServer or Xen Cloud Platform via :mod:`xenapi`. + to XenServer or Xen Cloud Platform via :mod:`xenapi`. Other platforms are + also supported. Any object returned here must conform to the interface documented by :mod:`FakeConnection`. **Related flags** - :connection_type: A string literal that falls through a if/elif structure + :connection_type: A string literal that falls through an if/elif structure to determine what virtualization mechanism to use. 
Values may be @@ -64,6 +65,8 @@ def get_connection(read_only=False): * libvirt * xenapi * vmwareapi + * baremetal + """ # TODO(termie): maybe lazy load after initial check for permissions # TODO(termie): check whether we can be disconnected @@ -82,6 +85,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - LOG.error(_('Failed to open connection to the hypervisor')) + LOG.error(_('Failed to open connection to underlying virt platform')) sys.exit(1) return utils.check_isinstance(conn, driver.ComputeDriver) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 1a7326675..57f7940ad 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -23,8 +23,6 @@ Driver base-classes: """ from nova.compute import power_state -from nova import context as nova_context -from nova import db from nova import flags from nova import log as logging diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index aa05a2261..bca50d21f 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -97,6 +97,14 @@ class FirewallDriver(object): """Check nova-instance-instance-xxx exists""" raise NotImplementedError() + def _handle_network_info_model(self, network_info): + # make sure this is legacy network_info + try: + return network_info.legacy() + except AttributeError: + # no "legacy" function means network_info is legacy + return network_info + class IptablesFirewallDriver(FirewallDriver): """Driver which enforces security groups through iptables rules.""" @@ -121,6 +129,9 @@ class IptablesFirewallDriver(FirewallDriver): pass def unfilter_instance(self, instance, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + if self.instances.pop(instance['id'], None): # NOTE(vish): use the passed info instead of the stored info self.network_infos.pop(instance['id']) @@ -131,6 +142,9 @@ class IptablesFirewallDriver(FirewallDriver): 'filtered'), instance=instance) def prepare_instance_filter(self, instance, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + self.instances[instance['id']] = instance self.network_infos[instance['id']] = network_info self.add_filters_for_instance(instance) @@ -146,6 +160,9 @@ class IptablesFirewallDriver(FirewallDriver): """Creates a rule corresponding to each ip that defines a jump to the corresponding instance - chain for all the traffic destined to that ip.""" + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + ips_v4 = [ip['ip'] for (_n, mapping) in network_info for ip in mapping['ips']] ipv4_rules = self._create_filter(ips_v4, chain_name) @@ -206,6 +223,9 @@ class IptablesFirewallDriver(FirewallDriver): ipv6_rules += ['-j $provider'] def _do_dhcp_rules(self, ipv4_rules, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info] for dhcp_server in dhcp_servers: @@ -214,6 +234,9 @@ class IptablesFirewallDriver(FirewallDriver): '-j ACCEPT' % (dhcp_server,)) def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + cidrs = [network['cidr'] for (network, _i) in network_info] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) @@ -225,6 +248,9 @@ class IptablesFirewallDriver(FirewallDriver): 
ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) def _do_ra_rules(self, ipv6_rules, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + gateways_v6 = [mapping['gateway_v6'] for (_n, mapping) in network_info] for gateway_v6 in gateways_v6: @@ -259,6 +285,9 @@ class IptablesFirewallDriver(FirewallDriver): rule.to_port)] def instance_rules(self, instance, network_info): + # make sure this is legacy nw_info + network_info = self._handle_network_info_model(network_info) + ctxt = context.get_admin_context() ipv4_rules = [] diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 7e012a87c..8a24c4e55 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -370,12 +370,12 @@ class LibvirtConnection(driver.ComputeDriver): def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for (network, mapping) in network_info: - self.vif_driver.plug(instance, network, mapping) + self.vif_driver.plug(instance, (network, mapping)) def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" for (network, mapping) in network_info: - self.vif_driver.unplug(instance, network, mapping) + self.vif_driver.unplug(instance, (network, mapping)) def _destroy(self, instance, network_info, block_device_info=None, cleanup=True): @@ -1626,7 +1626,7 @@ class LibvirtConnection(driver.ComputeDriver): guest.add_device(diskconfig) for (network, mapping) in network_info: - cfg = self.vif_driver.plug(instance, network, mapping) + cfg = self.vif_driver.plug(instance, (network, mapping)) guest.add_device(cfg) if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm": @@ -2037,7 +2037,7 @@ class LibvirtConnection(driver.ComputeDriver): def update_available_resource(self, ctxt, host): """Updates compute manager resource info on ComputeNode table. - This method is called as an periodic tasks and is used only + This method is called as a periodic task and is used only in live migration currently. :param ctxt: security context diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 9cc801cc8..4b7d8889d 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -160,7 +160,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver): # in the thread pool no matter what. 
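Note: the generic firewall driver above gains _handle_network_info_model() so every rule-building method can accept either the new network model or the legacy list of (network, mapping) tuples. The shim in isolation (the standalone name here is illustrative):

    def to_legacy_network_info(network_info):
        """Return legacy-format network_info regardless of what came in."""
        try:
            # new-style model objects know how to downgrade themselves
            return network_info.legacy()
        except AttributeError:
            # no legacy() method: this is already the legacy tuple list
            return network_info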
tpool.execute(self._conn.nwfilterDefineXML, xml) else: - # NOTE(maoy): self._conn is a eventlet.tpool.Proxy object + # NOTE(maoy): self._conn is an eventlet.tpool.Proxy object self._conn.nwfilterDefineXML(xml) def unfilter_instance(self, instance, network_info): diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 07ac50520..5333e0992 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -83,8 +83,9 @@ class LibvirtBridgeDriver(vif.VIFDriver): return conf - def plug(self, instance, network, mapping): + def plug(self, instance, vif): """Ensure that the bridge exists, and add VIF to it.""" + network, mapping = vif if (not network.get('multi_host') and mapping.get('should_create_bridge')): if mapping.get('should_create_vlan'): @@ -107,7 +108,7 @@ class LibvirtBridgeDriver(vif.VIFDriver): return self._get_configurations(instance, network, mapping) - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): """No manual unplugging required.""" pass @@ -120,7 +121,8 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver): def get_dev_name(_self, iface_id): return "tap" + iface_id[0:11] - def plug(self, instance, network, mapping): + def plug(self, instance, vif): + network, mapping = vif iface_id = mapping['vif_uuid'] dev = self.get_dev_name(iface_id) if not linux_net._device_exists(dev): @@ -157,9 +159,10 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver): return conf - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): """Unplug the VIF from the network by deleting the port from the bridge.""" + network, mapping = vif dev = self.get_dev_name(mapping['vif_uuid']) try: utils.execute('ovs-vsctl', 'del-port', @@ -173,8 +176,9 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver): """VIF driver for Open vSwitch that uses integrated libvirt OVS virtual port XML (introduced in libvirt 0.9.11).""" - def plug(self, instance, network, mapping): + def plug(self, instance, vif): """ Pass data required to create OVS virtual port element""" + network, mapping = vif conf = config.LibvirtConfigGuestInterface() @@ -188,7 +192,7 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver): return conf - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): """No action needed. 
Libvirt takes care of cleanup""" pass @@ -199,7 +203,8 @@ class QuantumLinuxBridgeVIFDriver(vif.VIFDriver): def get_dev_name(self, iface_id): return "tap" + iface_id[0:11] - def plug(self, instance, network, mapping): + def plug(self, instance, vif): + network, mapping = vif iface_id = mapping['vif_uuid'] dev = self.get_dev_name(iface_id) @@ -215,9 +220,10 @@ class QuantumLinuxBridgeVIFDriver(vif.VIFDriver): return conf - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): """Unplug the VIF from the network by deleting the port from the bridge.""" + network, mapping = vif dev = self.get_dev_name(mapping['vif_uuid']) try: utils.execute('ip', 'link', 'delete', dev, run_as_root=True) diff --git a/nova/virt/vif.py b/nova/virt/vif.py index 07139b200..69cfd996c 100644 --- a/nova/virt/vif.py +++ b/nova/virt/vif.py @@ -25,10 +25,10 @@ class VIFDriver(object): # advantage of any kwargs should they need to pass - def plug(self, instance, network, mapping, **kwargs): + def plug(self, instance, vif, **kwargs): """Plug VIF into network.""" raise NotImplementedError() - def unplug(self, instance, network, mapping, **kwargs): + def unplug(self, instance, vif, **kwargs): """Unplug VIF from network.""" raise NotImplementedError() diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index 0a5d78b51..52205c461 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -33,7 +33,7 @@ FLAGS.set_default('vmwareapi_vlan_interface', 'vmnic0') class VMWareVlanBridgeDriver(vif.VIFDriver): """VIF Driver to setup bridge/VLAN networking using VMWare API.""" - def plug(self, instance, network, mapping): + def plug(self, instance, vif): """Plug the VIF to specified instance using information passed. Currently we are plugging the VIF(s) during instance creation itself. We can use this method when we add support to add additional NIC to @@ -83,7 +83,7 @@ class VMWareVlanBridgeDriver(vif.VIFDriver): raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, pgroup=pg_vlanid) - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): """Cleanup operations like deleting port group if no instance is associated with it.""" pass diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 1e35776fa..000dc6981 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -825,9 +825,9 @@ class VMWareVMOps(object): def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for (network, mapping) in network_info: - self._vif_driver.plug(instance, network, mapping) + self._vif_driver.plug(instance, (network, mapping)) def _unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" for (network, mapping) in network_info: - self._vif_driver.unplug(instance, network, mapping) + self._vif_driver.unplug(instance, (network, mapping)) diff --git a/nova/virt/xenapi/connection.py b/nova/virt/xenapi/connection.py index 184e34aa4..bd26e86a6 100644 --- a/nova/virt/xenapi/connection.py +++ b/nova/virt/xenapi/connection.py @@ -492,6 +492,13 @@ class XenAPIConnection(driver.ComputeDriver): return self._pool.remove_from_aggregate(context, aggregate, host, **kwargs) + def legacy_nwinfo(self): + """ + Indicate if the driver requires the legacy network_info format. 
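Note: the VIF driver interface above collapses plug(instance, network, mapping) into plug(instance, vif); the libvirt and vmware call sites simply wrap their existing pair into one argument, and the drivers unpack it themselves. A toy driver showing the new contract (names illustrative):

    class ToyVIFDriver(object):
        def plug(self, instance, vif, **kwargs):
            # libvirt-style callers still hand over a (network, mapping) pair
            network, mapping = vif
            return {'bridge': network.get('bridge'), 'mac': mapping.get('mac')}

        def unplug(self, instance, vif, **kwargs):
            pass

    # call-site shape, as rewritten above:
    #   for (network, mapping) in network_info:
    #       self.vif_driver.plug(instance, (network, mapping))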
+ """ + # TODO(tr3buchet): remove this function once all virts return false + return False + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 223f8b109..532c9f70d 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -151,7 +151,7 @@ def create_vdi(name_label, sr_ref, **kwargs): 'sharable': False, 'other_config': {}, 'location': '', - 'xenstore_data': '', + 'xenstore_data': {}, 'sm_config': {}, 'physical_utilisation': '123', 'managed': True, @@ -442,13 +442,14 @@ class SessionBase(object): return _db_content['PIF'] def VM_get_xenstore_data(self, _1, vm_ref): - return _db_content['VM'][vm_ref].get('xenstore_data', '') + return _db_content['VM'][vm_ref].get('xenstore_data', {}) def VM_remove_from_xenstore_data(self, _1, vm_ref, key): db_ref = _db_content['VM'][vm_ref] if not 'xenstore_data' in db_ref: return - db_ref['xenstore_data'][key] = None + if key in db_ref['xenstore_data']: + del db_ref['xenstore_data'][key] def VM_add_to_xenstore_data(self, _1, vm_ref, key, value): db_ref = _db_content['VM'][vm_ref] @@ -460,7 +461,8 @@ class SessionBase(object): db_ref = _db_content['VDI'][vdi_ref] if not 'other_config' in db_ref: return - db_ref['other_config'][key] = None + if key in db_ref['other_config']: + del db_ref['other_config'][key] def VDI_add_to_other_config(self, _1, vdi_ref, key, value): db_ref = _db_content['VDI'][vdi_ref] diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index f3cce22a9..6bf3edad4 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -195,7 +195,7 @@ def forward_request(context, request_type, master, aggregate_id, # because this might be 169.254.0.1, i.e. xenapi # NOTE: password in clear is not great, but it'll do for now sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, slave_address) - rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, master), + rpc.cast(context, rpc.queue_get_for(context, FLAGS.compute_topic, master), {"method": request_type, "args": {"aggregate_id": aggregate_id, "host": slave_compute, diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py index 900d0cc5f..905cbc30c 100644 --- a/nova/virt/xenapi/vif.py +++ b/nova/virt/xenapi/vif.py @@ -44,39 +44,40 @@ class XenVIFDriver(vif.VIFDriver): class XenAPIBridgeDriver(XenVIFDriver): """VIF Driver for XenAPI that uses XenAPI to create Networks.""" - def plug(self, instance, network, mapping, vm_ref=None, device=None): + def plug(self, instance, vif, vm_ref=None, device=None): if not vm_ref: vm_ref = vm_utils.VMHelper.lookup(self._session, instance.name) if not device: device = 0 - if mapping.get('should_create_vlan'): - network_ref = self._ensure_vlan_bridge(network) + if vif['network'].get_meta('should_create_vlan'): + network_ref = self._ensure_vlan_bridge(vif['network']) else: network_ref = network_utils.NetworkHelper.find_network_with_bridge( - self._session, network['bridge']) + self._session, vif['network']['bridge']) vif_rec = {} vif_rec['device'] = str(device) vif_rec['network'] = network_ref vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mapping['mac'] + vif_rec['MAC'] = vif['address'] vif_rec['MTU'] = '1500' vif_rec['other_config'] = {} - if "rxtx_cap" in mapping: - vif_rec['qos_algorithm_type'] = "ratelimit" - vif_rec['qos_algorithm_params'] = {"kbps": - str(mapping['rxtx_cap'] * 1024)} + if vif.get_meta('rxtx_cap'): + vif_rec['qos_algorithm_type'] = 'ratelimit' + vif_rec['qos_algorithm_params'] = {'kbps': + 
str(int(vif.get_meta('rxtx_cap')) * 1024)} else: - vif_rec['qos_algorithm_type'] = "" + vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} return vif_rec def _ensure_vlan_bridge(self, network): """Ensure that a VLAN bridge exists""" - vlan_num = network['vlan'] + vlan_num = network.get_meta('vlan') bridge = network['bridge'] - bridge_interface = FLAGS.vlan_interface or network['bridge_interface'] + bridge_interface = FLAGS.vlan_interface or \ + network.get_meta('bridge_interface') # Check whether bridge already exists # Retrieve network whose name_label is "bridge" network_ref = network_utils.NetworkHelper.find_network_with_name_label( @@ -86,8 +87,8 @@ class XenAPIBridgeDriver(XenVIFDriver): # 1 - create network description = 'network for nova bridge %s' % bridge network_rec = {'name_label': bridge, - 'name_description': description, - 'other_config': {}} + 'name_description': description, + 'other_config': {}} network_ref = self._session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double @@ -101,7 +102,7 @@ class XenAPIBridgeDriver(XenVIFDriver): # Multiple PIF are ok: we are dealing with a pool if len(pifs) == 0: raise Exception(_('Found no PIF for device %s') % - bridge_interface) + bridge_interface) for pif_ref in pifs.keys(): self._session.call_xenapi('VLAN.create', pif_ref, @@ -126,14 +127,14 @@ class XenAPIBridgeDriver(XenVIFDriver): return network_ref - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): pass class XenAPIOpenVswitchDriver(XenVIFDriver): """VIF driver for Open vSwitch with XenAPI.""" - def plug(self, instance, network, mapping, vm_ref=None, device=None): + def plug(self, instance, vif, vm_ref=None, device=None): if not vm_ref: vm_ref = vm_utils.VMHelper.lookup(self._session, instance.name) @@ -148,14 +149,14 @@ class XenAPIOpenVswitchDriver(XenVIFDriver): vif_rec['device'] = str(device) vif_rec['network'] = network_ref vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mapping['mac'] + vif_rec['MAC'] = vif['address'] vif_rec['MTU'] = '1500' - vif_rec['qos_algorithm_type'] = "" + vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} # OVS on the hypervisor monitors this key and uses it to # set the iface-id attribute - vif_rec['other_config'] = {"nicira-iface-id": mapping['vif_uuid']} + vif_rec['other_config'] = {'nicira-iface-id': vif['id']} return vif_rec - def unplug(self, instance, network, mapping): + def unplug(self, instance, vif): pass diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index deeebbcb9..508fd34e9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1068,19 +1068,32 @@ class VMHelper(xenapi.HelperBase): def compile_diagnostics(cls, record): """Compile VM diagnostics data""" try: + keys = [] diags = {} vm_uuid = record["uuid"] xml = get_rrd(get_rrd_server(), vm_uuid) if xml: rrd = minidom.parseString(xml) for i, node in enumerate(rrd.firstChild.childNodes): - # We don't want all of the extra garbage - if i >= 3 and i <= 11: + # Provide the last update of the information + if node.localName == 'lastupdate': + diags['last_update'] = node.firstChild.data + + # Create a list of the diagnostic keys (in their order) + if node.localName == 'ds': ref = node.childNodes # Name and Value if len(ref) > 6: - _ref_zero = ref[0].firstChild.data - diags[_ref_zero] = ref[6].firstChild.data + keys.append(ref[0].firstChild.data) + + # Read the last row of the first RRA to get the latest info + if 
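Note: the xenapi/vif.py hunks above read everything off the network-model vif object: the MAC from vif['address'], the OVS interface id from vif['id'], and the rate limit from vif.get_meta('rxtx_cap'). A runnable sketch with a stand-in vif (FakeVIF and its values are illustrative, not the real model class):

    class FakeVIF(dict):
        """Tiny stand-in for the network model's VIF object."""
        def get_meta(self, key, default=None):
            return self.get('meta', {}).get(key, default)

    vif = FakeVIF(address='DE:AD:BE:EF:00:01', id='0123-4567-89ab',
                  meta={'rxtx_cap': 3})

    vif_rec = {'MAC': vif['address'], 'MTU': '1500',
               'other_config': {'nicira-iface-id': vif['id']}}
    if vif.get_meta('rxtx_cap'):
        vif_rec['qos_algorithm_type'] = 'ratelimit'
        vif_rec['qos_algorithm_params'] = {
            'kbps': str(int(vif.get_meta('rxtx_cap')) * 1024)}
    else:
        vif_rec['qos_algorithm_type'] = ''
        vif_rec['qos_algorithm_params'] = {}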
node.localName == 'rra': + rows = node.childNodes[4].childNodes + last_row = rows[rows.length - 1].childNodes + for j, value in enumerate(last_row): + diags[keys[j]] = value.firstChild.data + break + return diags except expat.ExpatError as e: LOG.exception(_('Unable to parse rrd of %(vm_uuid)s') % locals()) @@ -1769,7 +1782,7 @@ def _mounted_processing(device, key, net, metadata): 'non-linux instances): %s') % err) -def _prepare_injectables(inst, networks_info): +def _prepare_injectables(inst, network_info): """ prepares the ssh key and the network configuration file to be injected into the disk image @@ -1783,38 +1796,84 @@ def _prepare_injectables(inst, networks_info): metadata = inst['metadata'] key = str(inst['key_data']) net = None - if networks_info: + if network_info: ifc_num = -1 interfaces_info = [] - have_injected_networks = False - for (network_ref, info) in networks_info: + for vif in network_info: ifc_num += 1 - if not network_ref['injected']: + try: + if not vif['network'].get_meta('injected'): + # network is not specified injected + continue + except KeyError: + # vif network is None continue - have_injected_networks = True - ip_v4 = ip_v6 = None - if 'ips' in info and len(info['ips']) > 0: - ip_v4 = info['ips'][0] - if 'ip6s' in info and len(info['ip6s']) > 0: - ip_v6 = info['ip6s'][0] - if len(info['dns']) > 0: - dns = info['dns'][0] - else: - dns = '' + # NOTE(tr3buchet): using all subnets in case dns is stored in a + # subnet that isn't chosen as first v4 or v6 + # subnet in the case where there is more than one + # dns = list of address of each dns entry from each vif subnet + dns = [ip['address'] for subnet in vif['network']['subnets'] + for ip in subnet['dns']] + dns = ' '.join(dns).strip() + interface_info = {'name': 'eth%d' % ifc_num, - 'address': ip_v4 and ip_v4['ip'] or '', - 'netmask': ip_v4 and ip_v4['netmask'] or '', - 'gateway': info['gateway'], - 'broadcast': info['broadcast'], - 'dns': dns, - 'address_v6': ip_v6 and ip_v6['ip'] or '', - 'netmask_v6': ip_v6 and ip_v6['netmask'] or '', - 'gateway_v6': ip_v6 and info['gateway_v6'] or '', + 'address': '', + 'netmask': '', + 'gateway': '', + 'broadcast': '', + 'dns': dns or '', + 'address_v6': '', + 'netmask_v6': '', + 'gateway_v6': '', 'use_ipv6': FLAGS.use_ipv6} + + # NOTE(tr3buchet): the original code used the old network_info + # which only supported a single ipv4 subnet + # (and optionally, a single ipv6 subnet). + # I modified it to use the new network info model, + # which adds support for multiple v4 or v6 + # subnets. 
I chose to ignore any additional + # subnets, just as the original code ignored + # additional IP information + + # populate v4 info if v4 subnet and ip exist + try: + # grab the first v4 subnet (or it raises) + subnet = [s for s in vif['network']['subnets'] + if s['version'] == 4][0] + # get the subnet's first ip (or it raises) + ip = subnet['ips'][0] + + # populate interface_info + subnet_netaddr = subnet.as_netaddr() + interface_info['address'] = ip['address'] + interface_info['netmask'] = subnet_netaddr.netmask + interface_info['gateway'] = subnet['gateway']['address'] + interface_info['broadcast'] = subnet_netaddr.broadcast + except IndexError: + # there isn't a v4 subnet or there are no ips + pass + + # populate v6 info if v6 subnet and ip exist + try: + # grab the first v6 subnet (or it raises) + subnet = [s for s in vif['network']['subnets'] + if s['version'] == 6][0] + # get the subnet's first ip (or it raises) + ip = subnet['ips'][0] + + # populate interface_info + interface_info['address_v6'] = ip['address'] + interface_info['netmask_v6'] = subnet.as_netaddr().netmask + interface_info['gateway_v6'] = subnet['gateway']['address'] + except IndexError: + # there isn't a v6 subnet or there are no ips + pass + interfaces_info.append(interface_info) - if have_injected_networks: + if interfaces_info: net = str(template(template_data, searchList=[{'interfaces': interfaces_info, 'use_ipv6': FLAGS.use_ipv6}])) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7ac455367..8ede49983 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1357,6 +1357,70 @@ class VMOps(object): return {'host': FLAGS.vncserver_proxyclient_address, 'port': 80, 'internal_access_path': path} + def _vif_xenstore_data(self, vif): + """convert a network info vif to injectable instance data""" + + def get_ip(ip): + if not ip: + return None + return ip['address'] + + def fixed_ip_dict(ip, subnet): + if ip['version'] == 4: + netmask = str(subnet.as_netaddr().netmask) + else: + netmask = subnet.as_netaddr()._prefixlen + + return {'ip': ip['address'], + 'enabled': '1', + 'netmask': netmask, + 'gateway': get_ip(subnet['gateway'])} + + def convert_route(route): + return {'route': str(netaddr.IPNetwork(route['cidr']).network), + 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask), + 'gateway': get_ip(route['gateway'])} + + network = vif['network'] + v4_subnets = [subnet for subnet in network['subnets'] + if subnet['version'] == 4] + v6_subnets = [subnet for subnet in network['subnets'] + if subnet['version'] == 6] + + # NOTE(tr3buchet): routes and DNS come from all subnets + routes = [convert_route(route) for subnet in network['subnets'] + for route in subnet['routes']] + dns = [get_ip(ip) for subnet in network['subnets'] + for ip in subnet['dns']] + + info_dict = {'label': network['label'], + 'mac': vif['address']} + + if v4_subnets: + # NOTE(tr3buchet): gateway and broadcast from first subnet + # primary IP will be from first subnet + # subnets are generally unordered :( + info_dict['gateway'] = get_ip(v4_subnets[0]['gateway']) + info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast) + info_dict['ips'] = [fixed_ip_dict(ip, subnet) + for subnet in v4_subnets + for ip in subnet['ips']] + if v6_subnets: + # NOTE(tr3buchet): gateway from first subnet + # primary IP will be from first subnet + # subnets are generally unordered :( + info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway']) + info_dict['ip6s'] = [fixed_ip_dict(ip, subnet) + for subnet in v6_subnets + 
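Note: _prepare_injectables() and _vif_xenstore_data() above walk the model the same way: DNS entries are collected from every subnet, while the injected address and gateway come from the first v4 (or v6) subnet's first IP, with IndexError standing in for "nothing to inject". A plain-dict sketch of that selection logic (the real model objects add helpers such as as_netaddr() and get_meta(), omitted here):

    network = {'subnets': [
        {'version': 4,
         'ips': [{'address': '192.168.1.100'}],
         'dns': [{'address': '192.168.1.3'}, {'address': '192.168.1.4'}],
         'gateway': {'address': '192.168.1.1'}},
        {'version': 6,
         'ips': [{'address': '2001:db8:0:1::1'}],
         'dns': [],
         'gateway': {'address': 'fe80::def'}},
    ]}

    # DNS comes from all subnets, not just the one supplying the address.
    dns = ' '.join(ip['address'] for subnet in network['subnets']
                   for ip in subnet['dns'])

    try:
        v4 = [s for s in network['subnets'] if s['version'] == 4][0]
        address = v4['ips'][0]['address']
        gateway = v4['gateway']['address']
    except IndexError:
        # no v4 subnet, or no ips on it: leave the fields blank
        address = gateway = ''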
for ip in subnet['ips']] + if routes: + info_dict['routes'] = routes + + if dns: + info_dict['dns'] = dns + + return info_dict + def inject_network_info(self, instance, network_info, vm_ref=None): """ Generate the network info and make calls to place it into the @@ -1367,11 +1431,13 @@ class VMOps(object): vm_ref = vm_ref or self._get_vm_opaque_ref(instance) LOG.debug(_("Injecting network info to xenstore"), instance=instance) - for (network, info) in network_info: - location = 'vm-data/networking/%s' % info['mac'].replace(':', '') - self._add_to_param_xenstore(vm_ref, location, json.dumps(info)) + for vif in network_info: + xs_data = self._vif_xenstore_data(vif) + location = \ + 'vm-data/networking/%s' % vif['address'].replace(':', '') + self._add_to_param_xenstore(vm_ref, location, json.dumps(xs_data)) try: - self._write_to_xenstore(instance, location, info, + self._write_to_xenstore(instance, location, xs_data, vm_ref=vm_ref) except KeyError: # catch KeyError for domid if instance isn't running @@ -1385,8 +1451,8 @@ class VMOps(object): # this function raises if vm_ref is not a vm_opaque_ref self._session.call_xenapi("VM.get_record", vm_ref) - for device, (network, info) in enumerate(network_info): - vif_rec = self.vif_driver.plug(instance, network, info, + for device, vif in enumerate(network_info): + vif_rec = self.vif_driver.plug(instance, vif, vm_ref=vm_ref, device=device) network_ref = vif_rec['network'] LOG.debug(_('Creating VIF for network %(network_ref)s'), @@ -1397,13 +1463,13 @@ class VMOps(object): def plug_vifs(self, instance, network_info): """Set up VIF networking on the host.""" - for device, (network, mapping) in enumerate(network_info): - self.vif_driver.plug(instance, network, mapping, device=device) + for device, vif in enumerate(network_info): + self.vif_driver.plug(instance, vif, device=device) def unplug_vifs(self, instance, network_info): if network_info: - for (network, mapping) in network_info: - self.vif_driver.unplug(instance, network, mapping) + for vif in network_info: + self.vif_driver.unplug(instance, vif) def reset_network(self, instance, vm_ref=None): """Calls resetnetwork method in agent.""" diff --git a/nova/volume/api.py b/nova/volume/api.py index ab9c70523..0b9b4afff 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -152,7 +152,7 @@ class API(base.Base): 'terminated_at': now}) host = volume['host'] rpc.cast(context, - self.db.queue_get_for(context, FLAGS.volume_topic, host), + rpc.queue_get_for(context, FLAGS.volume_topic, host), {"method": "delete_volume", "args": {"volume_id": volume_id}}) @@ -238,7 +238,7 @@ class API(base.Base): def remove_from_compute(self, context, volume, instance_id, host): """Remove volume from specified compute host.""" rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.queue_get_for(context, FLAGS.compute_topic, host), {"method": "remove_volume_connection", "args": {'instance_id': instance_id, 'volume_id': volume['id']}}) @@ -255,7 +255,7 @@ class API(base.Base): @wrap_check_policy def attach(self, context, volume, instance_uuid, mountpoint): host = volume['host'] - queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "attach_volume", "args": {"volume_id": volume['id'], @@ -265,7 +265,7 @@ class API(base.Base): @wrap_check_policy def detach(self, context, volume): host = volume['host'] - queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + queue = 
rpc.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "detach_volume", "args": {"volume_id": volume['id']}}) @@ -273,7 +273,7 @@ class API(base.Base): @wrap_check_policy def initialize_connection(self, context, volume, connector): host = volume['host'] - queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "initialize_connection", "args": {"volume_id": volume['id'], @@ -283,7 +283,7 @@ class API(base.Base): def terminate_connection(self, context, volume, connector): self.unreserve_volume(context, volume) host = volume['host'] - queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "terminate_connection", "args": {"volume_id": volume['id'], @@ -310,7 +310,7 @@ class API(base.Base): snapshot = self.db.snapshot_create(context, options) host = volume['host'] rpc.cast(context, - self.db.queue_get_for(context, FLAGS.volume_topic, host), + rpc.queue_get_for(context, FLAGS.volume_topic, host), {"method": "create_snapshot", "args": {"volume_id": volume['id'], "snapshot_id": snapshot['id']}}) @@ -334,7 +334,7 @@ class API(base.Base): volume = self.db.volume_get(context, snapshot['volume_id']) host = volume['host'] rpc.cast(context, - self.db.queue_get_for(context, FLAGS.volume_topic, host), + rpc.queue_get_for(context, FLAGS.volume_topic, host), {"method": "delete_snapshot", "args": {"snapshot_id": snapshot['id']}}) @@ -346,7 +346,7 @@ class API(base.Base): @wrap_check_policy def delete_volume_metadata(self, context, volume, key): - """Delete the given metadata item from an volume.""" + """Delete the given metadata item from a volume.""" self.db.volume_metadata_delete(context, volume['id'], key) @wrap_check_policy diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 5e57a42fb..6ca923bd0 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,7 +152,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'error'}) now = utils.utcnow() - self.db.volume_update(context, + volume_ref = self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) @@ -197,7 +197,7 @@ class VolumeManager(manager.SchedulerDependentManager): reservations = None LOG.exception(_("Failed to update usages deleting volume")) - self.db.volume_destroy(context, volume_id) + volume_ref = self.db.volume_destroy(context, volume_id) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) self._notify_about_volume_usage(context, volume_ref, "delete.end") diff --git a/nova/volume/san.py b/nova/volume/san.py index de30a36af..cfe9173c0 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -361,7 +361,7 @@ class SolarisISCSIDriver(SanISCSIDriver): iscsi_name = self._build_iscsi_target_name(volume) target_group_name = 'tg-%s' % volume['name'] - # Create a iSCSI target, mapped to just this volume + # Create an iSCSI target, mapped to just this volume if force_create or not self._target_group_exists(target_group_name): self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name) diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py index d4dc2eaf3..5e599d15b 100644 --- a/smoketests/test_sysadmin.py +++ b/smoketests/test_sysadmin.py @@ -255,7 +255,7 @@ class 
VolumeTests(base.UserSmokeTestCase):
     def test_003_can_mount_volume(self):
         ip = self.data['instance'].private_ip_address
         conn = self.connect_ssh(ip, TEST_KEY)
-        # NOTE(vish): this will create an dev for images that don't have
+        # NOTE(vish): this will create a dev for images that don't have
         # udev rules
         stdin, stdout, stderr = conn.exec_command(
             'grep %s /proc/partitions | '
diff --git a/tools/conf/create_conf.py b/tools/conf/create_conf.py
deleted file mode 100644
index bc551a22e..000000000
--- a/tools/conf/create_conf.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 SINA Corporation
-# All Rights Reserved.
-# Author: Zhongyue Luo <lzyeval@gmail.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Generates a nova.conf file."""
-
-import os
-import re
-import sys
-
-
-_PY_EXT = ".py"
-_FLAGS = "FLAGS"
-
-_STROPT = "StrOpt"
-_BOOLOPT = "BoolOpt"
-_INTOPT = "IntOpt"
-_FLOATOPT = "FloatOpt"
-_LISTOPT = "ListOpt"
-_MULTISTROPT = "MultiStrOpt"
-
-_OPTION_CACHE = list()
-_OPTION_REGEX = re.compile(r"(%s)" % "|".join([_STROPT, _BOOLOPT, _INTOPT,
-                                               _FLOATOPT, _LISTOPT,
-                                               _MULTISTROPT]))
-
-_BASEDIR = os.path.abspath(os.path.dirname(__file__) + "../../")
-
-
-def main(srcfiles):
-
-    def mod_prefer(mod_str):
-        prefer = ["flags.py", "log.py", "utils.py", "service.py"]
-        return prefer.index(mod_str) if mod_str in prefer else ord(mod_str[0])
-
-    def pkg_prefer(pkg_str):
-        prefer = ["auth", "api", "vnc", "ipv6", "network", "compute", "virt",
-                  "console", "consoleauth", "image"]
-        return prefer.index(pkg_str) if pkg_str in prefer else ord(pkg_str[0])
-
-    print '#' * 20 + '\n# nova.conf sample #\n' + '#' * 20
-    # NOTE(lzyeval): sort top level modules and packages
-    #                to process modules first
-    print
-    print '[DEFAULT]'
-    print
-    mods_by_pkg = dict()
-    for filepath in srcfiles:
-        pkg_name = filepath.split(os.sep)[3]
-        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[2:-1]),
-                            os.path.basename(filepath).split('.')[0]])
-        mods = mods_by_pkg.get(pkg_name, list())
-        if not mods:
-            mods_by_pkg[pkg_name] = mods
-        mods.append(mod_str)
-    # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(_PY_EXT), mods_by_pkg.keys())
-    pkg_names.sort(key=lambda x: mod_prefer(x))
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
-    ext_names.sort(key=lambda x: pkg_prefer(x))
-    pkg_names.extend(ext_names)
-    for pkg_name in pkg_names:
-        mods = mods_by_pkg.get(pkg_name)
-        mods.sort()
-        for mod_str in mods:
-            print_module(mod_str)
-
-
-def print_module(mod_str):
-    opts = list()
-    flags = None
-    if mod_str.endswith('.__init__'):
-        mod_str = mod_str[:mod_str.rfind(".")]
-    try:
-        __import__(mod_str)
-        flags = getattr(sys.modules[mod_str], _FLAGS)
-    except (ValueError, AttributeError), err:
-        return
-    except ImportError, ie:
-        sys.stderr.write("%s\n" % str(ie))
-        return
-    except Exception, e:
-        return
-    for opt_name in sorted(flags.keys()):
-        # check if option was processed
-        if opt_name in _OPTION_CACHE:
-            continue
-        opt_dict = flags._get_opt_info(opt_name)
-        opts.append(opt_dict['opt'])
-        _OPTION_CACHE.append(opt_name)
-    # return if flags has no unique options
-    if not opts:
-        return
-    # print out module info
-    print '######### defined in %s #########' % mod_str
-    print
-    for opt in opts:
-        print_opt(opt)
-    print
-
-
-def convert_abspath(s):
-    """Set up a reasonably sensible default for pybasedir."""
-    if not s.startswith(_BASEDIR):
-        return s
-    return s.replace(_BASEDIR, '/usr/lib/python/site-packages')
-
-
-def print_opt(opt):
-    opt_type = None
-    try:
-        opt_type = _OPTION_REGEX.search(str(type(opt))).group(0)
-    except (ValueError, AttributeError), err:
-        sys.stderr.write("%s\n" % str(err))
-        sys.exit(1)
-    # print out option info
-    print "######", "".join(["(", opt_type, ")"]), opt.help
-
-    name, default = opt.name, opt.default
-
-    if isinstance(default, basestring):
-        default = convert_abspath(default)
-
-    if default is None:
-        print '# %s=<None>' % name
-    else:
-        if opt_type == 'StrOpt':
-            print '# %s="%s"' % (name, default)
-        elif opt_type == 'ListOpt':
-            print '# %s="%s"' % (name, ','.join(default))
-        elif opt_type == 'MultiStrOpt':
-            for default in default:
-                print '# %s="%s"' % (name, default)
-        elif opt_type == 'BoolOpt':
-            print '# %s=%s' % (name, str(default).lower())
-        else:
-            print '# %s=%s' % (name, default)
-
-
-if __name__ == '__main__':
-    if len(sys.argv) < 2:
-        print "usage: python %s [srcfile]...\n" % sys.argv[0]
-        sys.exit(0)
-    main(sys.argv[1:])
-    print "#", "Total option count: %d" % len(_OPTION_CACHE)
diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py
new file mode 100644
index 000000000..256ca7f5f
--- /dev/null
+++ b/tools/conf/extract_opts.py
@@ -0,0 +1,161 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 SINA Corporation
+# All Rights Reserved.
+# Author: Zhongyue Luo <lzyeval@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +"""Extracts OpenStack config option info from module(s).""" + +import os +import re +import sys +import textwrap + +from nova.openstack.common import cfg +from nova.openstack.common import importutils + + +STROPT = "StrOpt" +BOOLOPT = "BoolOpt" +INTOPT = "IntOpt" +FLOATOPT = "FloatOpt" +LISTOPT = "ListOpt" +MULTISTROPT = "MultiStrOpt" + +OPTION_COUNT = 0 +OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, + FLOATOPT, LISTOPT, + MULTISTROPT])) +OPTION_HELP_INDENT = "####" + +PY_EXT = ".py" +BASEDIR = os.path.abspath(os.path.dirname(__file__) + "../../") +WORDWRAP_WIDTH = 60 + + +def main(srcfiles): + print '\n'.join(['#' * 20, '# nova.conf sample #', '#' * 20, + '', '[DEFAULT]', '']) + mods_by_pkg = dict() + for filepath in srcfiles: + pkg_name = filepath.split(os.sep)[3] + mod_str = '.'.join(['.'.join(filepath.split(os.sep)[2:-1]), + os.path.basename(filepath).split('.')[0]]) + mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) + # NOTE(lzyeval): place top level modules before packages + pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys()) + pkg_names.sort() + ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) + ext_names.sort() + pkg_names.extend(ext_names) + for pkg_name in pkg_names: + mods = mods_by_pkg.get(pkg_name) + mods.sort() + for mod_str in mods: + _print_module(mod_str) + print "# Total option count: %d" % OPTION_COUNT + + +def _print_module(mod_str): + global OPTION_COUNT + opts = list() + mod_obj = None + if mod_str.endswith('.__init__'): + mod_str = mod_str[:mod_str.rfind(".")] + try: + mod_obj = importutils.import_module(mod_str) + except (ValueError, AttributeError), err: + return + except ImportError, ie: + sys.stderr.write("%s\n" % str(ie)) + return + except Exception, e: + return + for attr_str in dir(mod_obj): + attr_obj = getattr(mod_obj, attr_str) + if isinstance(attr_obj, cfg.Opt): + opts.append(attr_obj) + elif (isinstance(attr_obj, list) and + all(map(lambda x: isinstance(x, cfg.Opt), attr_obj))): + opts.extend(attr_obj) + # NOTE(lzyeval): return if module has no options + if not opts: + return + OPTION_COUNT += len(opts) + print '######## defined in %s ########\n' % mod_str + for opt in opts: + _print_opt(opt) + print + + +def _convert_abspath(s): + """Set up a reasonably sensible default for pybasedir.""" + if not s.startswith(BASEDIR): + return s + return s.replace(BASEDIR, '/usr/lib/python/site-packages') + + +def _wrap(msg, indent): + padding = ' ' * indent + prefix = "\n%s %s " % (OPTION_HELP_INDENT, padding) + return prefix.join(textwrap.wrap(msg, WORDWRAP_WIDTH)) + + +def _print_opt(opt): + opt_name, opt_default, opt_help = opt.name, opt.default, opt.help + if not opt_help: + sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) + opt_type = None + try: + opt_type = OPTION_REGEX.search(str(type(opt))).group(0) + except (ValueError, AttributeError), err: + sys.stderr.write("%s\n" % str(err)) + sys.exit(1) + try: + if opt_default is None: + print '# %s=<None>' % opt_name + elif opt_type == STROPT: + assert(isinstance(opt_default, basestring)) + print '# %s="%s"' % (opt_name, _convert_abspath(opt_default)) + elif opt_type == BOOLOPT: + assert(isinstance(opt_default, bool)) + print '# %s=%s' % (opt_name, str(opt_default).lower()) + elif opt_type == INTOPT: + assert(isinstance(opt_default, int) and + not isinstance(opt_default, bool)) + print '# %s=%s' % (opt_name, opt_default) + elif opt_type == FLOATOPT: + assert(isinstance(opt_default, float)) + print '# %s=%s' % (opt_name, 
opt_default) + elif opt_type == LISTOPT: + assert(isinstance(opt_default, list)) + print '# %s="%s"' % (opt_name, ','.join(opt_default)) + elif opt_type == MULTISTROPT: + assert(isinstance(opt_default, list)) + for default in opt_default: + print '# %s="%s"' % (opt_name, default) + except Exception: + sys.stderr.write('Error in option "%s"\n' % opt_name) + sys.exit(1) + opt_type_tag = "(%s)" % opt_type + print OPTION_HELP_INDENT, opt_type_tag, _wrap(opt_help, len(opt_type_tag)) + print + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print "usage: python %s [srcfile]...\n" % sys.argv[0] + sys.exit(0) + main(sys.argv[1:]) diff --git a/tools/conf/generate_sample.sh b/tools/conf/generate_sample.sh index 8a4f55524..2a6b17aa4 100755 --- a/tools/conf/generate_sample.sh +++ b/tools/conf/generate_sample.sh @@ -19,7 +19,7 @@ pushd $(cd $(dirname "$0") && pwd) >/dev/null find ../../nova -type f -name "*.py" ! -path "../../nova/tests/*" -exec \ - grep -l "Opt(" {} \; | sort -u | xargs python create_conf.py > \ + grep -l "Opt(" {} \; | sort -u | xargs python extract_opts.py > \ ../../etc/nova/nova.conf.sample popd >/dev/null diff --git a/tools/hacking.py b/tools/hacking.py index 90a096e0f..7fea734b6 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -75,7 +75,7 @@ def nova_todo_format(physical_line): """ pos = physical_line.find('TODO') pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure its a comment + pos2 = physical_line.find('#') # make sure it's a comment if (pos != pos1 and pos2 >= 0 and pos2 < pos): return pos, "NOVA N101: Use TODO(NAME)" diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py index 0e423462b..bccf50b25 100755 --- a/tools/xenserver/vm_vdi_cleaner.py +++ b/tools/xenserver/vm_vdi_cleaner.py @@ -40,8 +40,7 @@ FLAGS = flags.FLAGS # flagfile. To isolate this awful practice, we're supplying a dummy # argument list. dummy = ["fakearg"] -utils.default_cfgfile(args=dummy) -FLAGS(dummy) +flags.parse_args(dummy) class UnrecognizedNameLabel(Exception): @@ -15,8 +15,8 @@ downloadcache = ~/cache/pip deps = pep8==1.1 commands = /bin/bash run_tests.sh -N --pep8 -[testenv:coverage] -commands = /bin/bash run_tests.sh -N -P --coverage +[testenv:cover] +commands = /bin/bash run_tests.sh -N -P --cover-package=nova --cover-erase --with-xcoverage [testenv:venv] commands = {posargs} @@ -30,8 +30,7 @@ basepython = python2.7 deps = file://{toxinidir}/.cache.bundle [testenv:jenkinscover] -deps = file://{toxinidir}/.cache.bundle -commands = /bin/bash run_tests.sh -N --coverage +commands = /bin/bash run_tests.sh -N -P --cover-package=nova --cover-erase --with-xcoverage [testenv:jenkinsvenv] deps = file://{toxinidir}/.cache.bundle |
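
Editor's sketch (not part of the commit): the core of the new extract_opts.py is the attribute walk in _print_module, which collects any cfg.Opt instance, or list of them, that a module defines at import time. The snippet below is a minimal, self-contained illustration of that pattern in Python 2 to match the tree; FakeOpt, collect_opts, and the throwaway "sample" module are hypothetical stand-ins for nova.openstack.common.cfg.Opt and a real nova module, not code from the repository.

import imp


class FakeOpt(object):
    """Stand-in for cfg.Opt; holds only the fields the generator reads."""
    def __init__(self, name, default=None, help=None):
        self.name = name
        self.default = default
        self.help = help


def collect_opts(mod_obj, opt_cls):
    """Gather every opt_cls instance (or list of them) bound in a module."""
    opts = []
    for attr_str in dir(mod_obj):
        attr_obj = getattr(mod_obj, attr_str)
        if isinstance(attr_obj, opt_cls):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and attr_obj and
              all(isinstance(x, opt_cls) for x in attr_obj)):
            opts.extend(attr_obj)
    return opts


# Build a throwaway module the way a nova module would declare options.
sample = imp.new_module('sample')
sample.bind_host_opt = FakeOpt('bind_host', default='0.0.0.0',
                               help='IP address to listen on')
sample.misc_opts = [FakeOpt('debug', default=False, help='verbose logging')]

for opt in collect_opts(sample, FakeOpt):
    print '# %s=%s' % (opt.name, opt.default)

In the tree itself the module list is not hand-built: tools/conf/generate_sample.sh (above) finds every non-test nova/*.py that mentions "Opt(", pipes the list through extract_opts.py, and redirects the result to etc/nova/nova.conf.sample.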