| author | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-01-25 13:34:50 -0600 |
|---|---|---|
| committer | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-01-25 13:34:50 -0600 |
| commit | a6052241ec7bce94b81e8d4fa1d43353e4eec51b (patch) | |
| tree | ad4a2dc6a1495bf546d2095ff9f975714ba81421 | |
| parent | 5ff189808d45582f0799c14eaaec687a3cf8ad5e (diff) | |
| parent | b2d0a77c53d1bd108d233e58f68655381cec4e16 (diff) | |
merge with trunk
64 files changed, 811 insertions, 488 deletions
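One pattern recurs throughout the hunks below: positional `%s` interpolation in translatable log messages is replaced by named mapping keys resolved from `locals()`, so a translator can reorder placeholders without breaking the call. A minimal, self-contained sketch of the idea (the logger name and the identity `_()` stub are illustrative stand-ins, not Nova's real gettext setup):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("nova.example")

# Stand-in for the _() installed by gettext.install('nova'); identity here.
_ = lambda s: s

def report(action, mac, ip):
    # Old style: translators cannot reorder the anonymous %s slots.
    LOG.debug(_("Called %s for mac %s with ip %s"), action, mac, ip)
    # New style, as in the bin/nova-dhcpbridge hunk: named keys pulled
    # from local variables, so a translated string may reorder them freely.
    msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s") % locals()
    LOG.debug(msg)

report("add", "02:16:3e:00:00:01", "10.0.0.5")
```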
@@ -21,6 +21,7 @@ Jay Pipes <jaypipes@gmail.com> Jesse Andrews <anotherjesse@gmail.com> Joe Heck <heckj@mac.com> Joel Moore <joelbm24@gmail.com> +John Dewey <john@dewey.ws> Jonathan Bryce <jbryce@jbryce.com> Josh Durgin <joshd@hq.newdream.net> Josh Kearney <josh.kearney@rackspace.com> @@ -40,6 +41,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail Paul Voccio <paul@openstack.org> Rick Clark <rick@openstack.org> Rick Harris <rconradharris@gmail.com> +Rob Kost <kost@isi.edu> Ryan Lane <rlane@wikimedia.org> Ryan Lucio <rlucio@internap.com> Salvatore Orlando <salvatore.orlando@eu.citrix.com> diff --git a/MANIFEST.in b/MANIFEST.in index 07e4dd516..3908830d7 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,6 +5,7 @@ graft CA graft doc graft smoketests graft tools +graft etc include nova/api/openstack/notes.txt include nova/auth/novarc.template include nova/auth/slap.sh diff --git a/bin/nova-api b/bin/nova-api index 7b4fbeab1..11176a021 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -36,6 +36,7 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging +from nova import version from nova import wsgi logging.basicConfig() @@ -55,7 +56,7 @@ def run_app(paste_config_file): if config is None: LOG.debug(_("No paste configuration for app: %s"), api) continue - LOG.debug(_("App Config: %s\n%r"), api, config) + LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) wsgi.paste_config_to_flags(config, { "verbose": FLAGS.verbose, "%s_host" % api: config.get('host', '0.0.0.0'), @@ -79,6 +80,8 @@ def run_app(paste_config_file): if __name__ == '__main__': FLAGS(sys.argv) + LOG.audit(_("Starting nova-api node (version %s)"), + version.version_string_with_vcs()) conf = wsgi.paste_config_file('nova-api.conf') if conf: run_app(conf) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 1a994d956..d38ba2543 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -120,9 +120,9 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - LOG.debug(_("Called %s for mac %s with ip %s and " - "hostname %s on interface %s"), - action, mac, ip, hostname, interface) + msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and" + " hostname %(hostname)s on interface %(interface)s") % locals() + LOG.debug(msg) globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) diff --git a/bin/nova-direct-api b/bin/nova-direct-api index e7dd14fb2..173b39bdb 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -49,7 +49,7 @@ if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) - direct.register_service('compute', compute_api.ComputeAPI()) + direct.register_service('compute', compute_api.API()) direct.register_service('reflect', direct.Reflection()) router = direct.Router() with_json = direct.JsonParamsMiddleware(router) diff --git a/bin/nova-manage b/bin/nova-manage index d0901ddfc..1c885f8a6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -79,7 +79,9 @@ from nova import exception from nova import flags from nova import log as logging from nova import quota +from nova import rpc from nova import utils +from nova.api.ec2.cloud import ec2_id_to_id from nova.auth import manager from nova.cloudpipe import pipelib from nova.db import migration @@ -95,6 +97,16 @@ flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') +def param2id(object_id): + """Helper function to convert various id types to internal id. + args: [object_id], e.g. 
'vol-0000000a' or 'volume-0000000a' or '10' + """ + if '-' in object_id: + return ec2_id_to_id(object_id) + else: + return int(object_id) + + class VpnCommands(object): """Class for managing VPNs.""" @@ -461,9 +473,12 @@ class NetworkCommands(object): fixed_range_v6 = FLAGS.fixed_range_v6 net_manager = utils.import_object(FLAGS.network_manager) net_manager.create_networks(context.get_admin_context(), - fixed_range, int(num_networks), - int(network_size), int(vlan_start), - int(vpn_start), fixed_range_v6) + cidr=fixed_range, + num_networks=int(num_networks), + network_size=int(network_size), + vlan_start=int(vlan_start), + vpn_start=int(vpn_start), + cidr_v6=fixed_range_v6) class ServiceCommands(object): @@ -535,6 +550,46 @@ class DbCommands(object): print migration.db_version() +class VolumeCommands(object): + """Methods for dealing with a cloud in an odd state""" + + def delete(self, volume_id): + """Delete a volume, bypassing the check that it + must be available. + args: volume_id_id""" + ctxt = context.get_admin_context() + volume = db.volume_get(ctxt, param2id(volume_id)) + host = volume['host'] + if volume['status'] == 'in-use': + print "Volume is in-use." + print "Detach volume from instance and then try again." + return + + rpc.cast(ctxt, + db.queue_get_for(ctxt, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume['id']}}) + + def reattach(self, volume_id): + """Re-attach a volume that has previously been attached + to an instance. Typically called after a compute host + has been rebooted. + args: volume_id_id""" + ctxt = context.get_admin_context() + volume = db.volume_get(ctxt, param2id(volume_id)) + if not volume['instance_id']: + print "volume is not attached to an instance" + return + instance = db.instance_get(ctxt, volume['instance_id']) + host = instance['host'] + rpc.cast(ctxt, + db.queue_get_for(ctxt, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume['id'], + "mountpoint": volume['mountpoint']}}) + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -545,7 +600,8 @@ CATEGORIES = [ ('network', NetworkCommands), ('service', ServiceCommands), ('log', LogCommands), - ('db', DbCommands)] + ('db', DbCommands), + ('volume', VolumeCommands)] def lazy_match(name, key_value_tuples): diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry index ab20268a9..c53482852 100644 --- a/bin/nova-spoolsentry +++ b/bin/nova-spoolsentry @@ -74,10 +74,8 @@ class SpoolSentry(object): return rv def send_data(self, data): - data = { - 'data': base64.b64encode(pickle.dumps(data).encode('zlib')), - 'key': self.key - } + data = {'data': base64.b64encode(pickle.dumps(data).encode('zlib')), + 'key': self.key} req = urllib2.Request(self.sentry_url) res = urllib2.urlopen(req, urllib.urlencode(data)) if res.getcode() != 200: @@ -22,6 +22,7 @@ import eventlet eventlet.monkey_patch() +import json import os import pprint import sys @@ -38,7 +39,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) import gflags -from nova import utils FLAGS = gflags.FLAGS @@ -106,8 +106,12 @@ def do_request(controller, method, params=None): 'X-OpenStack-Project': FLAGS.project} req = urllib2.Request(url, data, headers) - resp = urllib2.urlopen(req) - return utils.loads(resp.read()) + try: + resp = urllib2.urlopen(req) + except urllib2.HTTPError, e: + print e.read() + sys.exit(1) + return json.loads(resp.read()) if __name__ == '__main__': 
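The bin/nova-manage hunk above adds `param2id()` so the new volume commands accept either an EC2-style identifier or a bare integer. A sketch of the conversion path, with a stand-in `ec2_id_to_id` (the real helper is imported from `nova.api.ec2.cloud`; its hex-suffix parsing here is an assumption based on the EC2 id format of that era):

```python
def ec2_id_to_id(ec2_id):
    # Assumed behaviour of nova.api.ec2.cloud.ec2_id_to_id: take the part
    # after the last '-' and read it as hexadecimal.
    return int(ec2_id.split('-')[-1], 16)

def param2id(object_id):
    """Convert 'vol-0000000a', 'volume-0000000a' or '10' to an internal id."""
    if '-' in object_id:
        return ec2_id_to_id(object_id)
    return int(object_id)

assert param2id('vol-0000000a') == 10
assert param2id('volume-0000000a') == 10
assert param2id('10') == 10
```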
diff --git a/contrib/nova.sh b/contrib/nova.sh index a0e8e642c..08dc89bae 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -87,6 +87,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah + sudo apt-get install -y python-paste python-pastedeploy #For IPV6 sudo apt-get install -y python-netaddr sudo apt-get install -y radvd diff --git a/doc/ext/nova_todo.py b/doc/ext/nova_todo.py index efc0c3edd..67bbfd2e0 100644 --- a/doc/ext/nova_todo.py +++ b/doc/ext/nova_todo.py @@ -26,7 +26,7 @@ def process_todo_nodes(app, doctree, fromdocname): # reading through docutils for the proper way to construct an empty list lists = [] for i in xrange(5): - lists.append(nodes.bullet_list("", nodes.Text('',''))); + lists.append(nodes.bullet_list("", nodes.Text('',''))) lists[i].remove(lists[i][0]) lists[i].set_class('todo_list') @@ -42,7 +42,8 @@ def process_todo_nodes(app, doctree, fromdocname): # Create a reference newnode = nodes.reference('', '') - link = _('%s, line %d') % (filename, todo_info['lineno']); + line_info = todo_info['lineno'] + link = _('%(filename)s, line %(line_info)d') % locals() innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst index 9d856458a..bd0693c46 100644 --- a/doc/source/adminguide/distros/ubuntu.10.04.rst +++ b/doc/source/adminguide/distros/ubuntu.10.04.rst @@ -31,7 +31,7 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl :: - sudo add-get install python-software-properties + sudo apt-get install python-software-properties sudo add-apt-repository ppa:nova-core/trunk sudo apt-get update sudo apt-get install python-twisted python-gflags diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst index e653c9e8b..3bd72cfdc 100644 --- a/doc/source/adminguide/index.rst +++ b/doc/source/adminguide/index.rst @@ -60,12 +60,13 @@ For background on the core objects referenced in this section, see :doc:`../obje Deployment ---------- -.. todo:: talk about deployment scenarios +For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). .. toctree:: :maxdepth: 1 multi.node.install + dbsync Networking diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst index 5918b0d38..df7078180 100644 --- a/doc/source/adminguide/multi.node.install.rst +++ b/doc/source/adminguide/multi.node.install.rst @@ -1,20 +1,3 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. Installing Nova on Multiple Servers =================================== @@ -26,13 +9,14 @@ through that process. You can install multiple nodes to increase performance and availability of the OpenStack Compute installation. -This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward. - - +This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward. + +For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. + Requirements for a multi-node installation ------------------------------------------ -* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though. +* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though. * For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies. * For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL. @@ -41,7 +25,45 @@ Assumptions * Networking is configured between/through the physical machines on a single subnet. * Installation and execution are both performed by ROOT user. - + +Scripted Installation --------------------- +A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node. + +You must run these scripts with root permissions. + +From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/. 
+ +:: + + wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1 + +Ensure you can execute the script by modifying the permissions on the script file. + +:: + + sudo chmod 755 Nova_CC_Installer_v0.1 + + +:: + + sudo ./Nova_CC_Installer_v0.1 + +Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node. + +Restart related services:: + + libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart + +You can go to the `Configuration section`_ for next steps. + +Manual Installation - Step-by-Step +---------------------------------- +The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only. + +Cloud Controller Installation +````````````````````````````` +On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_. Step 1 - Use apt-get to get the latest code ------------------------------------------- @@ -59,19 +81,18 @@ Step 1 - Use apt-get to get the latest code sudo apt-get update -3. Install nova-pkgs (dependencies should be automatically installed). +3. Install the required python packages, nova-packages, and helper apps. :: - sudo apt-get install python-greenlet - sudo apt-get install nova-common nova-doc python-nova nova-api nova-network nova-objectstore nova-scheduler + sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1! -Step 2 Setup configuration file (installed in /etc/nova) -------------------------------------------------------- +Step 2 Set up configuration file (installed in /etc/nova) --------------------------------------------------------- -1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf: +1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf: :: @@ -81,7 +102,7 @@ Step 2 Setup configuration file (installed in /etc/nova) --logdir=/var/log/nova --state_path=/var/lib/nova -The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly: +The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly: --sql_connection ### Location of Nova SQL DB 
The Nova config file should have its owner set to root:nova, and mode set to 0644, since it contains your MySQL server's root password. :: chown -R root:nova /etc/nova - chmod 644 /etc/nova/nova.conf + chmod 644 /etc/nova/nova.conf Step 3 - Setup the SQL DB (MySQL for this setup) ------------------------------------------------ @@ -153,10 +174,30 @@ Step 3 - Setup the SQL DB (MySQL for this setup) sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf service mysql restart + +4. MySQL DB configuration: + +Create NOVA database:: + + mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;' + +Update the DB to include user 'root'@'%' with super user privileges:: + + mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" + +Set mySQL root password:: + + mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');" + +Compute Node Installation +````````````````````````` -3. Network Configuration +Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node. -If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically. +Network Configuration +--------------------- + +If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically. Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following:: @@ -179,31 +220,24 @@ Next, restart networking to apply the changes:: -4. MySQL DB configuration: - -Create NOVA database:: - - mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;' - -Update the DB to include user 'root'@'%' with super user privileges:: +Configuration ````````````` - mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" - -Set mySQL root password:: +On the Compute node, you should continue with these configuration steps. 
- mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');" - -Step 4 - Setup Nova environment ------------------------------- +Step 1 - Set up the Nova environment ------------------------------------ -These are the commands you run to set up a user and project:: +These are the commands you run to update the database if needed, and then set up a user and project:: + /usr/bin/python /usr/bin/nova-manage db sync /usr/bin/python /usr/bin/nova-manage user admin <user_name> /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name> /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project> Here is an example of what this looks like with real data:: + /usr/bin/python /usr/bin/nova-manage db sync /usr/bin/python /usr/bin/nova-manage user admin dub /usr/bin/python /usr/bin/nova-manage project create dubproject dub /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255 @@ -215,7 +249,7 @@ Note: The nova-manage service assumes that the first IP address is your network On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device. -Step 5 - Create Nova certifications +Step 2 - Create Nova certifications ----------------------------------- 1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions. @@ -229,18 +263,18 @@ Step 5 - Create Nova certifications :: - unzip /root/creds/novacreds.zip -d /root/creds/ + unzip /root/creds/novacreds.zip -d /root/creds/ cat /root/creds/novarc >> ~/.bashrc source ~/.bashrc -Step 6 - Restart all relevant services +Step 3 - Restart all relevant services -------------------------------------- Restart all six services in total, just to cover the entire spectrum:: libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart -Step 7 - Closing steps, and cleaning up +Step 4 - Closing steps, and cleaning up --------------------------------------- One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs:: @@ -253,8 +287,8 @@ Another common issue is you cannot ping or SSH your instances after issuing the killall dnsmasq service nova-network restart -Step 8 – Testing the installation ---------------------------------- +Testing the Installation ```````````````````````` You can then use `euca2ools` to test some items:: @@ -267,13 +301,15 @@ If you have issues with the API key, you may need to re-source your creds file:: If you don’t get any immediate errors, you’re successfully making calls to your cloud! -Step 9 - Spinning up a VM for testing ------------------------------------- +Spinning up a VM for Testing ```````````````````````````` (This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.) 
The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM. +UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_. + Download the image, and publish to your bucket: :: @@ -324,5 +360,4 @@ You can determine the instance-id with `euca-describe-instances`, and the format For more information on creating your own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages! -Enjoy your new private cloud, and play responsibly! - +Enjoy your new private cloud, and play responsibly!
\ No newline at end of file diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst index 8572c5a4a..119e3855b 100644 --- a/doc/source/adminguide/single.node.install.rst +++ b/doc/source/adminguide/single.node.install.rst @@ -159,7 +159,7 @@ To make things easier, we've provided a small image on the Rackspace CDN. Use th Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7 Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected. HTTP request sent, awaiting response... 200 OK - Length: 58520278 (56M) [appication/x-gzip] + Length: 58520278 (56M) [application/x-gzip] Saving to: `images.tgz' 100%[======================================>] 58,520,278 14.1M/s in 3.9s diff --git a/doc/source/index.rst b/doc/source/index.rst index 6eec09acb..d337fb69f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -20,7 +20,7 @@ Welcome to Nova's documentation! Nova is a cloud computing fabric controller, the main part of an IaaS system. Individuals and organizations can use Nova to host and manage their own cloud -computing systems. Nova originated as a project out of NASA Ames Research Laboratory. +computing systems. Nova originated as a project out of NASA Ames Research Laboratory. Nova is written with the following design guidelines in mind: @@ -32,7 +32,7 @@ Nova is written with the following design guidelines in mind: * **API Compatibility**: Nova strives to provide API-compatible with popular systems like Amazon EC2 This documentation is generated by the Sphinx toolkit and lives in the source -tree. Additional documentation on Nova and other components of OpenStack can +tree. Additional documentation on Nova and other components of OpenStack can be found on the `OpenStack wiki`_. Also see the :doc:`community` page for other ways to interact with the community. diff --git a/etc/nova-api.conf b/etc/nova-api.conf index 4873e465d..f0e749805 100644 --- a/etc/nova-api.conf +++ b/etc/nova-api.conf @@ -11,7 +11,14 @@ use = egg:Paste#urlmap /services/Cloud: ec2cloud /services/Admin: ec2admin /latest: ec2metadata -/20: ec2metadata +/2007-01-19: ec2metadata +/2007-03-01: ec2metadata +/2007-08-29: ec2metadata +/2007-10-10: ec2metadata +/2007-12-15: ec2metadata +/2008-02-01: ec2metadata +/2008-09-01: ec2metadata +/2009-04-04: ec2metadata /1.0: ec2metadata [pipeline:ec2cloud] diff --git a/nova/api/direct.py b/nova/api/direct.py index 81b3ae202..208b6d086 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -142,9 +142,15 @@ class Reflection(object): if argspec[2]: args_out.insert(0, ('**%s' % argspec[2],)) + if f.__doc__: + short_doc = f.__doc__.split('\n')[0] + doc = f.__doc__ + else: + short_doc = doc = _('not available') + methods['/%s/%s' % (route, k)] = { - 'short_doc': f.__doc__.split('\n')[0], - 'doc': f.__doc__, + 'short_doc': short_doc, + 'doc': doc, 'name': k, 'args': list(reversed(args_out))} @@ -196,6 +202,8 @@ class ServiceWrapper(wsgi.Controller): # TODO(termie): do some basic normalization on methods method = getattr(self.service_handle, action) + # NOTE(vish): make sure we have no unicode keys for py2.6. 
+ params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) if type(result) is dict or type(result) is list: return self._serialize(result, req) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 238cb0f38..9938b23f8 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -131,9 +131,11 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - LOG.warn(_('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.'), - access_key, failures, FLAGS.lockout_minutes) + lock_mins = FLAGS.lockout_minutes + msg = _('Access key %(access_key)s has had %(failures)d' + ' failed authentications and will be locked out' + ' for %(lock_mins)d minutes.') % locals() + LOG.warn(msg) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res @@ -179,8 +181,10 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt - LOG.audit(_('Authenticated Request For %s:%s)'), user.name, - project.name, context=req.environ['ec2.context']) + uname = user.name + pname = project.name + msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals() + LOG.audit(msg, context=req.environ['ec2.context']) return self.application @@ -206,7 +210,7 @@ class Requestify(wsgi.Middleware): LOG.debug(_('action: %s'), action) for key, value in args.items(): - LOG.debug(_('arg: %s\t\tval: %s'), key, value) + LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals()) # Success! api_request = apirequest.APIRequest(self.controller, action, args) @@ -277,8 +281,8 @@ class Authorizer(wsgi.Middleware): if self._matches_any_role(context, allowed_roles): return self.application else: - LOG.audit(_("Unauthorized request for controller=%s " - "and action=%s"), controller, action, context=context) + LOG.audit(_('Unauthorized request for controller=%(controller)s ' + 'and action=%(action)s') % locals(), context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 758b612e8..78ff1b3e0 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -111,19 +111,23 @@ class AdminController(object): """Add or remove a role for a user and project.""" if operation == 'add': if project: - LOG.audit(_("Adding role %s to user %s for project %s"), role, - user, project, context=context) + msg = _("Adding role %(role)s to user %(user)s" + " for project %(project)s") % locals() + LOG.audit(msg, context=context) else: - LOG.audit(_("Adding sitewide role %s to user %s"), role, user, - context=context) + msg = _("Adding sitewide role %(role)s to" + " user %(user)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': if project: - LOG.audit(_("Removing role %s from user %s for project %s"), - role, user, project, context=context) + msg = _("Removing role %(role)s from user %(user)s" + " for project %(project)s") % locals() + LOG.audit(msg, context=context) else: - LOG.audit(_("Removing sitewide role %s from user %s"), role, - user, context=context) + msg = _("Removing sitewide role %(role)s" + " from user %(user)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().remove_role(user, role, project) else: raise exception.ApiError(_('operation 
must be add or remove')) @@ -139,8 +143,9 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) - LOG.audit(_("Getting x509 for user: %s on project: %s"), name, - project, context=context) + msg = _("Getting x509 for user: %(name)s" + " on project: %(project)s") % locals() + LOG.audit(msg, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -156,8 +161,9 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" - LOG.audit(_("Create project %s managed by %s"), name, manager_user, - context=context) + msg = _("Create project %(name)s managed by" + " %(manager_user)s") % locals() + LOG.audit(msg, context=context) return project_dict( manager.AuthManager().create_project( name, @@ -181,12 +187,13 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': - LOG.audit(_("Adding user %s to project %s"), user, project, - context=context) + msg = _("Adding user %(user)s to project %(project)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': - LOG.audit(_("Removing user %s from project %s"), user, project, - context=context) + msg = _("Removing user %(user)s from" + " project %(project)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().remove_from_project(user, project) else: raise exception.ApiError(_('operation must be add or remove')) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 78576470a..d8a2b5f53 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -93,8 +93,10 @@ class APIRequest(object): method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: - _error = _('Unsupported API request: controller = %s,' - 'action = %s') % (self.controller, self.action) + controller = self.controller + action = self.action + _error = _('Unsupported API request: controller = %(controller)s,' + ' action = %(action)s') % locals() LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. 
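The nova/api/direct.py ServiceWrapper hunk above coerces parameter names with `str()` before the `**params` expansion. The NOTE cites Python 2.6, where expanding a dict with unicode keys fails with "keywords must be strings". A small sketch of the guard; the handler name here is made up for illustration:

```python
def attach_volume(instance_id=None, volume_id=None):
    return (instance_id, volume_id)

# JSON-decoded parameters arrive with unicode keys.
params = {u'instance_id': 1, u'volume_id': 2}

# The coercion from the hunk above; a no-op on Python 3, required on 2.6.
params = dict([(str(k), v) for (k, v) in params.items()])
print(attach_volume(**params))  # -> (1, 2)
```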
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c94540793..3b228bf1a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -59,7 +59,7 @@ def _gen_key(context, user_id, key_name): # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) - raise exception.Duplicate("The key_pair %s already exists" + raise exception.Duplicate(_("The key_pair %s already exists") % key_name) except exception.NotFound: pass @@ -133,7 +133,7 @@ class CloudController(object): return result def _get_availability_zone_by_host(self, context, host): - services = db.service_get_all_by_host(context, host) + services = db.service_get_all_by_host(context.elevated(), host) if len(services) > 0: return services[0]['availability_zone'] return 'unknown zone' @@ -252,18 +252,18 @@ class CloudController(object): regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') - endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix, + endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, - FLAGS.ec2_suffix) + FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', - 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, - FLAGS.ec2_suffix)}] + FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, @@ -601,8 +601,9 @@ class CloudController(object): def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_id = ec2_id_to_id(volume_id) instance_id = ec2_id_to_id(instance_id) - LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id, - instance_id, device, context=context) + msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" + " at %(device)s") % locals() + LOG.audit(msg, context=context) self.compute_api.attach_volume(context, instance_id=instance_id, volume_id=volume_id, @@ -751,8 +752,8 @@ class CloudController(object): return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): - LOG.audit(_("Associate address %s to instance %s"), public_ip, - instance_id, context=context) + LOG.audit(_("Associate address %(public_ip)s to" + " instance %(instance_id)s") % locals(), context=context) instance_id = ec2_id_to_id(instance_id) self.compute_api.associate_floating_ip(context, instance_id=instance_id, @@ -840,8 +841,9 @@ class CloudController(object): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - LOG.audit(_("Registered image %s with id %s"), image_location, - image_id, context=context) + msg = _("Registered image %(image_location)s with" + " id %(image_id)s") % locals() + LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 54af09a5d..32249b80e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -133,9 +133,9 @@ class Controller(wsgi.Controller): if image_id in mapping: return mapping[image_id] - raise exception.NotFound( - _("No entry for image '%s' in mapping file '%s'") % - (image_id, mapping_filename)) + msg = _("No entry for image '%(image_id)s'" + " in mapping file '%(mapping_filename)s'") % locals() + raise exception.NotFound(msg) def create(self, req): """ Creates a new 
server for a given user """ diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index 0eb6fe588..d8dad8edd 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -119,8 +119,8 @@ class DbDriver(object): for member_uid in member_uids: member = db.user_get(context.get_admin_context(), member_uid) if not member: - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.add(member) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index bc53e0ec6..e652f1caa 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -146,7 +146,7 @@ class LdapDriver(object): def create_user(self, name, access_key, secret_key, is_admin): """Create a user""" if self.__user_exists(name): - raise exception.Duplicate("LDAP user %s already exists" % name) + raise exception.Duplicate(_("LDAP user %s already exists") % name) if FLAGS.ldap_user_modify_only: if self.__ldap_user_exists(name): # Retrieve user by name @@ -310,7 +310,7 @@ class LdapDriver(object): def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): - raise exception.NotFound("User %s doesn't exist" % uid) + raise exception.NotFound(_("User %s doesn't exist") % uid) self.__remove_from_all(uid) if FLAGS.ldap_user_modify_only: # Delete attributes @@ -432,15 +432,15 @@ class LdapDriver(object): description, member_uids=None): """Create a group""" if self.__group_exists(group_dn): - raise exception.Duplicate("Group can't be created because " - "group %s already exists" % name) + raise exception.Duplicate(_("Group can't be created because " + "group %s already exists") % name) members = [] if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % - member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -455,8 +455,8 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "because the user doesn't exist" % uid) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -467,29 +467,29 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "because the user doesn't exist" % uid) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % + raise exception.NotFound(_("The group at dn %s doesn't exist") % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate(_("User %s is already a member of " - "the group %s") % (uid, group_dn)) + raise exception.Duplicate(_("User %(uid)s is already a member of " + "the group %(group_dn)s") % locals()) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove 
user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - group_dn) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % - uid) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - uid) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: @@ -509,8 +509,9 @@ class LdapDriver(object): def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % uid) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -584,10 +585,11 @@ class LdapDriver(object): else: return None - @staticmethod - def __dn_to_uid(dn): + def __dn_to_uid(self, dn): """Convert user dn to uid""" - return dn.split(',')[0].split('=')[1] + query = '(objectclass=novaUser)' + user = self.__find_object(dn, query) + return user[FLAGS.ldap_user_id_attribute][0] class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 1652e24e1..450ab803a 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -272,16 +272,22 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - LOG.audit(_("failed authorization: no project named %s (user=%s)"), - project_id, user.name) + pjid = project_id + uname = user.name + LOG.audit(_("failed authorization: no project named %(pjid)s" + " (user=%(uname)s)") % locals()) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - LOG.audit(_("Failed authorization: user %s not admin and not " - "member of project %s"), user.name, project.name) - raise exception.NotFound(_('User %s is not a member of project %s') - % (user.id, project.id)) + uname = user.name + uid = user.id + pjname = project.name + pjid = project.id + LOG.audit(_("Failed authorization: user %(uname)s not admin" + " and not member of project %(pjname)s") % locals()) + raise exception.NotFound(_('User %(uid)s is not a member of' + ' project %(pjid)s') % locals()) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -408,14 +414,16 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + uid = User.safe_id(user) + pid = Project.safe_id(project) if project: - LOG.audit(_("Adding role %s to user %s in project %s"), role, - User.safe_id(user), Project.safe_id(project)) + LOG.audit(_("Adding role %(role)s to user %(uid)s" + " in project %(pid)s") % locals()) else: - LOG.audit(_("Adding sitewide role %s to user %s"), role, - User.safe_id(user)) + LOG.audit(_("Adding sitewide 
role %(role)s to user %(uid)s") + % locals()) with self.driver() as drv: - drv.add_role(User.safe_id(user), role, Project.safe_id(project)) + drv.add_role(uid, role, pid) def remove_role(self, user, role, project=None): """Removes role for user @@ -434,14 +442,16 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. """ + uid = User.safe_id(user) + pid = Project.safe_id(project) if project: - LOG.audit(_("Removing role %s from user %s on project %s"), - role, User.safe_id(user), Project.safe_id(project)) + LOG.audit(_("Removing role %(role)s from user %(uid)s" + " on project %(pid)s") % locals()) else: - LOG.audit(_("Removing sitewide role %s from user %s"), role, - User.safe_id(user)) + LOG.audit(_("Removing sitewide role %(role)s" + " from user %(uid)s") % locals()) with self.driver() as drv: - drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + drv.remove_role(uid, role, pid) @staticmethod def get_roles(project_roles=True): @@ -502,8 +512,8 @@ class AuthManager(object): description, member_users) if project_dict: - LOG.audit(_("Created project %s with manager %s"), name, - manager_user) + LOG.audit(_("Created project %(name)s with" + " manager %(manager_user)s") % locals()) project = Project(**project_dict) return project @@ -530,11 +540,12 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" - LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), - Project.safe_id(project)) + uid = User.safe_id(user) + pid = Project.safe_id(project) + LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals()) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), - Project.safe_id(project)) + Project.safe_id(project)) def is_project_manager(self, user, project): """Checks if user is project manager""" @@ -550,11 +561,11 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" - LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), - Project.safe_id(project)) + uid = User.safe_id(user) + pid = Project.safe_id(project) + LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals()) with self.driver() as drv: - return drv.remove_from_project(User.safe_id(user), - Project.safe_id(project)) + return drv.remove_from_project(uid, pid) @staticmethod def get_project_vpn_data(project): @@ -634,7 +645,10 @@ class AuthManager(object): user_dict = drv.create_user(name, access, secret, admin) if user_dict: rv = User(**user_dict) - LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + rvname = rv.name + rvadmin = rv.admin + LOG.audit(_("Created user %(rvname)s" + " (admin: %(rvadmin)r)") % locals()) return rv def delete_user(self, user): @@ -656,7 +670,8 @@ class AuthManager(object): if secret_key: LOG.audit(_("Secret Key change for user %s"), uid) if admin is not None: - LOG.audit(_("Admin status set to %r for user %s"), admin, uid) + LOG.audit(_("Admin status set to %(admin)r" + " for user %(uid)s") % locals()) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) diff --git a/nova/compute/api.py b/nova/compute/api.py index a6b99c1cb..1d8b9d79f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -92,8 +92,9 @@ class API(base.Base): type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: - LOG.warn(_("Quota exceeeded for %s, tried to 
run %s instances"), - context.project_id, min_count) + pid = context.project_id + LOG.warn(_("Quota exceeeded for %(pid)s," + " tried to run %(min_count)s instances") % locals()) raise quota.QuotaError(_("Instance quota exceeded. You can only " "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") @@ -183,8 +184,10 @@ class API(base.Base): instance = self.update(context, instance_id, **updates) instances.append(instance) - LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) + pid = context.project_id + uid = context.user_id + LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" + " instance %(instance_id)s") % locals()) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", @@ -246,13 +249,16 @@ class API(base.Base): # ..then we distill the security groups to which they belong.. security_groups = set() for rule in security_group_rules: - security_groups.add(rule['parent_group_id']) + security_group = self.db.security_group_get( + context, + rule['parent_group_id']) + security_groups.add(security_group) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: - instances.add(instance['id']) + instances.add(instance) # ...then we find the hosts where they live... hosts = set() diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6f09ce674..376b1ed68 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,8 +77,8 @@ def checks_instance_lock(function): LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) - LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), - self, context, instance_id, context=context) + LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" + " |%(instance_id)s|") % locals(), context=context) locked = self.get_lock(context, instance_id) admin = context.is_admin LOG.info(_("check_instance_lock: locked: |%s|"), locked, @@ -278,11 +278,11 @@ class ComputeManager(manager.Manager): LOG.audit(_("Rebooting instance %s"), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: + state = instance_ref['state'] + running = power_state.RUNNING LOG.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING, + 'instance: %(instance_id)s (state: %(state)s ' + 'expected: %(running)s)') % locals(), context=context) self.db.instance_set_state(context, @@ -307,9 +307,11 @@ class ComputeManager(manager.Manager): LOG.audit(_('instance %s: snapshotting'), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: + state = instance_ref['state'] + running = power_state.RUNNING LOG.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, instance_ref['state'], power_state.RUNNING) + 'instance: %(instance_id)s (state: %(state)s ' + 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) @@ -517,8 +519,8 @@ class ComputeManager(manager.Manager): """Attach a volume to an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint, context=context) + LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s" + " to %(mountpoint)s") % locals(), context=context) 
dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -533,8 +535,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - LOG.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint, context=context) + LOG.exception(_("instance %(instance_id)s: attach failed" + " %(mountpoint)s, removing") % locals(), context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -548,9 +550,9 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) - LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"), - volume_id, volume_ref['mountpoint'], instance_id, - context=context) + mp = volume_ref['mountpoint'] + LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s" + " on instance %(instance_id)s") % locals(), context=context) if instance_ref['name'] not in self.driver.list_instances(): LOG.warn(_("Detaching volume from unknown instance %s"), instance_id, context=context) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 14d0e8ca1..04e08a235 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -352,8 +352,9 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - LOG.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + iid = self.instance_id + LOG.error(_('Cannot get blockstats for "%(disk)s"' + ' on "%(iid)s"') % locals()) raise return '%d:%d' % (rd, wr) @@ -374,8 +375,9 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - LOG.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + iid = self.instance_id + LOG.error(_('Cannot get ifstats for "%(interface)s"' + ' on "%(iid)s"') % locals()) raise return '%d:%d' % (rx, tx) diff --git a/nova/console/manager.py b/nova/console/manager.py index c55ca8e8f..5697e7cb1 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -67,7 +67,7 @@ class ConsoleProxyManager(manager.Manager): pool['id'], instance_id) except exception.NotFound: - logging.debug("Adding console") + logging.debug(_("Adding console")) if not password: password = self.driver.generate_password() if not port: diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 2a76223da..ee66dac46 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -96,7 +96,7 @@ class XVPConsoleProxy(object): return os.urandom(length * 2).encode('base64')[:length] def _rebuild_xvp_conf(self, context): - logging.debug("Rebuilding xvp conf") + logging.debug(_("Rebuilding xvp conf")) pools = [pool for pool in db.console_pool_get_all_by_host_type(context, self.host, self.console_type) @@ -113,12 +113,12 @@ class XVPConsoleProxy(object): self._xvp_restart() def _write_conf(self, config): - logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf) + logging.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf) with open(FLAGS.console_xvp_conf, 'w') as cfile: cfile.write(config) def _xvp_stop(self): - logging.debug("Stopping xvp") + logging.debug(_("Stopping xvp")) pid = self._xvp_pid() if not pid: return @@ -131,19 +131,19 @@ class XVPConsoleProxy(object): def _xvp_start(self): if self._xvp_check_running(): return - logging.debug("Starting xvp") + logging.debug(_("Starting xvp")) try: utils.execute('xvp -p %s -c %s -l %s' % (FLAGS.console_xvp_pid, 
FLAGS.console_xvp_conf, FLAGS.console_xvp_log)) except exception.ProcessExecutionError, err: - logging.error("Error starting xvp: %s" % err) + logging.error(_("Error starting xvp: %s") % err) def _xvp_restart(self): - logging.debug("Restarting xvp") + logging.debug(_("Restarting xvp")) if not self._xvp_check_running(): - logging.debug("xvp not running...") + logging.debug(_("xvp not running...")) self._xvp_start() else: pid = self._xvp_pid() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b63b84bed..370ca651a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -247,7 +247,8 @@ def service_get_by_args(context, host, binary): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound(_('No service for %s, %s') % (host, binary)) + raise exception.NotFound(_('No service for %(host)s, %(binary)s') + % locals()) return result @@ -777,7 +778,7 @@ def instance_get_by_id(context, instance_id): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -785,6 +786,7 @@ def instance_get_by_id(context, instance_id): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ @@ -934,8 +936,8 @@ def key_pair_get(context, user_id, name, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound(_('no keypair for user %s, name %s') % - (user_id, name)) + raise exception.NotFound(_('no keypair for user %(user_id)s,' + ' name %(name)s') % locals()) return result @@ -1536,8 +1538,8 @@ def security_group_get_by_name(context, project_id, group_name): first() if not result: raise exception.NotFound( - _('No security group named %s for project: %s') - % (group_name, project_id)) + _('No security group named %(group_name)s' + ' for project: %(project_id)s') % locals()) return result @@ -1921,8 +1923,8 @@ def console_pool_get(context, pool_id): filter_by(id=pool_id).\ first() if not result: - raise exception.NotFound(_("No console pool with id %(pool_id)s") % - {'pool_id': pool_id}) + raise exception.NotFound(_("No console pool with id %(pool_id)s") + % locals()) return result @@ -1938,12 +1940,9 @@ def console_pool_get_by_host_type(context, compute_host, host, options(joinedload('consoles')).\ first() if not result: - raise exception.NotFound(_('No console pool of type %(type)s ' + raise exception.NotFound(_('No console pool of type %(console_type)s ' 'for compute host %(compute_host)s ' - 'on proxy host %(host)s') % - {'type': console_type, - 'compute_host': compute_host, - 'host': host}) + 'on proxy host %(host)s') % locals()) return result @@ -1981,9 +1980,7 @@ def console_get_by_pool_instance(context, pool_id, instance_id): first() if not result: raise exception.NotFound(_('No console for instance %(instance_id)s ' - 'in pool %(pool_id)s') % - {'instance_id': instance_id, - 'pool_id': pool_id}) + 'in pool %(pool_id)s') % locals()) return result @@ -2004,9 +2001,7 @@ def console_get(context, console_id, instance_id=None): query = query.filter_by(instance_id=instance_id) result = 
query.options(joinedload('pool')).first() if not result: - idesc = (_("on instance %s") % instance_id) if instance_id else "" + idesc = (_("on instance %s") % instance_id) if instance_id else "" raise exception.NotFound(_("No console with id %(console_id)s" - " %(instance)s") % - {'instance': idesc, - 'console_id': console_id}) + " %(idesc)s") % locals()) return result diff --git a/nova/exception.py b/nova/exception.py index ecd814e5d..2320e2214 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -33,8 +33,9 @@ class ProcessExecutionError(IOError): description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\ - % (description, cmd, exit_code, stdout, stderr) + message = _("%(description)s\nCommand: %(cmd)s\n" + "Exit code: %(exit_code)s\nStdout: %(stdout)r\n" + "Stderr: %(stderr)r") % locals() IOError.__init__(self, message) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 7c2d7177b..dd82a9366 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -45,8 +45,9 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - LOG.debug(_('(%s) publish (key: %s) %s'), - self.name, routing_key, message) + nm = self.name + LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)' + ' %(message)s') % locals()) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: @@ -92,8 +93,8 @@ class Backend(base.BaseBackend): def queue_bind(self, queue, exchange, routing_key, **kwargs): global EXCHANGES global QUEUES - LOG.debug(_('Binding %s to %s with key %s'), - queue, exchange, routing_key) + LOG.debug(_('Binding %(queue)s to %(exchange)s with' + ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): @@ -117,7 +118,7 @@ class Backend(base.BaseBackend): content_type=content_type, content_encoding=content_encoding) message.result = True - LOG.debug(_('Getting from %s: %s'), queue, message) + LOG.debug(_('Getting from %(queue)s: %(message)s') % locals()) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 81e2e36f9..43bc174d2 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -218,7 +218,7 @@ def _get_my_ip(): DEFINE_string('my_ip', _get_my_ip(), 'host ip address') DEFINE_list('region_list', [], - 'list of region=url pairs separated by commas') + 'list of region=fqdn pairs separated by commas') DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') diff --git a/nova/log.py b/nova/log.py index 4997d3f28..e1c9f46f4 100644 --- a/nova/log.py +++ b/nova/log.py @@ -40,15 +40,15 @@ from nova import version FLAGS = flags.FLAGS flags.DEFINE_string('logging_context_format_string', - '(%(name)s %(nova_version)s): %(levelname)s ' + '%(asctime)s %(levelname)s %(name)s ' '[%(request_id)s %(user)s ' '%(project)s] %(message)s', - 'format string to use for log messages') + 'format string to use for log messages with context') flags.DEFINE_string('logging_default_format_string', - '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' + '%(asctime)s %(levelname)s %(name)s [-] ' '%(message)s', - 'format string to use for log messages') + 'format string to use for log messages without context') 
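The bulk of this merge applies one mechanical transformation: positional %s logger arguments become named %(var)s placeholders interpolated with % locals(), so a translated format string can reorder its substitutions without breaking the call. A minimal sketch of the pattern in isolation (logger name and variables are illustrative; the gettext.install call mirrors the one added to pluginlib_nova.py later in this diff):

    import gettext
    gettext.install('nova', unicode=1)   # installs _() into builtins

    import logging
    logging.basicConfig()
    LOG = logging.getLogger('nova.example')
    LOG.setLevel(logging.DEBUG)

    def log_attach_failure(instance_id, mountpoint):
        # Positional form freezes word order for translators:
        #   LOG.debug(_("instance %s: attach failed %s"), instance_id, mountpoint)
        # Named form lets a translation reorder %(instance_id)s and
        # %(mountpoint)s freely; locals() supplies the mapping.
        LOG.debug(_("instance %(instance_id)s: attach failed"
                    " %(mountpoint)s, removing") % locals())

    log_attach_failure('i-00000001', '/dev/vdc')

One trade-off worth noting: interpolating with % locals() before the logger call builds the string even when the log level would discard it, whereas the positional form defers formatting to the handler.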
flags.DEFINE_string('logging_debug_format_suffix', 'from %(processName)s (pid=%(process)d) %(funcName)s' diff --git a/nova/network/manager.py b/nova/network/manager.py index 61de8055a..dd429d122 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -83,7 +83,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block') -flags.DEFINE_integer('cnt_vpn_clients', 5, +flags.DEFINE_integer('cnt_vpn_clients', 0, 'Number of addresses reserved for vpn clients') flags.DEFINE_string('network_driver', 'nova.network.linux_net', 'Driver to use for network creation') @@ -198,8 +198,9 @@ class NetworkManager(manager.Manager): raise exception.Error(_("IP %s leased that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error(_("IP %s leased to bad mac %s vs %s") % - (address, instance_ref['mac_address'], mac)) + inst_addr = instance_ref['mac_address'] + raise exception.Error(_("IP %(address)s leased to bad" + " mac %(inst_addr)s vs %(mac)s") % locals()) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], @@ -211,15 +212,16 @@ class NetworkManager(manager.Manager): def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug("Releasing IP %s", address, context=context) + LOG.debug(_("Releasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: raise exception.Error(_("IP %s released that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error(_("IP %s released from bad mac %s vs %s") % - (address, instance_ref['mac_address'], mac)) + inst_addr = instance_ref['mac_address'] + raise exception.Error(_("IP %(address)s released from" + " bad mac %(inst_addr)s vs %(mac)s") % locals()) if not fixed_ip_ref['leased']: LOG.warn(_("IP %s released that was not leased"), address, context=context) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index bc26fd3c5..05ddace4b 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -180,7 +180,7 @@ class S3(ErrorHandlingResource): def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" LOG.debug(_('List of buckets requested'), context=request.context) - buckets = [b for b in bucket.Bucket.all() \ + buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)] render_xml(request, {"ListAllMyBucketsResult": { @@ -268,12 +268,14 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" - LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) + bname = self.bucket.name + nm = self.name + LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals()) if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to get object %s from bucket " - "%s"), self.name, self.bucket.name, - context=request.context) + LOG.audit(_("Unauthorized attempt to get object %(nm)s" + " from bucket %(bname)s") % locals(), + context=request.context) raise exception.NotAuthorized() obj = self.bucket[urllib.unquote(self.name)] @@ -289,12 +291,13 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) + nm = self.name + bname = self.bucket.name + LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals()) if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to upload object %s to bucket " - "%s"), - self.name, self.bucket.name, context=request.context) + LOG.audit(_("Unauthorized attempt to upload object %(nm)s to" + " bucket %(bname)s") % locals(), context=request.context) raise exception.NotAuthorized() key = urllib.unquote(self.name) @@ -310,14 +313,14 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - - LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + nm = self.name + bname = self.bucket.name + LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(), context=request.context) if not self.bucket.is_authorized(request.context): - LOG.audit("Unauthorized attempt to delete object %s from " - "bucket %s", self.name, self.bucket.name, - context=request.context) + LOG.audit(_("Unauthorized attempt to delete object %(nm)s from " + "bucket %(bname)s") % locals(), context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -388,10 +391,10 @@ class ImagesResource(resource.Resource): image_location = get_argument(request, 'image_location', u'') image_path = os.path.join(FLAGS.images_path, image_id) - if not image_path.startswith(FLAGS.images_path) or \ - os.path.exists(image_path): + if ((not image_path.startswith(FLAGS.images_path)) or + os.path.exists(image_path)): LOG.audit(_("Not authorized to upload image: invalid directory " - "%s"), + "%s"), image_path, context=request.context) raise exception.NotAuthorized() @@ -425,8 +428,8 @@ class ImagesResource(resource.Resource): if operation: # operation implies publicity toggle newstatus = (operation == 'add') - LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, - newstatus, context=request.context) + LOG.audit(_("Toggling publicity flag of image %(image_id)s" + " %(newstatus)r") % locals(), context=request.context) image_object.set_public(newstatus) else: # other attributes imply update diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index abc28182e..41e0abd80 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -259,22 +259,25 @@ class Image(object): process_input=encrypted_key, check_exit_code=False) if err: - raise exception.Error("Failed to decrypt private key: %s" % err) + raise exception.Error(_("Failed to decrypt private key: %s") + % err) iv, err = utils.execute( 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, process_input=encrypted_iv, check_exit_code=False) if err: - raise exception.Error("Failed 
to decrypt initialization " - "vector: %s" % err) + raise exception.Error(_("Failed to decrypt initialization " + "vector: %s") % err) _out, err = utils.execute( 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename), check_exit_code=False) if err: - raise exception.Error("Failed to decrypt image file %s : %s" % - (encrypted_filename, err)) + raise exception.Error(_("Failed to decrypt image file " + "%(image_file)s: %(err)s") % + {'image_file': encrypted_filename, + 'err': err}) @staticmethod def untarzip_image(path, filename): diff --git a/nova/rpc.py b/nova/rpc.py index 49b11602b..01fc6d44b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -89,15 +89,16 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - LOG.exception(_("AMQP server on %s:%d is unreachable." - " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + fl_host = FLAGS.rabbit_host + fl_port = FLAGS.rabbit_port + fl_intv = FLAGS.rabbit_retry_interval + LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is" + " unreachable. Trying again in %(fl_intv)d seconds.") + % locals()) self.failed_connection = True if self.failed_connection: LOG.exception(_("Unable to connect to AMQP server " - "after %d tries. Shutting down."), + "after %d tries. Shutting down."), FLAGS.rabbit_max_retries) sys.exit(1) @@ -152,7 +153,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -167,7 +168,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug(_('received %s') % (message_data)) + LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -180,7 +181,7 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn(_('no method for message: %s') % (message_data)) + LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return @@ -343,7 +344,7 @@ def call(context, topic, msg): def cast(context, topic, msg): """Sends a message on a topic without waiting for a response""" - LOG.debug("Making asynchronous cast...") + LOG.debug(_("Making asynchronous cast...")) _pack_context(msg, context) conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a4d6dd574..e9b47512e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - LOG.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals()) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 47baf0d73..baf4966d4 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -48,7 +48,7 @@ class 
SimpleScheduler(chance.ChanceScheduler): service = db.service_get_by_args(context.elevated(), host, 'nova-compute') if not self.service_is_up(service): - raise driver.WillNotSchedule("Host %s is not alive" % host) + raise driver.WillNotSchedule(_("Host %s is not alive") % host) # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -80,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler): service = db.service_get_by_args(context.elevated(), host, 'nova-volume') if not self.service_is_up(service): - raise driver.WillNotSchedule("Host %s not available" % host) + raise driver.WillNotSchedule(_("Host %s not available") % host) # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow diff --git a/nova/service.py b/nova/service.py index efc08fd63..2c30997f2 100644 --- a/nova/service.py +++ b/nova/service.py @@ -38,6 +38,7 @@ from nova import log as logging from nova import flags from nova import rpc from nova import utils +from nova import version FLAGS = flags.FLAGS @@ -156,7 +157,8 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.audit(_("Starting %s node"), topic) + logging.audit(_("Starting %s node (version %s)"), topic, + version.version_string_with_vcs()) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -219,10 +221,10 @@ def serve(*services): name = '_'.join(x.binary for x in services) logging.debug(_("Serving %s"), name) - logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + flag_get = FLAGS.get(flag, None) + logging.debug("%(flag)s : %(flag_get)s" % locals()) for x in services: x.start() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 771b1fcc0..445cc6e8b 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,6 +87,16 @@ class CloudTestCase(test.TestCase): # NOTE(vish): create depends on pool, so just call helper directly return cloud._gen_key(self.context, self.context.user.id, name) + def test_describe_regions(self): + """Makes sure describe regions runs without raising an exception""" + result = self.cloud.describe_regions(self.context) + self.assertEqual(len(result['regionInfo']), 1) + regions = FLAGS.region_list + FLAGS.region_list = ["one=test_host1", "two=test_host2"] + result = self.cloud.describe_regions(self.context) + self.assertEqual(len(result['regionInfo']), 2) + FLAGS.region_list = regions + def test_describe_addresses(self): """Makes sure describe addresses runs without raising an exception""" address = "10.10.10.10" diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 85593ab46..4820e04fb 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -86,7 +86,8 @@ class RpcTestCase(test.TestCase): @staticmethod def echo(context, queue, value): """Calls echo in the passed queue""" - LOG.debug(_("Nested received %s, %s"), queue, value) + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) ret = rpc.call(context, queue, {"method": "echo", diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index f6800e3d9..0b9b847a0 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -256,7 +256,7 @@ class IptablesFirewallTestCase(test.TestCase): ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [349256:75777230]', 'COMMIT', - '# Completed on Tue Jan 18 23:47:56 2011' + '# Completed on Tue Jan 18 23:47:56 2011', ] def 
test_static_filters(self): diff --git a/nova/twistd.py b/nova/twistd.py index 556271999..6390a8144 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -156,7 +156,7 @@ def WrapTwistedOptions(wrapped): try: self.parseArgs(*argv) except TypeError: - raise usage.UsageError("Wrong number of arguments.") + raise usage.UsageError(_("Wrong number of arguments.")) self.postOptions() return args @@ -220,7 +220,7 @@ def stop(pidfile): time.sleep(0.1) except OSError, err: err = str(err) - if err.find("No such process") > 0: + if err.find(_("No such process")) > 0: if os.path.exists(pidfile): os.remove(pidfile) else: diff --git a/nova/utils.py b/nova/utils.py index 6d3ddd092..2f3bd2894 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -138,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - LOG.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % obj.returncode) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -214,9 +214,11 @@ def get_my_linklocal(interface): else: return 'fe00::' except IndexError as ex: - LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s") + % locals()) except ProcessExecutionError as ex: - LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s") + % locals()) except: return 'fe00::' diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 30dc1c79b..5facb7aff 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -129,7 +129,7 @@ class HyperVConnection(object): vm = self._lookup(instance.name) if vm is not None: raise exception.Duplicate(_('Attempt to create duplicate vm %s') % - instance.name) + instance.name) user = manager.AuthManager().get_user(instance['user_id']) project = manager.AuthManager().get_project(instance['project_id']) @@ -159,7 +159,7 @@ class HyperVConnection(object): vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() vs_gs_data.ElementName = instance['name'] (job, ret_val) = vs_man_svc.DefineVirtualSystem( - [], None, vs_gs_data.GetText_(1))[1:] + [], None, vs_gs_data.GetText_(1))[1:] if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) else: @@ -184,40 +184,40 @@ class HyperVConnection(object): memsetting.Limit = mem (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [memsetting.GetText_(1)]) + vm.path_(), [memsetting.GetText_(1)]) LOG.debug(_('Set memory for vm %s...'), instance.name) procsetting = vmsetting.associators( - wmi_result_class='Msvm_ProcessorSettingData')[0] + wmi_result_class='Msvm_ProcessorSettingData')[0] vcpus = long(instance['vcpus']) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus procsetting.Limit = vcpus (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [procsetting.GetText_(1)]) + vm.path_(), [procsetting.GetText_(1)]) LOG.debug(_('Set vcpus for vm %s...'), instance.name) def _create_disk(self, vm_name, vhdfile): """Create a disk and attach it to the vm""" - LOG.debug(_('Creating disk for %s by attaching disk file %s'), - vm_name, vhdfile) + LOG.debug(_('Creating disk for %(vm_name)s by attaching' + ' disk file %(vhdfile)s') % locals()) #Find the IDE controller for the vm. 
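The utils.py hunk a short way above keeps execute()'s behaviour and only tidies its debug line; combined with the reworked ProcessExecutionError message in nova/exception.py, the idiom is: run the command, log the return code, and raise with every relevant field named in the message. A condensed, self-contained sketch of that idiom (the real helper also supports process_input, addl_env and other options):

    import subprocess

    class ProcessExecutionError(IOError):
        def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None):
            description = "Unexpected error while running command."
            if exit_code is None:
                exit_code = '-'
            message = ("%(description)s\nCommand: %(cmd)s\n"
                       "Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
                       "Stderr: %(stderr)r") % locals()
            IOError.__init__(self, message)

    def execute(cmd, check_exit_code=True):
        # Shell out, collect both streams, and raise with full context on a
        # nonzero return code -- the same shape as nova.utils.execute.
        obj = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = obj.communicate()
        if check_exit_code and obj.returncode != 0:
            raise ProcessExecutionError(exit_code=obj.returncode,
                                        stdout=stdout, stderr=stderr, cmd=cmd)
        return stdout, stderr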
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) vm = vms[0] vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') + wmi_result_class='Msvm_VirtualSystemSettingData') rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') + wmi_result_class='MSVM_ResourceAllocationSettingData') ctrller = [r for r in rasds if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ - and r.Address == "0"] + and r.Address == "0"] #Find the default disk drive object for the vm and clone it. diskdflt = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ - AND InstanceID LIKE '%Default%'")[0] + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ + AND InstanceID LIKE '%Default%'")[0] diskdrive = self._clone_wmi_obj( - 'Msvm_ResourceAllocationSettingData', diskdflt) + 'Msvm_ResourceAllocationSettingData', diskdflt) #Set the IDE ctrller as parent. diskdrive.Parent = ctrller[0].path_() diskdrive.Address = 0 @@ -263,17 +263,18 @@ class HyperVConnection(object): default_nic_data = [n for n in emulatednics_data if n.InstanceID.rfind('Default') > 0] new_nic_data = self._clone_wmi_obj( - 'Msvm_EmulatedEthernetPortSettingData', - default_nic_data[0]) + 'Msvm_EmulatedEthernetPortSettingData', + default_nic_data[0]) #Create a port on the vswitch. (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, "", extswitch.path_()) if ret_val != 0: LOG.error(_('Failed creating a port on the external vswitch')) raise Exception(_('Failed creating port for %s'), - vm_name) - LOG.debug(_("Created switch port %s on switch %s"), - vm_name, extswitch.path_()) + vm_name) + ext_path = extswitch.path_() + LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s") + % locals()) #Connect the new nic to the new port. 
new_nic_data.Connection = [new_port] new_nic_data.ElementName = vm_name + ' nic' @@ -283,7 +284,7 @@ class HyperVConnection(object): new_resources = self._add_virt_resource(new_nic_data, vm) if new_resources is None: raise Exception(_('Failed to add nic to VM %s'), - vm_name) + vm_name) LOG.info(_("Created nic for %s "), vm_name) def _add_virt_resource(self, res_setting_data, target_vm): @@ -319,8 +320,10 @@ class HyperVConnection(object): if job.JobState != WMI_JOB_STATE_COMPLETED: LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription) return False - LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description, - job.ElapsedTime) + desc = job.Description + elap = job.ElapsedTime + LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ") + % locals()) return True def _find_external_network(self): @@ -386,7 +389,9 @@ class HyperVConnection(object): vhdfile = self._cim_conn.CIM_DataFile(Name=disk) for vf in vhdfile: vf.Delete() - LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name) + instance_name = instance.name + LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s") + % locals()) def get_info(self, instance_id): """Get information about the VM""" @@ -402,12 +407,14 @@ class HyperVConnection(object): summary_info = vs_man_svc.GetSummaryInformation( [4, 100, 103, 105], settings_paths)[1] info = summary_info[0] - LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ - cpu_time=%s"), instance_id, - str(HYPERV_POWER_STATE[info.EnabledState]), - str(info.MemoryUsage), - str(info.NumberOfProcessors), - str(info.UpTime)) + state = str(HYPERV_POWER_STATE[info.EnabledState]) + memusage = str(info.MemoryUsage) + numprocs = str(info.NumberOfProcessors) + uptime = str(info.UpTime) + + LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s," + " mem=%(memusage)s, num_cpu=%(numprocs)s," + " cpu_time=%(uptime)s") % locals()) return {'state': HYPERV_POWER_STATE[info.EnabledState], 'max_mem': info.MemoryUsage, @@ -441,22 +448,22 @@ class HyperVConnection(object): #already in the state requested success = True if success: - LOG.info(_("Successfully changed vm state of %s to %s"), vm_name, - req_state) + LOG.info(_("Successfully changed vm state of %(vm_name)s" + " to %(req_state)s") % locals()) else: - LOG.error(_("Failed to change vm state of %s to %s"), vm_name, - req_state) - raise Exception(_("Failed to change vm state of %s to %s"), - vm_name, req_state) + msg = _("Failed to change vm state of %(vm_name)s" + " to %(req_state)s") % locals() + LOG.error(msg) + raise Exception(msg) def attach_volume(self, instance_name, device_path, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise exception.NotFound('Cannot attach volume to missing %s vm' % - instance_name) + raise exception.NotFound('Cannot attach volume to missing %s vm' + % instance_name) def detach_volume(self, instance_name, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise exception.NotFound('Cannot detach volume from missing %s ' % - instance_name) + raise exception.NotFound('Cannot detach volume from missing %s ' + % instance_name) diff --git a/nova/virt/images.py b/nova/virt/images.py index ecf0e5efb..9c987e14d 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers): urlopened = urllib2.urlopen(request) urlretrieve(urlopened, path) - LOG.debug(_("Finished retreving %s -- placed in %s"), url, path) + LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals()) def 
_fetch_s3_image(image, path, user, project): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e70abb4e5..259e19a69 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -236,8 +236,9 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - LOG.info(_('instance %s: deleting instance files %s'), - instance['name'], target) + instance_name = instance['name'] + LOG.info(_('instance %(instance_name)s: deleting instance files' + ' %(target)s') % locals()) if os.path.exists(target): shutil.rmtree(target) @@ -418,7 +419,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - LOG.info(_('cool, it\'s a device')) + LOG.info(_("cool, it's a device")) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -426,7 +427,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - LOG.info(_('data: %r, fpath: %r'), data, fpath) + LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals()) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -434,7 +435,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - LOG.info(_('Contents of file %s: %r'), fpath, contents) + LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) return contents @exception.wrap_exception @@ -510,7 +511,6 @@ class LibvirtConnection(object): base_dir = os.path.join(FLAGS.instances_path, '_base') if not os.path.exists(base_dir): os.mkdir(base_dir) - os.chmod(base_dir, 0777) base = os.path.join(base_dir, fname) if not os.path.exists(base): fn(target=base, *args, **kwargs) @@ -541,7 +541,6 @@ class LibvirtConnection(object): # ensure directories exist and are writable utils.execute('mkdir -p %s' % basepath(suffix='')) - utils.execute('chmod 0777 %s' % basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') @@ -623,21 +622,22 @@ class LibvirtConnection(object): 'dns': network_ref['dns'], 'ra_server': ra_server} if key or net: + inst_name = inst['name'] + img_id = inst.image_id if key: - LOG.info(_('instance %s: injecting key into image %s'), - inst['name'], inst.image_id) + LOG.info(_('instance %(inst_name)s: injecting key into' + ' image %(img_id)s') % locals()) if net: - LOG.info(_('instance %s: injecting net into image %s'), - inst['name'], inst.image_id) + LOG.info(_('instance %(inst_name)s: injecting net into' + ' image %(img_id)s') % locals()) try: disk.inject_data(basepath('disk'), key, net, partition=target_partition, nbd=FLAGS.use_cow_images) except Exception as e: # This could be a windows image, or a vmdk format disk - LOG.warn(_('instance %s: ignoring error injecting data' - ' into image %s (%s)'), - inst['name'], inst.image_id, e) + LOG.warn(_('instance %(inst_name)s: ignoring error injecting' + ' data into image %(img_id)s (%(e)s)') % locals()) if FLAGS.libvirt_type == 'uml': utils.execute('sudo chown root %s' % basepath('disk')) @@ -645,9 +645,6 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
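The libvirt_conn.py hunks above remove the chmod 0777 calls on the shared _base cache directory and on each instance directory; the caching scheme itself is unchanged. Roughly: _cache_image fetches an image once into _base keyed by filename, then reuses the cached copy for every later instance. A simplified sketch of that flow (the copy-on-write branch of the real method is omitted):

    import os
    import shutil

    def cache_image(fetch_fn, target, fname, base_dir):
        """Fetch an image into base_dir once, then copy it per instance."""
        if not os.path.exists(base_dir):
            os.mkdir(base_dir)          # no longer chmod'ed 0777 after this merge
        base = os.path.join(base_dir, fname)
        if not os.path.exists(base):
            fetch_fn(target=base)       # cache miss: fetch into _base
        shutil.copyfile(base, target)   # per-instance copy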
LOG.debug(_('instance %s: starting toXML method'), instance['name']) - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) - LOG.debug(_('instance %s: starting toXML method'), instance['name']) network = db.network_get_by_instance(context.get_admin_context(), instance['id']) # FIXME(vish): stick this in db @@ -732,7 +729,8 @@ class LibvirtConnection(object): 'cpu_time': cpu_time} def get_diagnostics(self, instance_name): - raise exception.APIError("diagnostics are not supported for libvirt") + raise exception.APIError(_("diagnostics are not supported " + "for libvirt")) def get_disks(self, instance_name): """ @@ -1363,11 +1361,11 @@ class IptablesFirewallDriver(FirewallDriver): instance['id']) def _dhcp_server_for_instance(self, instance): - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) return network['gateway'] def _ra_server_for_instance(self, instance): - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) return network['ra_server'] diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 4bfaf4b57..e8352771c 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake") def log_db_contents(msg=None): - LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content)) + text = msg or "" + content = pformat(_db_content) + LOG.debug(_("%(text)s: _db_content => %(content)s") % locals()) def reset(): @@ -331,7 +333,8 @@ class SessionBase(object): if impl is not None: def callit(*params): - LOG.debug(_('Calling %s %s'), name, impl) + localname = name + LOG.debug(_('Calling %(localname)s %(impl)s') % locals()) self._check_session(params) return impl(*params) return callit diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b80ff4dba..4afd28dd8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -22,6 +22,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import os import pickle import re +import time import urllib from xml.dom import minidom @@ -133,7 +134,8 @@ class VMHelper(HelperBase): 'pae': 'true', 'viridian': 'true'} LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) - LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref) + instance_name = instance.name + LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref @classmethod @@ -153,10 +155,11 @@ class VMHelper(HelperBase): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref) + LOG.debug(_('Creating VBD for VM %(vm_ref)s,' + ' VDI %(vdi_ref)s ... 
') % locals()) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, - vdi_ref) + LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,' + ' VDI %(vdi_ref)s.') % locals()) return vbd_ref @classmethod @@ -208,11 +211,11 @@ class VMHelper(HelperBase): vif_rec['other_config'] = {} vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} - LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, - network_ref) + LOG.debug(_('Creating VIF for VM %(vm_ref)s,' + ' network %(network_ref)s.') % locals()) vif_ref = session.call_xenapi('VIF.create', vif_rec) - LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, - vm_ref, network_ref) + LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' + ' network %(network_ref)s.') % locals()) return vif_ref @classmethod @@ -230,8 +233,9 @@ class VMHelper(HelperBase): 'other_config': {}, 'sm_config': {}, 'tags': []}) - LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, - name_label, virtual_size, read_only, sr_ref) + LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,' + ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.') + % locals()) return vdi_ref @classmethod @@ -241,7 +245,8 @@ class VMHelper(HelperBase): """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) + LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") + % locals()) vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] @@ -254,8 +259,8 @@ class VMHelper(HelperBase): template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] - LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, - vm_ref) + LOG.debug(_('Created snapshot %(template_vm_ref)s from' + ' VM %(vm_ref)s.') % locals()) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -268,8 +273,8 @@ class VMHelper(HelperBase): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. """ - logging.debug(_("Asking xapi to upload %s as ID %s"), - vdi_uuids, image_id) + logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" + " ID %(image_id)s") % locals()) params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, @@ -309,7 +314,7 @@ class VMHelper(HelperBase): meta, image_file = c.get_image(image) virtual_size = int(meta['size']) vdi_size = virtual_size - LOG.debug(_("Size for image %s:%d"), image, virtual_size) + LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) if type == ImageType.DISK: # Make room for MBR. 
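The fetch path just below grows the requested VDI by MBR_SIZE_BYTES when the image is a whole disk, so a partition table fits in front of the filesystem. Spelled out with concrete numbers (the constant values are assumptions; this diff shows only the names):

    SECTOR_SIZE = 512         # bytes per sector; value assumed
    MBR_SIZE_SECTORS = 63     # one legacy track for the MBR; value assumed
    MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE   # = 32256 bytes

    virtual_size = 10 * 1024 ** 3             # a hypothetical 10 GB image
    vdi_size = virtual_size + MBR_SIZE_BYTES  # room for the partition table

    # _write_partition (later in this file) then carves one primary
    # partition immediately after the MBR:
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1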
vdi_size += MBR_SIZE_BYTES @@ -343,7 +348,7 @@ class VMHelper(HelperBase): def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, type): url = images.image_url(image) - LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) + LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url @@ -498,7 +503,8 @@ def get_vhd_parent(session, vdi_rec): parent_uuid = vdi_rec['sm_config']['vhd-parent'] parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) - LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) + vdi_uuid = vdi_rec['uuid'] + LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals()) return parent_ref, parent_rec else: return None @@ -539,16 +545,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, def _poll_vhds(): attempts['counter'] += 1 if attempts['counter'] > max_attempts: - msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...") - % (attempts['counter'], max_attempts)) + counter = attempts['counter'] + msg = (_("VHD coalesce attempts exceeded (%(counter)d >" + " %(max_attempts)d), giving up...") % locals()) raise exception.Error(msg) scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): - LOG.debug(_("Parent %s doesn't match original parent %s, " - "waiting for coalesce..."), parent_uuid, - original_parent_uuid) + LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" + " %(original_parent_uuid)s, waiting for coalesce...") + % locals()) else: # Breakout of the loop (normally) and return the parent_uuid raise utils.LoopingCallDone(parent_uuid) @@ -566,8 +573,8 @@ def get_vdi_for_vm_safely(session, vm_ref): else: num_vdis = len(vdi_refs) if num_vdis != 1: - raise Exception(_("Unexpected number of VDIs (%s) found for " - "VM %s") % (num_vdis, vm_ref)) + raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found" + " for VM %(vm_ref)s") % locals()) vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) @@ -589,6 +596,27 @@ def find_sr(session): return None +def remap_vbd_dev(dev): + """Return the appropriate location for a plugged-in VBD device + + Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be + fixed in future versions: + https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875 + + For now, we work around it by just doing a string replace. + """ + # NOTE(sirp): This hack can go away when we pull support for Maverick + should_remap = FLAGS.xenapi_remap_vbd_dev + if not should_remap: + return dev + + old_prefix = 'xvd' + new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix + remapped_dev = dev.replace(old_prefix, new_prefix) + + return remapped_dev + + def with_vdi_attached_here(session, vdi, read_only, f): this_vm_ref = get_this_vm_ref(session) vbd_rec = {} @@ -611,7 +639,13 @@ def with_vdi_attached_here(session, vdi, read_only, f): LOG.debug(_('Plugging VBD %s ... 
'), vbd) session.get_xenapi().VBD.plug(vbd) LOG.debug(_('Plugging VBD %s done.'), vbd) - return f(session.get_xenapi().VBD.get_device(vbd)) + orig_dev = session.get_xenapi().VBD.get_device(vbd) + LOG.debug(_('VBD %s plugged as %s'), vbd, orig_dev) + dev = remap_vbd_dev(orig_dev) + if dev != orig_dev: + LOG.debug(_('VBD %(vbd)s plugged into wrong dev, ' + 'remapping to %(dev)s') % locals()) + return f(dev) finally: LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) @@ -624,6 +658,7 @@ def vbd_unplug_with_retry(session, vbd): DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're seeing the device still in use, even when all processes using the device should be dead.""" + # FIXME(sirp): We can use LoopingCall here w/o blocking sleep() while True: try: session.get_xenapi().VBD.unplug(vbd) @@ -679,8 +714,8 @@ def _write_partition(virtual_size, dev): primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - LOG.debug(_('Writing partition table %d %d to %s...'), - primary_first, primary_last, dest) + LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' + ' to %(dest)s...') % locals()) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6c2fd6a68..628a171fa 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -104,7 +104,9 @@ class VMOps(object): network_ref, instance.mac_address) LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) - LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) + instance_name = instance.name + LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') + % locals()) # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) @@ -196,7 +198,8 @@ class VMOps(object): template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) except self.XenAPI.Failure, exc: - logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc) + logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") + % locals()) return try: @@ -252,41 +255,71 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def destroy(self, instance): - """Destroy VM instance""" - vm = VMHelper.lookup(self._session, instance.name) - return self._destroy(instance, vm, shutdown=True) - - def _destroy(self, instance, vm, shutdown=True): - """ Destroy VM instance """ - if vm is None: - # Don't complain, just return. This lets us clean up instances - # that have already disappeared from the underlying platform. 
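with_vdi_attached_here above now routes the device name returned by VBD.get_device through the new remap_vbd_dev before handing it to the callback. Reduced to a standalone function, with the two new xenapi_conn.py flags turned into plain arguments whose defaults mirror the flag defaults:

    def remap_vbd_dev(dev, should_remap=False, new_prefix='sd'):
        # Ubuntu Maverick exposes xvd* VBDs as sd*; when the workaround is
        # enabled, rewrite the prefix so the rest of the code finds the
        # device it actually got.
        if not should_remap:
            return dev
        return dev.replace('xvd', new_prefix)

    assert remap_vbd_dev('xvdb') == 'xvdb'                     # flag off (default)
    assert remap_vbd_dev('xvdb', should_remap=True) == 'sdb'   # Maverick workaround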
+ def _shutdown(self, instance, vm): + """Shutdown an instance """ + state = self.get_info(instance['name'])['state'] + if state == power_state.SHUTDOWN: + LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") % + locals()) return - # Get the VDIs related to the VM + + try: + task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) + self._session.wait_for_task(instance.id, task) + except self.XenAPI.Failure, exc: + LOG.exception(exc) + + def _destroy_vdis(self, instance, vm): + """Destroys all VDIs associated with a VM """ vdis = VMHelper.lookup_vm_vdis(self._session, vm) - if shutdown: + + if not vdis: + return + + for vdi in vdis: try: - task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) + task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: LOG.exception(exc) - # Disk clean-up - if vdis: - for vdi in vdis: - try: - task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(instance.id, task) - except self.XenAPI.Failure, exc: - LOG.exception(exc) - # VM Destroy + def _destroy_vm(self, instance, vm): + """Destroys a VM record """ try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: LOG.exception(exc) + def destroy(self, instance): + """ + Destroy VM instance + + This is the method exposed by xenapi_conn.destroy(). The rest of the + destroy_* methods are internal. + """ + vm = VMHelper.lookup(self._session, instance.name) + return self._destroy(instance, vm, shutdown=True) + + def _destroy(self, instance, vm, shutdown=True): + """ + Destroys VM instance by performing: + + 1. A shutdown if requested + 2. Destroying associated VDIs + 3. Destroying that actual VM record + """ + if vm is None: + # Don't complain, just return. This lets us clean up instances + # that have already disappeared from the underlying platform. 
+ return + + if shutdown: + self._shutdown(instance, vm) + + self._destroy_vdis(instance, vm) + self._destroy_vm(instance, vm) + def _wait_with_callback(self, instance_id, task, callback): ret = None try: diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 0cd15b950..d5ebd29d5 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -71,7 +71,7 @@ class VolumeHelper(HelperBase): session.get_xenapi_host(), record, '0', label, description, 'iscsi', '', False, {}) - LOG.debug(_('Introduced %s as %s.'), label, sr_ref) + LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals()) return sr_ref except cls.XenAPI.Failure, exc: LOG.exception(exc) @@ -98,20 +98,20 @@ class VolumeHelper(HelperBase): try: pbds = session.get_xenapi().SR.get_PBDs(sr_ref) except cls.XenAPI.Failure, exc: - LOG.warn(_('Ignoring exception %s when getting PBDs for %s'), - exc, sr_ref) + LOG.warn(_('Ignoring exception %(exc)s when getting PBDs' + ' for %(sr_ref)s') % locals()) for pbd in pbds: try: session.get_xenapi().PBD.unplug(pbd) except cls.XenAPI.Failure, exc: - LOG.warn(_('Ignoring exception %s when unplugging PBD %s'), - exc, pbd) + LOG.warn(_('Ignoring exception %(exc)s when unplugging' + ' PBD %(pbd)s') % locals()) try: session.get_xenapi().SR.forget(sr_ref) LOG.debug(_("Forgetting SR %s done."), sr_ref) except cls.XenAPI.Failure, exc: - LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc, - sr_ref) + LOG.warn(_('Ignoring exception %(exc)s when forgetting' + ' SR %(sr_ref)s') % locals()) @classmethod def introduce_vdi(cls, session, sr_ref): @@ -172,8 +172,8 @@ class VolumeHelper(HelperBase): (volume_id is None) or \ (target_host is None) or \ (target_iqn is None): - raise StorageError(_('Unable to obtain target information %s, %s') - % (device_path, mountpoint)) + raise StorageError(_('Unable to obtain target information' + ' %(device_path)s, %(mountpoint)s') % locals()) volume_info = {} volume_info['deviceNumber'] = device_number volume_info['volumeId'] = volume_id diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 189f968c6..d89a6f995 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -48,8 +48,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # NOTE: No Resource Pool concept so far - LOG.debug(_("Attach_volume: %s, %s, %s"), - instance_name, device_path, mountpoint) + LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s," + " %(mountpoint)s") % locals()) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
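The vmops.py refactor above splits the old monolithic _destroy into _shutdown, _destroy_vdis and _destroy_vm, with each XenAPI task wrapped in its own try/except. A runnable toy of the resulting control flow, showing that one stuck disk no longer prevents the VM record from being destroyed (StubSession is a stand-in for the real XenAPI session):

    class StubSession(object):
        def call_xenapi(self, method, ref):
            if ref == 'vdi-stuck':
                raise RuntimeError('VDI_IN_USE')
            return 'task-for-%s' % ref

    def destroy(session, vm_ref, vdi_refs, shutdown=True):
        if vm_ref is None:
            return                  # instance already gone; nothing to clean up
        if shutdown:
            session.call_xenapi('Async.VM.hard_shutdown', vm_ref)
        for vdi_ref in vdi_refs:    # _destroy_vdis: one try/except per disk
            try:
                session.call_xenapi('Async.VDI.destroy', vdi_ref)
            except RuntimeError:
                pass                # logged rather than swallowed in the real code
        session.call_xenapi('Async.VM.destroy', vm_ref)   # _destroy_vm still runs

    destroy(StubSession(), 'vm-1', ['vdi-root', 'vdi-stuck'])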
# But first, retrieve target info, like Host, IQN, LUN and SCSIID vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) @@ -66,9 +66,8 @@ class VolumeOps(object): except StorageError, exc: LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise Exception(_('Unable to create VDI on SR %s for instance %s') - % (sr_ref, - instance_name)) + raise Exception(_('Unable to create VDI on SR %(sr_ref)s for' + ' instance %(instance_name)s') % locals()) else: try: vbd_ref = VMHelper.create_vbd(self._session, @@ -78,9 +77,8 @@ class VolumeOps(object): except self.XenAPI.Failure, exc: LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise Exception(_('Unable to use SR %s for instance %s') - % (sr_ref, - instance_name)) + raise Exception(_('Unable to use SR %(sr_ref)s for' + ' instance %(instance_name)s') % locals()) else: try: task = self._session.call_xenapi('Async.VBD.plug', @@ -92,8 +90,8 @@ class VolumeOps(object): sr_ref) raise Exception(_('Unable to attach volume to instance %s') % instance_name) - LOG.info(_('Mountpoint %s attached to instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %(mountpoint)s attached to' + ' instance %(instance_name)s') % locals()) def detach_volume(self, instance_name, mountpoint): """Detach volume storage to VM instance""" @@ -103,7 +101,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # Detach VBD from VM - LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) + LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s") + % locals()) device_number = VolumeHelper.mountpoint_to_number(mountpoint) try: vbd_ref = VMHelper.find_vbd_by_number(self._session, @@ -125,5 +124,5 @@ class VolumeOps(object): LOG.exception(exc) # Forget SR VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - LOG.info(_('Mountpoint %s detached from instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %(mountpoint)s detached from' + ' instance %(instance_name)s') % locals()) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c57c883c9..78f0d14b9 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -109,6 +109,14 @@ flags.DEFINE_string('target_port', flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') +# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick, when we pull +# support for it, we should remove this +flags.DEFINE_bool('xenapi_remap_vbd_dev', False, + 'Used to enable the remapping of VBD dev ' + '(Works around an issue in Ubuntu Maverick)') +flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd', + 'Specify prefix to remap VBD dev to ' + '(ex. 
/dev/xvdb -> /dev/sdb)') def get_connection(_): @@ -290,19 +298,14 @@ class XenAPISession(object): return elif status == "success": result = self._session.xenapi.task.get_result(task) - LOG.info(_("Task [%s] %s status: success %s") % ( - name, - task, - result)) + LOG.info(_("Task [%(name)s] %(task)s status:" + " success %(result)s") % locals()) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) action["error"] = str(error_info) - LOG.warn(_("Task [%s] %s status: %s %s") % ( - name, - task, - status, - error_info)) + LOG.warn(_("Task [%(name)s] %(task)s status:" + " %(status)s %(error_info)s") % locals()) done.send_exception(self.XenAPI.Failure(error_info)) db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: diff --git a/nova/volume/api.py b/nova/volume/api.py index ce4831cc3..0bcd8a3b0 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -41,10 +41,11 @@ class API(base.Base): def create(self, context, size, name, description): if quota.allowed_volumes(context, 1, size) < 1: - LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"), - context.project_id, size) + pid = context.project_id + LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" + " %(size)sG volume") % locals()) raise quota.QuotaError(_("Volume quota exceeded. You cannot " - "create a volume of size %s") % size) + "create a volume of size %s") % size) options = { 'size': size, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 5fefa10cf..da7307733 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -100,6 +100,14 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" + try: + self._try_execute("sudo lvdisplay %s/%s" % + (FLAGS.volume_group, + volume['name'])) + except Exception as e: + # If the volume isn't present, then don't attempt to delete + return True + self._try_execute("sudo lvremove -f %s/%s" % (FLAGS.volume_group, volume['name'])) @@ -218,8 +226,14 @@ class ISCSIDriver(VolumeDriver): def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" - iscsi_target = self.db.volume_get_iscsi_target_num(context, + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) self._sync_exec("sudo ietadm --op new " @@ -258,8 +272,23 @@ class ISCSIDriver(VolumeDriver): def remove_export(self, context, volume): """Removes an export for a logical volume.""" - iscsi_target = self.db.volume_get_iscsi_target_num(context, + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + # ietadm show will exit with an error + # this export has already been removed + self._execute("sudo ietadm --op show --tid=%s " % iscsi_target) + except Exception as e: + LOG.info(_("Skipping remove_export. 
No iscsi_target " + + "is presently exported for volume: %d"), volume['id']) + return + self._execute("sudo ietadm --op delete --tid=%s " "--lun=0" % iscsi_target) self._execute("sudo ietadm --op delete --tid=%s" % diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 6348539c5..6f8e25e19 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -84,7 +84,10 @@ class VolumeManager(manager.Manager): volumes = self.db.volume_get_all_by_host(ctxt, self.host) LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: - self.driver.ensure_export(ctxt, volume) + if volume['status'] in ['available', 'in-use']: + self.driver.ensure_export(ctxt, volume) + else: + LOG.info(_("volume %s: skipping export"), volume_ref['name']) def create_volume(self, context, volume_id): """Creates and exports the volume.""" @@ -99,12 +102,19 @@ class VolumeManager(manager.Manager): # before passing it to the driver. volume_ref['host'] = self.host - LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], - volume_ref['size']) - self.driver.create_volume(volume_ref) + try: + vol_name = volume_ref['name'] + vol_size = volume_ref['size'] + LOG.debug(_("volume %(vol_name)s: creating lv of" + " size %(vol_size)sG") % locals()) + self.driver.create_volume(volume_ref) - LOG.debug(_("volume %s: creating export"), volume_ref['name']) - self.driver.create_export(context, volume_ref) + LOG.debug(_("volume %s: creating export"), volume_ref['name']) + self.driver.create_export(context, volume_ref) + except Exception as e: + self.db.volume_update(context, + volume_ref['id'], {'status': 'error'}) + raise e now = datetime.datetime.utcnow() self.db.volume_update(context, @@ -121,10 +131,18 @@ class VolumeManager(manager.Manager): raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - LOG.debug(_("volume %s: removing export"), volume_ref['name']) - self.driver.remove_export(context, volume_ref) - LOG.debug(_("volume %s: deleting"), volume_ref['name']) - self.driver.delete_volume(volume_ref) + + try: + LOG.debug(_("volume %s: removing export"), volume_ref['name']) + self.driver.remove_export(context, volume_ref) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) + self.driver.delete_volume(volume_ref) + except Exception as e: + self.db.volume_update(context, + volume_ref['id'], + {'status': 'error_deleting'}) + raise e + self.db.volume_destroy(context, volume_id) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True diff --git a/nova/wsgi.py b/nova/wsgi.py index 4f5307d80..e01cc1e1e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -64,7 +64,8 @@ class Server(object): def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" - logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port) + arg0 = sys.argv[0] + logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) @@ -143,7 +144,7 @@ class Application(object): See the end of http://pythonpaste.org/webob/modules/dec.html for more info. 
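The volume/manager.py hunks above give volumes explicit failure states: a create that dies midway flips the row to 'error' and a delete that dies flips it to 'error_deleting', rather than leaving the volume stuck in 'creating' or 'deleting' forever. The create path, reduced to a runnable toy (a dict stands in for the volumes table):

    def create_volume(db, driver, context, volume_id):
        # Any driver failure marks the volume 'error' before re-raising,
        # so operators can find and clean up half-created volumes.
        volume_ref = db[volume_id]
        try:
            driver.create_volume(volume_ref)
            driver.create_export(context, volume_ref)
        except Exception:
            volume_ref['status'] = 'error'
            raise
        volume_ref['status'] = 'available'

    class FailingDriver(object):
        def create_volume(self, volume_ref):
            raise RuntimeError('lvcreate failed')

    db = {42: {'status': 'creating'}}
    try:
        create_volume(db, FailingDriver(), None, 42)
    except RuntimeError:
        pass
    assert db[42]['status'] == 'error'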
""" - raise NotImplementedError("You must implement __call__") + raise NotImplementedError(_("You must implement __call__")) class Middleware(Application): diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index 8e7a829d5..7fea1136d 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -19,6 +19,8 @@ # that we need. # +import gettext +gettext.install('nova', unicode=1) import httplib import logging import logging.handlers @@ -60,7 +62,7 @@ def ignore_failure(func, *args, **kwargs): try: return func(*args, **kwargs) except XenAPI.Failure, e: - logging.error('Ignoring XenAPI.Failure %s', e) + logging.error(_('Ignoring XenAPI.Failure %s'), e) return None @@ -78,19 +80,25 @@ def validate_exists(args, key, default=None): """ if key in args: if len(args[key]) == 0: - raise ArgumentError('Argument %r value %r is too short.' % - (key, args[key])) + raise ArgumentError(_('Argument %(key)s value %(value)s is too ' + 'short.') % + {'key': key, + 'value': args[key]}) if not ARGUMENT_PATTERN.match(args[key]): - raise ArgumentError('Argument %r value %r contains invalid ' - 'characters.' % (key, args[key])) + raise ArgumentError(_('Argument %(key)s value %(value)s contains ' + 'invalid characters.') % + {'key': key, + 'value': args[key]}) if args[key][0] == '-': - raise ArgumentError('Argument %r value %r starts with a hyphen.' - % (key, args[key])) + raise ArgumentError(_('Argument %(key)s value %(value)s starts ' + 'with a hyphen.') % + {'key': key, + 'value': args[key]}) return args[key] elif default is not None: return default else: - raise ArgumentError('Argument %s is required.' % key) + raise ArgumentError(_('Argument %s is required.') % key) def validate_bool(args, key, default=None): @@ -105,8 +113,10 @@ def validate_bool(args, key, default=None): elif value.lower() == 'false': return False else: - raise ArgumentError("Argument %s may not take value %r. " - "Valid values are ['true', 'false']." % (key, value)) + raise ArgumentError(_("Argument %(key)s may not take value %(value)s. " + "Valid values are ['true', 'false'].") + % {'key': key, + 'value': value}) def exists(args, key): @@ -116,7 +126,7 @@ def exists(args, key): if key in args: return args[key] else: - raise ArgumentError('Argument %s is required.' % key) + raise ArgumentError(_('Argument %s is required.') % key) def optional(args, key): @@ -149,8 +159,13 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only): 'other_config': {}, 'sm_config': {}, 'tags': []}) - logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, - virtual_size, read_only, sr_ref) + logging.debug(_('Created VDI %(vdi_ref)s (%(label)s, %(size)s, ' + '%(read_only)s) on %(sr_ref)s.') % + {'vdi_ref': vdi_ref, + 'label': name_label, + 'size': virtual_size, + 'read_only': read_only, + 'sr_ref': sr_ref}) return vdi_ref @@ -169,19 +184,19 @@ def with_vdi_in_dom0(session, vdi, read_only, f): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VDI %s ... ', vdi) + logging.debug(_('Creating VBD for VDI %s ... '), vdi) vbd = session.xenapi.VBD.create(vbd_rec) - logging.debug('Creating VBD for VDI %s done.', vdi) + logging.debug(_('Creating VBD for VDI %s done.'), vdi) try: - logging.debug('Plugging VBD %s ... ', vbd) + logging.debug(_('Plugging VBD %s ... 
'), vbd) session.xenapi.VBD.plug(vbd) - logging.debug('Plugging VBD %s done.', vbd) + logging.debug(_('Plugging VBD %s done.'), vbd) return f(session.xenapi.VBD.get_device(vbd)) finally: - logging.debug('Destroying VBD for VDI %s ... ', vdi) + logging.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) ignore_failure(session.xenapi.VBD.destroy, vbd) - logging.debug('Destroying VBD for VDI %s done.', vdi) + logging.debug(_('Destroying VBD for VDI %s done.'), vdi) def vbd_unplug_with_retry(session, vbd): @@ -192,19 +207,20 @@ def vbd_unplug_with_retry(session, vbd): while True: try: session.xenapi.VBD.unplug(vbd) - logging.debug('VBD.unplug successful first time.') + logging.debug(_('VBD.unplug successful first time.')) return except XenAPI.Failure, e: if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug('VBD.unplug rejected: retrying...') + logging.debug(_('VBD.unplug rejected: retrying...')) time.sleep(1) elif (len(e.details) > 0 and e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug('VBD.unplug successful eventually.') + logging.debug(_('VBD.unplug successful eventually.')) return else: - logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + e) return diff --git a/run_tests.py b/run_tests.py index 7b5e2192a..5c8436aee 100644 --- a/run_tests.py +++ b/run_tests.py @@ -60,7 +60,8 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': c = config.Config(stream=sys.stdout, env=os.environ, - verbosity=3) + verbosity=3, + plugins=core.DefaultPluginManager()) runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, diff --git a/run_tests.sh b/run_tests.sh index 0574643c5..cf1affcea 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -71,6 +71,4 @@ then fi fi -run_tests - -pep8 --repeat --show-pep8 --show-source bin/* nova setup.py +run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1 diff --git a/tools/pip-requires b/tools/pip-requires index 895e81eb3..3587df644 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -27,4 +27,5 @@ PasteDeploy paste sqlalchemy-migrate netaddr +sphinx glance |
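The run_tests.py hunk above hands nose an explicit plugin manager; a Config built programmatically otherwise defaults to a no-op plugin manager, so plugins registered through entry points never activate. The relevant construction in isolation (assuming nose is installed):

    import os
    import sys

    from nose import config
    from nose import core

    # DefaultPluginManager loads nose's built-in plugins plus any plugins
    # registered through setuptools entry points.
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      plugins=core.DefaultPluginManager())

In the same spirit, run_tests.sh now chains the suite and the style check -- run_tests && pep8 ... || exit 1 -- so a pep8 failure fails the whole run, and excludes the generated vcsversion.py from the check.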
