50 files changed, 753 insertions, 472 deletions
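The bulk of this commit converts positional %s arguments in translated log messages to named %(var)s placeholders filled from locals(), so translators can reorder substitutions without breaking them. A minimal Python 2 sketch of the two forms (the report function and its arguments are hypothetical, shown only for contrast)::

    import logging
    from gettext import gettext as _

    LOG = logging.getLogger('nova.example')

    def report(api, config):
        # Positional form (before): a translation cannot swap the
        # order of the two values without breaking the substitution.
        LOG.debug(_("App Config: %s\n%r"), api, config)

        # Named form (after): placeholders are keyed by variable name
        # and filled from locals(), so a translated string may reorder
        # %(api)s and %(config)r freely. Note the message is rendered
        # eagerly here instead of being deferred to the logging call.
        LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
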
@@ -21,6 +21,7 @@ Jay Pipes <jaypipes@gmail.com> Jesse Andrews <anotherjesse@gmail.com> Joe Heck <heckj@mac.com> Joel Moore <joelbm24@gmail.com> +John Dewey <john@dewey.ws> Jonathan Bryce <jbryce@jbryce.com> Josh Durgin <joshd@hq.newdream.net> Josh Kearney <josh.kearney@rackspace.com> @@ -49,6 +50,7 @@ Soren Hansen <soren.hansen@rackspace.com> Thierry Carrez <thierry@openstack.org> Todd Willey <todd@ansolabs.com> Trey Morris <trey.morris@rackspace.com> +Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in> Vishvananda Ishaya <vishvananda@gmail.com> Youcef Laribi <Youcef.Laribi@eu.citrix.com> Zhixue Wu <Zhixue.Wu@citrix.com> diff --git a/bin/nova-api b/bin/nova-api index 44bbfaf86..11176a021 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -56,7 +56,7 @@ def run_app(paste_config_file): if config is None: LOG.debug(_("No paste configuration for app: %s"), api) continue - LOG.debug(_("App Config: %s\n%r"), api, config) + LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) wsgi.paste_config_to_flags(config, { "verbose": FLAGS.verbose, "%s_host" % api: config.get('host', '0.0.0.0'), diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 1a994d956..d38ba2543 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -120,9 +120,9 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - LOG.debug(_("Called %s for mac %s with ip %s and " - "hostname %s on interface %s"), - action, mac, ip, hostname, interface) + msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and" + " hostname %(hostname)s on interface %(interface)s") % locals() + LOG.debug(msg) globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) diff --git a/bin/nova-manage b/bin/nova-manage index 1c885f8a6..1b70ebf17 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -267,6 +267,14 @@ class RoleCommands(object): self.manager.remove_role(user, role, project) +def _db_error(caught_exception): + print caught_exception + print _("The above error may show that the database has not " + "been created.\nPlease create a database using " + "nova-manage sync db before running this command.") + exit(1) + + class UserCommands(object): """Class for managing users.""" @@ -282,13 +290,19 @@ class UserCommands(object): def admin(self, name, access=None, secret=None): """creates a new admin and prints exports arguments: name [access] [secret]""" - user = self.manager.create_user(name, access, secret, True) + try: + user = self.manager.create_user(name, access, secret, True) + except exception.DBError, e: + _db_error(e) self._print_export(user) def create(self, name, access=None, secret=None): """creates a new user and prints exports arguments: name [access] [secret]""" - user = self.manager.create_user(name, access, secret, False) + try: + user = self.manager.create_user(name, access, secret, False) + except exception.DBError, e: + _db_error(e) self._print_export(user) def delete(self, name): @@ -409,9 +423,14 @@ class ProjectCommands(object): with open(filename, 'w') as f: f.write(zip_file) except db.api.NoMoreNetworks: - print ('No more networks available. If this is a new ' - 'installation, you need\nto call something like this:\n\n' - ' nova-manage network create 10.0.0.0/8 10 64\n\n') + print _('No more networks available. 
If this is a new ' + 'installation, you need\nto call something like this:\n\n' + ' nova-manage network create 10.0.0.0/8 10 64\n\n') + except exception.ProcessExecutionError, e: + print e + print _("The above error may show that the certificate db has not " + "been created.\nPlease create a database by running a " + "nova-api server on this host.") class FloatingIpCommands(object): diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry index ab20268a9..c53482852 100644 --- a/bin/nova-spoolsentry +++ b/bin/nova-spoolsentry @@ -74,10 +74,8 @@ class SpoolSentry(object): return rv def send_data(self, data): - data = { - 'data': base64.b64encode(pickle.dumps(data).encode('zlib')), - 'key': self.key - } + data = {'data': base64.b64encode(pickle.dumps(data).encode('zlib')), + 'key': self.key} req = urllib2.Request(self.sentry_url) res = urllib2.urlopen(req, urllib.urlencode(data)) if res.getcode() != 200: diff --git a/contrib/nova.sh b/contrib/nova.sh index 08dc89bae..9259035ca 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -1,15 +1,14 @@ #!/usr/bin/env bash DIR=`pwd` CMD=$1 -SOURCE_BRANCH=lp:nova -if [ -n "$2" ]; then - SOURCE_BRANCH=$2 +if [ "$CMD" = "branch" ]; then + SOURCE_BRANCH=${2:-lp:nova} + DIRNAME=${3:-nova} +else + DIRNAME=${2:-nova} fi -DIRNAME=nova + NOVA_DIR=$DIR/$DIRNAME -if [ -n "$3" ]; then - NOVA_DIR=$DIR/$3 -fi if [ ! -n "$HOST_IP" ]; then # NOTE(vish): This will just get the first ip in the list, so if you @@ -24,6 +23,8 @@ TEST=${TEST:-0} USE_LDAP=${USE_LDAP:-0} # Use OpenDJ instead of OpenLDAP when using LDAP USE_OPENDJ=${USE_OPENDJ:-0} +# Use IPv6 +USE_IPV6=${USE_IPV6:-0} LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} NET_MAN=${NET_MAN:-VlanManager} # NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface @@ -43,30 +44,17 @@ else AUTH=dbdriver.DbDriver fi -mkdir -p /etc/nova -cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF ---verbose ---nodaemon ---dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf ---network_manager=nova.network.manager.$NET_MAN ---cc_host=$HOST_IP ---routing_source_ip=$HOST_IP ---sql_connection=$SQL_CONN ---auth_driver=nova.auth.$AUTH ---libvirt_type=$LIBVIRT_TYPE -NOVA_CONF_EOF - -if [ -n "$FLAT_INTERFACE" ]; then - echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf -fi - if [ "$CMD" == "branch" ]; then sudo apt-get install -y bzr + if [ ! 
-e "$DIR/.bzr" ]; then + bzr init-repo $DIR + fi rm -rf $NOVA_DIR bzr branch $SOURCE_BRANCH $NOVA_DIR cd $NOVA_DIR mkdir -p $NOVA_DIR/instances mkdir -p $NOVA_DIR/networks + exit fi # You should only have to run this once @@ -74,7 +62,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-software-properties sudo add-apt-repository ppa:nova-core/trunk sudo apt-get update - sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables + sudo apt-get install -y dnsmasq-base kpartx kvm gawk iptables ebtables sudo apt-get install -y user-mode-linux kvm libvirt-bin sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server sudo apt-get install -y lvm2 iscsitarget open-iscsi @@ -85,16 +73,16 @@ if [ "$CMD" == "install" ]; then sudo /etc/init.d/libvirt-bin restart sudo modprobe nbd sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot - sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy + sudo apt-get install -y python-migrate python-eventlet python-gflags python-ipy python-tempita sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah - sudo apt-get install -y python-paste python-pastedeploy -#For IPV6 - sudo apt-get install -y python-netaddr - sudo apt-get install -y radvd -#(Nati) Note that this configuration is only needed for nova-network node. - sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding" - sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra" - + sudo apt-get install -y python-netaddr python-paste python-pastedeploy python-glance + + if [ "$USE_IPV6" == 1 ]; then + sudo apt-get install -y radvd + sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding" + sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra" + fi + if [ "$USE_MYSQL" == 1 ]; then cat <<MYSQL_PRESEED | debconf-set-selections mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS @@ -103,8 +91,10 @@ mysql-server-5.1 mysql-server/start_on_boot boolean true MYSQL_PRESEED apt-get install -y mysql-server python-mysqldb fi - wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz - tar -C $DIR -zxf images.tgz + mkdir -p $DIR/images + wget -c http://images.ansolabs.com/tty.tgz + tar -C $DIR/images -zxf tty.tgz + exit fi NL=`echo -ne '\015'` @@ -115,9 +105,31 @@ function screen_it { } if [ "$CMD" == "run" ]; then + + cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf +--network_manager=nova.network.manager.$NET_MAN +--cc_host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=$SQL_CONN +--auth_driver=nova.auth.$AUTH +--libvirt_type=$LIBVIRT_TYPE +NOVA_CONF_EOF + + if [ -n "$FLAT_INTERFACE" ]; then + echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf + fi + + if [ "$USE_IPV6" == 1 ]; then + echo "--use_ipv6" >>$NOVA_DIR/bin/nova.conf + fi + killall dnsmasq - #For IPv6 - killall radvd + if [ "$USE_IPV6" == 1 ]; then + killall radvd + fi screen -d -m -S nova -t nova sleep 1 if [ "$USE_MYSQL" == 1 ]; then @@ -150,6 +162,8 @@ if [ "$CMD" == "run" ]; then cd $DIR fi + # create the database + $NOVA_DIR/bin/nova-manage db sync # create an admin user called 'admin' $NOVA_DIR/bin/nova-manage user admin admin admin admin # create a project called 'admin' with project manager of 'admin' @@ -178,6 +192,7 @@ if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then sleep 2 # delete volumes . 
$NOVA_DIR/novarc; euca-describe-volumes | grep vol- | cut -f2 | xargs -n1 euca-delete-volume + sleep 2 fi if [ "$CMD" == "run" ] || [ "$CMD" == "clean" ]; then @@ -192,5 +207,4 @@ if [ "$CMD" == "scrub" ]; then else virsh list | grep i- | awk '{print \$1}' | xargs -n1 virsh destroy fi - vblade-persist ls | grep vol- | awk '{print \$1\" \"\$2}' | xargs -n2 vblade-persist destroy fi diff --git a/doc/ext/nova_todo.py b/doc/ext/nova_todo.py index efc0c3edd..67bbfd2e0 100644 --- a/doc/ext/nova_todo.py +++ b/doc/ext/nova_todo.py @@ -26,7 +26,7 @@ def process_todo_nodes(app, doctree, fromdocname): # reading through docutils for the proper way to construct an empty list lists = [] for i in xrange(5): - lists.append(nodes.bullet_list("", nodes.Text('',''))); + lists.append(nodes.bullet_list("", nodes.Text('',''))) lists[i].remove(lists[i][0]) lists[i].set_class('todo_list') @@ -42,7 +42,8 @@ def process_todo_nodes(app, doctree, fromdocname): # Create a reference newnode = nodes.reference('', '') - link = _('%s, line %d') % (filename, todo_info['lineno']); + line_info = todo_info['lineno'] + link = _('%(filename)s, line %(line_info)d') % locals() innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst index df7078180..f2f25b060 100644 --- a/doc/source/adminguide/multi.node.install.rst +++ b/doc/source/adminguide/multi.node.install.rst @@ -1,84 +1,85 @@ Installing Nova on Multiple Servers =================================== - + When you move beyond evaluating the technology and into building an actual production environment, you will need to know how to configure your datacenter and how to deploy components across your clusters. This guide should help you through that process. - + You can install multiple nodes to increase performance and availability of the OpenStack Compute installation. - + This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward. For a starting architecture, these instructions describing installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Requirements for a multi-node installation ------------------------------------------ - + * You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though. * For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies. * For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL. - + Assumptions ----------- - + * Networking is configured between/through the physical machines on a single subnet. * Installation and execution are both performed by ROOT user. 
+ Scripted Installation --------------------- -A script available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node. +A script available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node. -You must run these scripts with root permissions. +You must run these scripts with root permissions. From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/. :: - + wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1 Ensure you can execute the script by modifying the permissions on the script file. :: - + sudo chmod 755 Nova_CC_Installer_v0.1 :: - + sudo ./Nova_CC_Installer_v0.1 -Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node. +Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node. Restart related services:: - + libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart You can go to the `Configuration section`_ for next steps. Manual Installation - Step-by-Step ---------------------------------- -The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only. +The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only. Cloud Controller Installation ````````````````````````````` On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_. - + Step 1 - Use apt-get to get the latest code ------------------------------------------- 1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a pre-requisite for setting up the nova package repo: :: - + sudo apt-get install python-software-properties sudo add-apt-repository ppa:nova-core/trunk - + 2. Run update. :: - + sudo apt-get update 3. Install python required packages, nova-packages, and helper apps. @@ -93,15 +94,15 @@ Step 2 Set up configuration file (installed in /etc/nova) --------------------------------------------------------- 1. Nova development has consolidated all config files to nova.conf as of November 2010. 
There is a default set of options that are already configured in nova.conf: - + :: - + --daemonize=1 --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge --logdir=/var/log/nova --state_path=/var/lib/nova - + The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly: --sql_connection ### Location of Nova SQL DB @@ -131,31 +132,31 @@ The following code can be cut and paste, and edited to your setup: Note: CC_ADDR=<the external IP address of your cloud controller> Detailed explanation of the following example is available above. - + :: - + --sql_connection=mysql://root:nova@<CC_ADDR>/nova --s3_host=<CC_ADDR> --rabbit_host=<CC_ADDR> ---cc_host=<CC_ADDR> ---verbose +--cc_host=<CC_ADDR> +--verbose --ec2_url=http://<CC_ADDR>:8773/services/Cloud --network_manager=nova.network.manager.VlanManager --fixed_range=<network/prefix> ---network_size=<# of addrs> - +--network_size=<# of addrs> + 2. Create a “nova” group, and set permissions:: addgroup nova - + The Nova config file should have its owner set to root:nova, and mode set to 0644, since they contain your MySQL server's root password. :: chown -R root:nova /etc/nova - chmod 644 /etc/nova/nova.conf - + chmod 644 /etc/nova/nova.conf + Step 3 - Setup the SQL DB (MySQL for this setup) ------------------------------------------------ - + 1. First you 'preseed' to bypass all the installation prompts:: bash @@ -165,59 +166,59 @@ Step 3 - Setup the SQL DB (MySQL for this setup) mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS mysql-server-5.1 mysql-server/start_on_boot boolean true MYSQL_PRESEED - + 2. Install MySQL:: - + apt-get install -y mysql-server - + 3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any:: sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf service mysql restart 4. MySQL DB configuration: - + Create NOVA database:: mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;' - + Update the DB to include user 'root'@'%' with super user privileges:: mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" - + Set mySQL root password:: - mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');" - + mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');" + Compute Node Installation ````````````````````````` - + Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node. - + Network Configuration --------------------- If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically. - + Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. 
To do this, edit your network interfaces config to look like the following:: < begin /etc/network/interfaces > # The loopback network interface auto lo iface lo inet loopback - + # Networking for NOVA auto br100 - + iface br100 inet dhcp bridge_ports eth0 bridge_stp off bridge_maxwait 0 bridge_fd 0 < end /etc/network/interfaces > - + Next, restart networking to apply the changes:: - + sudo /etc/init.d/networking restart Configuration @@ -234,32 +235,32 @@ These are the commands you run to update the database if needed, and then set up /usr/bin/python /usr/bin/nova-manage user admin <user_name> /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name> /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project> - + Here is an example of what this looks like with real data:: /usr/bin/python /usr/bin/nova-manage db sync /usr/bin/python /usr/bin/nova-manage user admin dub /usr/bin/python /usr/bin/nova-manage project create dubproject dub /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255 - + (I chose a /24 since that falls inside my /12 range I set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and I am using the max IP’s available in a /24. You can choose to use any valid amount that you would like.) - + Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o. - + On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device. - - + + Step 2 - Create Nova certifications ----------------------------------- - -1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions. + +1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions. :: mkdir –p /root/creds /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip - -2. Unzip them in your home directory, and add them to your environment. + +2. Unzip them in your home directory, and add them to your environment. :: @@ -271,7 +272,7 @@ Step 3 - Restart all relevant services -------------------------------------- Restart all six services in total, just to cover the entire spectrum:: - + libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart Step 4 - Closing steps, and cleaning up @@ -294,19 +295,19 @@ You can then use `euca2ools` to test some items:: euca-describe-images euca-describe-instances - + If you have issues with the API key, you may need to re-source your creds file:: . /root/creds/novarc - + If you don’t get any immediate errors, you’re successfully making calls to your cloud! 
-Spinning up a VM for Testing +Spinning up a VM for Testing ```````````````````````````` -(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.) +(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.) -The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM. +The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM. UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_. @@ -332,13 +333,13 @@ Boot your instance: :: - euca-run-instances $emi -k mykey -t m1.tiny + euca-run-instances $emi -k mykey -t m1.tiny ($emi is replaced with the output from the previous command) Checking status, and confirming communication: -Once you have booted the instance, you can check the status the the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM. +Once you have booted the instance, you can check the status the the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM. :: @@ -360,4 +361,4 @@ You can determine the instance-id with `euca-describe-instances`, and the format For more information in creating you own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages for more information! -Enjoy your new private cloud, and play responsibly!
\ No newline at end of file +Enjoy your new private cloud, and play responsibly! diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/adminguide/nova.manage.rst index 0ec67c69c..0e9a29b6b 100644 --- a/doc/source/adminguide/nova.manage.rst +++ b/doc/source/adminguide/nova.manage.rst @@ -42,6 +42,17 @@ You can also run with a category argument such as user to see a list of all comm These sections describe the available categories and arguments for nova-manage. +Nova Db +~~~~~~~ + +``nova-manage db version`` + + Print the current database version. + +``nova-manage db sync`` + + Sync the database up to the most recent version. This is the standard way to create the db as well. + Nova User ~~~~~~~~~ @@ -68,7 +79,7 @@ Nova User ``nova-manage user modify <accesskey> <secretkey> <admin?T/F>`` Updates the indicated user keys, indicating with T or F if the user is an admin user. Leave any argument blank if you do not want to update it. - + Nova Project ~~~~~~~~~~~~ @@ -79,7 +90,7 @@ Nova Project ``nova-manage project create <projectname>`` Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database). - + ``nova-manage project delete <projectname>`` Delete a nova project with the name <projectname>. @@ -87,7 +98,7 @@ Nova Project ``nova-manage project environment <projectname> <username>`` Exports environment variables for the named project to a file named novarc. - + ``nova-manage project list`` Outputs a list of all the projects to the screen. @@ -103,27 +114,27 @@ Nova Project ``nova-manage project zipfile`` Compresses all related files for a created project into a zip file nova.zip. - + Nova Role ~~~~~~~~~ -nova-manage role <action> [<argument>] +nova-manage role <action> [<argument>] ``nova-manage role add <username> <rolename> <(optional) projectname>`` - Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. + Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``nova-manage role has <username> <projectname>`` Checks the user or project and responds with True if the user has a global role with a particular project. ``nova-manage role remove <username> <rolename>`` - Remove the indicated role from the user. + Remove the indicated role from the user. Nova Shell ~~~~~~~~~~ ``nova-manage shell bpython`` - Starts a new bpython shell. + Starts a new bpython shell. ``nova-manage shell ipython`` @@ -150,12 +161,12 @@ Nova VPN ``nova-manage vpn run <projectname>`` - Starts the VPN for the named project. + Starts the VPN for the named project. ``nova-manage vpn spawn`` Runs all VPNs. - + Nova Floating IPs ~~~~~~~~~~~~~~~~~ @@ -165,8 +176,8 @@ Nova Floating IPs ``nova-manage floating delete <ip_range>`` - Deletes floating IP addresses in the range given. - + Deletes floating IP addresses in the range given. + ``nova-manage floating list`` Displays a list of all floating IP addresses. 
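The nova-manage documentation above makes db sync the standard way to create the database, and the new _db_error helper in bin/nova-manage (earlier in this diff) steers operators toward it when a command fails before the database exists. A Python 2 sketch of that pattern, with DBError stubbed in locally as an assumption in place of nova.exception.DBError::

    import sys

    class DBError(Exception):
        """Local stand-in for nova.exception.DBError."""

    def _db_error(caught_exception):
        # Print the raw error, then point the operator at the fix
        # and exit non-zero, as the bin/nova-manage hunk does.
        print caught_exception
        print ("The above error may show that the database has not "
               "been created.\nPlease create a database using "
               "nova-manage db sync before running this command.")
        sys.exit(1)

    def create_admin(name):
        try:
            # Simulate the DB layer failing because no tables exist.
            raise DBError("no such table: users")
        except DBError, e:
            _db_error(e)
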
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst index 119e3855b..ff43aa90b 100644 --- a/doc/source/adminguide/single.node.install.rst +++ b/doc/source/adminguide/single.node.install.rst @@ -52,7 +52,13 @@ When the installation is complete, you'll see the following lines: Finished processing dependencies for nova==2010.1 -Step 4: Create a Nova administrator +Step 4: Create the Nova Database +-------------------------------- +Type or copy/paste in the following line to create your nova db:: + + sudo nova-manage db sync + +Step 5: Create a Nova administrator ----------------------------------- Type or copy/paste in the following line to create a user named "anne.":: @@ -63,10 +69,10 @@ You see an access key and a secret key export, such as these made-up ones::: export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7 -Step 5: Create the network +Step 6: Create the network -------------------------- -Type or copy/paste in the following line to create a network prior to creating a project. +Type or copy/paste in the following line to create a network prior to creating a project. :: @@ -76,7 +82,7 @@ For this command, the IP address is the cidr notation for your netmask, such as After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database. -Step 6: Create a project with the user you created +Step 7: Create a project with the user you created -------------------------------------------------- Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne. @@ -106,7 +112,7 @@ Type or copy/paste in the following line to create a project named IRT (for Ice Data Base Updated -Step 7: Unzip the nova.zip +Step 8: Unzip the nova.zip -------------------------- You should have a nova.zip file in your current working directory. Unzip it with this command: @@ -128,7 +134,7 @@ You'll see these files extract. extracting: cacert.pem -Step 8: Source the rc file +Step 9: Source the rc file -------------------------- Type or copy/paste the following to source the novarc file in your current working directory. @@ -137,14 +143,14 @@ Type or copy/paste the following to source the novarc file in your current worki . novarc -Step 9: Pat yourself on the back :) +Step 10: Pat yourself on the back :) ----------------------------------- Congratulations, your cloud is up and running, you’ve created an admin user, created a network, retrieved the user's credentials and put them in your environment. Now you need an image. -Step 9: Get an image +Step 11: Get an image -------------------- To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server. @@ -168,7 +174,7 @@ To make things easier, we've provided a small image on the Rackspace CDN. 
Use th -Step 10: Decompress the image file +Step 12: Decompress the image file ---------------------------------- Use this command to extract the image files::: @@ -187,7 +193,7 @@ You get a directory listing like so::: |-- image `-- info.json -Step 11: Send commands to upload sample image to the cloud +Step 13: Send commands to upload sample image to the cloud ---------------------------------------------------------- Type or copy/paste the following commands to create a manifest for the kernel.:: @@ -340,7 +346,7 @@ You should see this in response::: Type or copy/paste the following commands to ssh to the instance using your private key.:: ssh -i mykey.priv root@10.0.0.3 - + Troubleshooting Installation ---------------------------- diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst index 0cb6c7c90..bb9d7a7fe 100644 --- a/doc/source/man/novamanage.rst +++ b/doc/source/man/novamanage.rst @@ -42,6 +42,17 @@ You can also run with a category argument such as user to see a list of all comm These sections describe the available categories and arguments for nova-manage. +Nova Db +~~~~~~~ + +``nova-manage db version`` + + Print the current database version. + +``nova-manage db sync`` + + Sync the database up to the most recent version. This is the standard way to create the db as well. + Nova User ~~~~~~~~~ @@ -68,7 +79,7 @@ Nova User ``nova-manage user modify <accesskey> <secretkey> <admin?T/F>`` Updates the indicated user keys, indicating with T or F if the user is an admin user. Leave any argument blank if you do not want to update it. - + Nova Project ~~~~~~~~~~~~ @@ -79,7 +90,7 @@ Nova Project ``nova-manage project create <projectname>`` Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database). - + ``nova-manage project delete <projectname>`` Delete a nova project with the name <projectname>. @@ -87,7 +98,7 @@ Nova Project ``nova-manage project environment <projectname> <username>`` Exports environment variables for the named project to a file named novarc. - + ``nova-manage project list`` Outputs a list of all the projects to the screen. @@ -103,27 +114,27 @@ Nova Project ``nova-manage project zipfile`` Compresses all related files for a created project into a zip file nova.zip. - + Nova Role ~~~~~~~~~ -nova-manage role <action> [<argument>] +nova-manage role <action> [<argument>] ``nova-manage role add <username> <rolename> <(optional) projectname>`` - Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. + Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``nova-manage role has <username> <projectname>`` Checks the user or project and responds with True if the user has a global role with a particular project. ``nova-manage role remove <username> <rolename>`` - Remove the indicated role from the user. + Remove the indicated role from the user. 
Nova Shell ~~~~~~~~~~ ``nova-manage shell bpython`` - Starts a new bpython shell. + Starts a new bpython shell. ``nova-manage shell ipython`` @@ -150,20 +161,20 @@ Nova VPN ``nova-manage vpn run <projectname>`` - Starts the VPN for the named project. + Starts the VPN for the named project. ``nova-manage vpn spawn`` Runs all VPNs. - + Nova Floating IPs ~~~~~~~~~~~~~~~~~ ``nova-manage floating create <host> <ip_range>`` Creates floating IP addresses for the named host by the given range. - floating delete <ip_range> Deletes floating IP addresses in the range given. - + floating delete <ip_range> Deletes floating IP addresses in the range given. + ``nova-manage floating list`` Displays a list of all floating IP addresses. diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample deleted file mode 100644 index 1ecfba635..000000000 --- a/krm_mapping.json.sample +++ /dev/null @@ -1,3 +0,0 @@ -{ - "machine" : ["kernel", "ramdisk"] -} diff --git a/nova/adminclient.py b/nova/adminclient.py index b2609c8c4..3cdd8347f 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -190,6 +190,45 @@ class HostInfo(object): setattr(self, name, value) +class InstanceType(object): + """ + Information about a Nova instance type, as parsed through SAX. + + **Fields include** + + * name + * vcpus + * disk_gb + * memory_mb + * flavor_id + + """ + + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.vcpus = None + self.disk_gb = None + self.memory_mb = None + self.flavor_id = None + + def __repr__(self): + return 'InstanceType:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "memoryMb": + self.memory_mb = str(value) + elif name == "flavorId": + self.flavor_id = str(value) + elif name == "diskGb": + self.disk_gb = str(value) + else: + setattr(self, name, str(value)) + + class NovaAdminClient(object): def __init__( @@ -373,3 +412,8 @@ class NovaAdminClient(object): def get_hosts(self): return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)]) + + def get_instance_types(self): + """Grabs the list of all users.""" + return self.apiconn.get_list('DescribeInstanceTypes', {}, + [('item', InstanceType)]) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 238cb0f38..fc9a37908 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -131,9 +131,11 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - LOG.warn(_('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.'), - access_key, failures, FLAGS.lockout_minutes) + lock_mins = FLAGS.lockout_minutes + msg = _('Access key %(access_key)s has had %(failures)d' + ' failed authentications and will be locked out' + ' for %(lock_mins)d minutes.') % locals() + LOG.warn(msg) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res @@ -168,7 +170,7 @@ class Authenticate(wsgi.Middleware): req.path) # Be explicit for what exceptions are 403, the rest bubble as 500 except (exception.NotFound, exception.NotAuthorized) as ex: - LOG.audit(_("Authentication Failure: %s"), str(ex)) + LOG.audit(_("Authentication Failure: %s"), ex.args[0]) raise webob.exc.HTTPForbidden() # Authenticated! 
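Several hunks in this file, and in the Executor further down, replace str(ex) with ex.args[0] when logging exceptions. Under Python 2, str() on an exception carrying a translated unicode message performs an implicit ASCII encode and can raise UnicodeEncodeError; ex.args[0] returns the original message object untouched. A minimal Python 2 sketch (the sample message is invented)::

    # -*- coding: utf-8 -*-

    class NotAuthorized(Exception):
        pass

    try:
        # A translated message may arrive as a unicode object
        # containing non-ASCII characters.
        raise NotAuthorized(u'acc\xe8s refus\xe9')
    except NotAuthorized, ex:
        # str(ex) would implicitly encode the message to ASCII and
        # raise UnicodeEncodeError; args[0] is the untouched original.
        message = ex.args[0]
        print message.encode('utf-8')
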
@@ -179,8 +181,10 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt - LOG.audit(_('Authenticated Request For %s:%s)'), user.name, - project.name, context=req.environ['ec2.context']) + uname = user.name + pname = project.name + msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals() + LOG.audit(msg, context=req.environ['ec2.context']) return self.application @@ -206,7 +210,7 @@ class Requestify(wsgi.Middleware): LOG.debug(_('action: %s'), action) for key, value in args.items(): - LOG.debug(_('arg: %s\t\tval: %s'), key, value) + LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals()) # Success! api_request = apirequest.APIRequest(self.controller, action, args) @@ -277,8 +281,8 @@ class Authorizer(wsgi.Middleware): if self._matches_any_role(context, allowed_roles): return self.application else: - LOG.audit(_("Unauthorized request for controller=%s " - "and action=%s"), controller, action, context=context) + LOG.audit(_('Unauthorized request for controller=%(controller)s ' + 'and action=%(action)s') % locals(), context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -310,17 +314,18 @@ class Executor(wsgi.Application): try: result = api_request.invoke(context) except exception.NotFound as ex: - LOG.info(_('NotFound raised: %s'), str(ex), context=context) - return self._error(req, context, type(ex).__name__, str(ex)) + LOG.info(_('NotFound raised: %s'), ex.args[0], context=context) + return self._error(req, context, type(ex).__name__, ex.args[0]) except exception.ApiError as ex: - LOG.exception(_('ApiError raised: %s'), str(ex), context=context) + LOG.exception(_('ApiError raised: %s'), ex.args[0], + context=context) if ex.code: - return self._error(req, context, ex.code, str(ex)) + return self._error(req, context, ex.code, ex.args[0]) else: - return self._error(req, context, type(ex).__name__, str(ex)) + return self._error(req, context, type(ex).__name__, ex.args[0]) except Exception as ex: extra = {'environment': req.environ} - LOG.exception(_('Unexpected error raised: %s'), str(ex), + LOG.exception(_('Unexpected error raised: %s'), ex.args[0], extra=extra, context=context) return self._error(req, context, @@ -343,7 +348,8 @@ class Executor(wsgi.Application): '<Response><Errors><Error><Code>%s</Code>' '<Message>%s</Message></Error></Errors>' '<RequestID>%s</RequestID></Response>' % - (code, message, context.request_id)) + (utils.utf8(code), utils.utf8(message), + utils.utf8(context.request_id))) return resp diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 758b612e8..d7e899d12 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -26,6 +26,7 @@ from nova import db from nova import exception from nova import log as logging from nova.auth import manager +from nova.compute import instance_types LOG = logging.getLogger('nova.api.ec2.admin') @@ -62,6 +63,14 @@ def host_dict(host): return {} +def instance_dict(name, inst): + return {'name': name, + 'memory_mb': inst['memory_mb'], + 'vcpus': inst['vcpus'], + 'disk_gb': inst['local_gb'], + 'flavor_id': inst['flavorid']} + + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. 
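The next hunk adds a DescribeInstanceTypes admin call whose payload is built by the instance_dict helper introduced above; the matching client-side parser is the new InstanceType class in nova/adminclient.py. A rough Python 2 sketch of the payload shape, with the two sample flavors assumed here rather than taken from nova.compute.instance_types::

    # Sample flavors only; the real table lives in
    # nova.compute.instance_types.INSTANCE_TYPES.
    INSTANCE_TYPES = {
        'm1.tiny': {'memory_mb': 512, 'vcpus': 1,
                    'local_gb': 0, 'flavorid': 1},
        'm1.small': {'memory_mb': 2048, 'vcpus': 1,
                     'local_gb': 20, 'flavorid': 2},
    }

    def instance_dict(name, inst):
        # Same field mapping as the helper added in admin.py: note that
        # local_gb/flavorid are renamed to disk_gb/flavor_id on the wire.
        return {'name': name,
                'memory_mb': inst['memory_mb'],
                'vcpus': inst['vcpus'],
                'disk_gb': inst['local_gb'],
                'flavor_id': inst['flavorid']}

    def describe_instance_types():
        return {'instanceTypeSet': [instance_dict(n, v) for n, v in
                                    INSTANCE_TYPES.iteritems()]}
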
@@ -70,6 +79,10 @@ class AdminController(object): def __str__(self): return 'AdminController' + def describe_instance_types(self, _context, **_kwargs): + return {'instanceTypeSet': [instance_dict(n, v) for n, v in + instance_types.INSTANCE_TYPES.iteritems()]} + def describe_user(self, _context, name, **_kwargs): """Returns user data, including access and secret keys.""" return user_dict(manager.AuthManager().get_user(name)) @@ -111,19 +124,23 @@ class AdminController(object): """Add or remove a role for a user and project.""" if operation == 'add': if project: - LOG.audit(_("Adding role %s to user %s for project %s"), role, - user, project, context=context) + msg = _("Adding role %(role)s to user %(user)s" + " for project %(project)s") % locals() + LOG.audit(msg, context=context) else: - LOG.audit(_("Adding sitewide role %s to user %s"), role, user, - context=context) + msg = _("Adding sitewide role %(role)s to" + " user %(user)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': if project: - LOG.audit(_("Removing role %s from user %s for project %s"), - role, user, project, context=context) + msg = _("Removing role %(role)s from user %(user)s" + " for project %(project)s") % locals() + LOG.audit(msg, context=context) else: - LOG.audit(_("Removing sitewide role %s from user %s"), role, - user, context=context) + msg = _("Removing sitewide role %(role)s" + " from user %(user)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().remove_role(user, role, project) else: raise exception.ApiError(_('operation must be add or remove')) @@ -139,8 +156,9 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) - LOG.audit(_("Getting x509 for user: %s on project: %s"), name, - project, context=context) + msg = _("Getting x509 for user: %(name)s" + " on project: %(project)s") % locals() + LOG.audit(msg, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -156,8 +174,9 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" - LOG.audit(_("Create project %s managed by %s"), name, manager_user, - context=context) + msg = _("Create project %(name)s managed by" + " %(manager_user)s") % locals() + LOG.audit(msg, context=context) return project_dict( manager.AuthManager().create_project( name, @@ -181,12 +200,13 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': - LOG.audit(_("Adding user %s to project %s"), user, project, - context=context) + msg = _("Adding user %(user)s to project %(project)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': - LOG.audit(_("Removing user %s from project %s"), user, project, - context=context) + msg = _("Removing user %(user)s from" + " project %(project)s") % locals() + LOG.audit(msg, context=context) manager.AuthManager().remove_from_project(user, project) else: raise exception.ApiError(_('operation must be add or remove')) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 78576470a..d8a2b5f53 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -93,8 +93,10 @@ class APIRequest(object): method = 
getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: - _error = _('Unsupported API request: controller = %s,' - 'action = %s') % (self.controller, self.action) + controller = self.controller + action = self.action + _error = _('Unsupported API request: controller = %(controller)s,' + ' action = %(action)s') % locals() LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 43b9a88e1..22b8c19cb 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -529,11 +529,18 @@ class CloudController(object): def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: - volume_id = [ec2_id_to_id(x) for x in volume_id] - volumes = self.volume_api.get_all(context) - # NOTE(vish): volume_id is an optional list of volume ids to filter by. - volumes = [self._format_volume(context, v) for v in volumes - if volume_id is None or v['id'] in volume_id] + volumes = [] + for ec2_id in volume_id: + internal_id = ec2_id_to_id(ec2_id) + try: + volume = self.volume_api.get(context, internal_id) + volumes.append(volume) + except exception.NotFound: + raise exception.NotFound(_("Volume %s not found") + % ec2_id) + else: + volumes = self.volume_api.get_all(context) + volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): @@ -601,8 +608,9 @@ class CloudController(object): def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_id = ec2_id_to_id(volume_id) instance_id = ec2_id_to_id(instance_id) - LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id, - instance_id, device, context=context) + msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" + " at %(device)s") % locals() + LOG.audit(msg, context=context) self.compute_api.attach_volume(context, instance_id=instance_id, volume_id=volume_id, @@ -657,8 +665,15 @@ class CloudController(object): reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: - instance_id = [ec2_id_to_id(x) for x in instance_id] - instances = [self.compute_api.get(context, x) for x in instance_id] + instances = [] + for ec2_id in instance_id: + internal_id = ec2_id_to_id(ec2_id) + try: + instance = self.compute_api.get(context, internal_id) + instances.append(instance) + except exception.NotFound: + raise exception.NotFound(_("Instance %s not found") + % ec2_id) else: instances = self.compute_api.get_all(context, **kwargs) for instance in instances: @@ -751,8 +766,8 @@ class CloudController(object): return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): - LOG.audit(_("Associate address %s to instance %s"), public_ip, - instance_id, context=context) + LOG.audit(_("Associate address %(public_ip)s to" + " instance %(instance_id)s") % locals(), context=context) instance_id = ec2_id_to_id(instance_id) self.compute_api.associate_floating_ip(context, instance_id=instance_id, @@ -840,8 +855,9 @@ class CloudController(object): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - LOG.audit(_("Registered image %s with id %s"), image_location, - image_id, context=context) + msg = _("Registered image %(image_location)s with" + " id %(image_id)s") % locals() + LOG.audit(msg, context=context) return {'imageId': 
image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index f2caac483..c70bb39ed 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -38,9 +38,6 @@ from nova.api.openstack import shared_ip_groups LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS -flags.DEFINE_string('os_krm_mapping_file', - 'krm_mapping.json', - 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.') flags.DEFINE_bool('allow_admin_api', False, 'When True, this API service will accept admin operations.') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8cbcebed2..9d308ea24 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -124,17 +124,22 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def _get_kernel_ramdisk_from_image(self, image_id): - mapping_filename = FLAGS.os_krm_mapping_file - - with open(mapping_filename) as f: - mapping = json.load(f) - if image_id in mapping: - return mapping[image_id] + def _get_kernel_ramdisk_from_image(self, req, image_id): + """ + Machine images are associated with Kernels and Ramdisk images via + metadata stored in Glance as 'image_properties' + """ + def lookup(param): + _image_id = image_id + try: + return image['properties'][param] + except KeyError: + raise exception.NotFound( + _("%(param)s property not found for image %(_image_id)s") % + locals()) - raise exception.NotFound( - _("No entry for image '%s' in mapping file '%s'") % - (image_id, mapping_filename)) + image = self._image_service.show(req.environ['nova.context'], image_id) + return lookup('kernel_id'), lookup('ramdisk_id') def create(self, req): """ Creates a new server for a given user """ @@ -146,7 +151,8 @@ class Controller(wsgi.Controller): req.environ['nova.context'])[0] image_id = common.get_image_id_from_image_hash(self._image_service, req.environ['nova.context'], env['server']['imageId']) - kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( + req, image_id) instances = self.compute_api.create( req.environ['nova.context'], instance_types.get_by_flavor_id(env['server']['flavorId']), diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index a6915ce03..e652f1caa 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -473,8 +473,8 @@ class LdapDriver(object): raise exception.NotFound(_("The group at dn %s doesn't exist") % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate(_("User %s is already a member of " - "the group %s") % (uid, group_dn)) + raise exception.Duplicate(_("User %(uid)s is already a member of " + "the group %(group_dn)s") % locals()) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) @@ -585,10 +585,11 @@ class LdapDriver(object): else: return None - @staticmethod - def __dn_to_uid(dn): + def __dn_to_uid(self, dn): """Convert user dn to uid""" - return dn.split(',')[0].split('=')[1] + query = '(objectclass=novaUser)' + user = self.__find_object(dn, query) + return user[FLAGS.ldap_user_id_attribute][0] class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 1652e24e1..450ab803a 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -272,16 +272,22 @@ class AuthManager(object): project = 
self.get_project(project_id) if project == None: - LOG.audit(_("failed authorization: no project named %s (user=%s)"), - project_id, user.name) + pjid = project_id + uname = user.name + LOG.audit(_("failed authorization: no project named %(pjid)s" + " (user=%(uname)s)") % locals()) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - LOG.audit(_("Failed authorization: user %s not admin and not " - "member of project %s"), user.name, project.name) - raise exception.NotFound(_('User %s is not a member of project %s') - % (user.id, project.id)) + uname = user.name + uid = user.id + pjname = project.name + pjid = project.id + LOG.audit(_("Failed authorization: user %(uname)s not admin" + " and not member of project %(pjname)s") % locals()) + raise exception.NotFound(_('User %(uid)s is not a member of' + ' project %(pjid)s') % locals()) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -408,14 +414,16 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + uid = User.safe_id(user) + pid = Project.safe_id(project) if project: - LOG.audit(_("Adding role %s to user %s in project %s"), role, - User.safe_id(user), Project.safe_id(project)) + LOG.audit(_("Adding role %(role)s to user %(uid)s" + " in project %(pid)s") % locals()) else: - LOG.audit(_("Adding sitewide role %s to user %s"), role, - User.safe_id(user)) + LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s") + % locals()) with self.driver() as drv: - drv.add_role(User.safe_id(user), role, Project.safe_id(project)) + drv.add_role(uid, role, pid) def remove_role(self, user, role, project=None): """Removes role for user @@ -434,14 +442,16 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. 
""" + uid = User.safe_id(user) + pid = Project.safe_id(project) if project: - LOG.audit(_("Removing role %s from user %s on project %s"), - role, User.safe_id(user), Project.safe_id(project)) + LOG.audit(_("Removing role %(role)s from user %(uid)s" + " on project %(pid)s") % locals()) else: - LOG.audit(_("Removing sitewide role %s from user %s"), role, - User.safe_id(user)) + LOG.audit(_("Removing sitewide role %(role)s" + " from user %(uid)s") % locals()) with self.driver() as drv: - drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + drv.remove_role(uid, role, pid) @staticmethod def get_roles(project_roles=True): @@ -502,8 +512,8 @@ class AuthManager(object): description, member_users) if project_dict: - LOG.audit(_("Created project %s with manager %s"), name, - manager_user) + LOG.audit(_("Created project %(name)s with" + " manager %(manager_user)s") % locals()) project = Project(**project_dict) return project @@ -530,11 +540,12 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" - LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), - Project.safe_id(project)) + uid = User.safe_id(user) + pid = Project.safe_id(project) + LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals()) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), - Project.safe_id(project)) + Project.safe_id(project)) def is_project_manager(self, user, project): """Checks if user is project manager""" @@ -550,11 +561,11 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" - LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), - Project.safe_id(project)) + uid = User.safe_id(user) + pid = Project.safe_id(project) + LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals()) with self.driver() as drv: - return drv.remove_from_project(User.safe_id(user), - Project.safe_id(project)) + return drv.remove_from_project(uid, pid) @staticmethod def get_project_vpn_data(project): @@ -634,7 +645,10 @@ class AuthManager(object): user_dict = drv.create_user(name, access, secret, admin) if user_dict: rv = User(**user_dict) - LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + rvname = rv.name + rvadmin = rv.admin + LOG.audit(_("Created user %(rvname)s" + " (admin: %(rvadmin)r)") % locals()) return rv def delete_user(self, user): @@ -656,7 +670,8 @@ class AuthManager(object): if secret_key: LOG.audit(_("Secret Key change for user %s"), uid) if admin is not None: - LOG.audit(_("Admin status set to %r for user %s"), admin, uid) + LOG.audit(_("Admin status set to %(admin)r" + " for user %(uid)s") % locals()) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) diff --git a/nova/compute/api.py b/nova/compute/api.py index 6a3fe08b6..1d8b9d79f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -92,8 +92,9 @@ class API(base.Base): type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: - LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), - context.project_id, min_count) + pid = context.project_id + LOG.warn(_("Quota exceeeded for %(pid)s," + " tried to run %(min_count)s instances") % locals()) raise quota.QuotaError(_("Instance quota exceeded. 
You can only " "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") @@ -183,8 +184,10 @@ class API(base.Base): instance = self.update(context, instance_id, **updates) instances.append(instance) - LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) + pid = context.project_id + uid = context.user_id + LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" + " instance %(instance_id)s") % locals()) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6f09ce674..0f9bf301f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,8 +77,8 @@ def checks_instance_lock(function): LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) - LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), - self, context, instance_id, context=context) + LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" + " |%(instance_id)s|") % locals(), context=context) locked = self.get_lock(context, instance_id) admin = context.is_admin LOG.info(_("check_instance_lock: locked: |%s|"), locked, @@ -118,7 +118,7 @@ class ComputeManager(manager.Manager): """Do any initialization that needs to be run if this is a standalone service. """ - self.driver.init_host() + self.driver.init_host(host=self.host) def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" @@ -278,11 +278,11 @@ class ComputeManager(manager.Manager): LOG.audit(_("Rebooting instance %s"), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: + state = instance_ref['state'] + running = power_state.RUNNING LOG.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING, + 'instance: %(instance_id)s (state: %(state)s ' + 'expected: %(running)s)') % locals(), context=context) self.db.instance_set_state(context, @@ -307,9 +307,11 @@ class ComputeManager(manager.Manager): LOG.audit(_('instance %s: snapshotting'), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: + state = instance_ref['state'] + running = power_state.RUNNING LOG.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, instance_ref['state'], power_state.RUNNING) + 'instance: %(instance_id)s (state: %(state)s ' + 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) @@ -517,8 +519,8 @@ class ComputeManager(manager.Manager): """Attach a volume to an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint, context=context) + LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s" + " to %(mountpoint)s") % locals(), context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -533,8 +535,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. 
-            LOG.exception(_("instance %s: attach failed %s, removing"),
-                          instance_id, mountpoint, context=context)
+            LOG.exception(_("instance %(instance_id)s: attach failed"
+                    " %(mountpoint)s, removing") % locals(), context=context)
             self.volume_manager.remove_compute_volume(context, volume_id)
             raise exc

@@ -548,9 +550,9 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
         volume_ref = self.db.volume_get(context, volume_id)
-        LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
-                  volume_id, volume_ref['mountpoint'], instance_id,
-                  context=context)
+        mp = volume_ref['mountpoint']
+        LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
+                " on instance %(instance_id)s") % locals(), context=context)
         if instance_ref['name'] not in self.driver.list_instances():
             LOG.warn(_("Detaching volume from unknown instance %s"),
                      instance_id, context=context)
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 14d0e8ca1..04e08a235 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -352,8 +352,9 @@
                     rd += rd_bytes
                     wr += wr_bytes
                 except TypeError:
-                    LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
-                              disk, self.instance_id)
+                    iid = self.instance_id
+                    LOG.error(_('Cannot get blockstats for "%(disk)s"'
+                            ' on "%(iid)s"') % locals())
                     raise

         return '%d:%d' % (rd, wr)
@@ -374,8 +375,9 @@
                     rx += stats[0]
                     tx += stats[4]
                 except TypeError:
-                    LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
-                              interface, self.instance_id)
+                    iid = self.instance_id
+                    LOG.error(_('Cannot get ifstats for "%(interface)s"'
+                            ' on "%(iid)s"') % locals())
                     raise

         return '%d:%d' % (rx, tx)
diff --git a/nova/db/api.py b/nova/db/api.py
index f9d561587..c6c03fb0e 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,7 +71,6 @@
 class NoMoreTargets(exception.Error):
     """No more available blades"""
     pass

-
 ###################
@@ -351,6 +350,11 @@
     return IMPL.instance_get_all_by_project(context, project_id)


+def instance_get_all_by_host(context, host):
+    """Get all instances belonging to a host."""
+    return IMPL.instance_get_all_by_host(context, host)
+
+
 def instance_get_all_by_reservation(context, reservation_id):
     """Get all instance belonging to a reservation."""
     return IMPL.instance_get_all_by_reservation(context, reservation_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7b965f672..fa060228f 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -247,7 +247,8 @@
                      filter_by(deleted=can_read_deleted(context)).\
                      first()
     if not result:
-        raise exception.NotFound(_('No service for %s, %s') % (host, binary))
+        raise exception.NotFound(_('No service for %(host)s, %(binary)s')
+                % locals())

     return result

@@ -723,6 +724,17 @@
                    all()


+@require_admin_context
+def instance_get_all_by_host(context, host):
+    session = get_session()
+    return session.query(models.Instance).\
+                   options(joinedload_all('fixed_ip.floating_ips')).\
+                   options(joinedload('security_groups')).\
+                   filter_by(host=host).\
+                   filter_by(deleted=can_read_deleted(context)).\
+                   all()
+
+
 @require_context
 def instance_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
@@ -935,8 +947,8 @@
                      filter_by(deleted=can_read_deleted(context)).\
                     first()
     if not result:
-        raise exception.NotFound(_('no keypair for user %s, name %s') %
-                                 (user_id, name))
+        raise exception.NotFound(_('no keypair for user %(user_id)s,'
+                ' name %(name)s') % locals())
     return result

@@ -1395,11 +1407,13 @@ def volume_get(context, volume_id, session=None):
     if is_admin_context(context):
         result = session.query(models.Volume).\
+                         options(joinedload('instance')).\
                          filter_by(id=volume_id).\
                          filter_by(deleted=can_read_deleted(context)).\
                          first()
     elif is_user_context(context):
         result = session.query(models.Volume).\
+                         options(joinedload('instance')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=volume_id).\
                          filter_by(deleted=False).\
@@ -1537,8 +1551,8 @@
                            first()
     if not result:
         raise exception.NotFound(
-            _('No security group named %s for project: %s')
-            % (group_name, project_id))
+            _('No security group named %(group_name)s'
+            ' for project: %(project_id)s') % locals())
     return result

@@ -1922,8 +1936,8 @@
                      filter_by(id=pool_id).\
                      first()
     if not result:
-        raise exception.NotFound(_("No console pool with id %(pool_id)s") %
-                                 {'pool_id': pool_id})
+        raise exception.NotFound(_("No console pool with id %(pool_id)s")
+                % locals())

     return result

@@ -1939,12 +1953,9 @@
                      options(joinedload('consoles')).\
                      first()
     if not result:
-        raise exception.NotFound(_('No console pool of type %(type)s '
+        raise exception.NotFound(_('No console pool of type %(console_type)s '
                                    'for compute host %(compute_host)s '
-                                   'on proxy host %(host)s') %
-                                 {'type': console_type,
-                                  'compute_host': compute_host,
-                                  'host': host})
+                                   'on proxy host %(host)s') % locals())
     return result

@@ -1982,9 +1993,7 @@
                    first()
     if not result:
         raise exception.NotFound(_('No console for instance %(instance_id)s '
-                                   'in pool %(pool_id)s') %
-                                 {'instance_id': instance_id,
-                                  'pool_id': pool_id})
+                                   'in pool %(pool_id)s') % locals())
     return result

@@ -2005,9 +2014,7 @@
         query = query.filter_by(instance_id=instance_id)
     result = query.options(joinedload('pool')).first()
     if not result:
-        idesc = (_("on instance %s") % instance_id) if instance_id else ""
+        idesc = (_("on instance %s") % instance_id) if instance_id else ""
         raise exception.NotFound(_("No console with id %(console_id)s"
-                                   " %(instance)s") %
-                                 {'instance': idesc,
-                                  'console_id': console_id})
+                                   " %(idesc)s") % locals())
     return result
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index c3876c02a..dc885f138 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -22,6 +22,7 @@ Session Handling for SQLAlchemy backend

 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker

+from nova import exception
 from nova import flags

 FLAGS = flags.FLAGS
@@ -43,4 +44,6 @@ def get_session(autocommit=True, expire_on_commit=False):
                                    autocommit=autocommit,
                                    expire_on_commit=expire_on_commit))
     session = _MAKER()
+    session.query = exception.wrap_db_error(session.query)
+    session.flush = exception.wrap_db_error(session.flush)
     return session
diff --git a/nova/exception.py b/nova/exception.py
index ecd814e5d..f604fd63a 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -33,8 +33,9 @@
         description = _("Unexpected error while running command.")
         if exit_code is None:
             exit_code = '-'
-        message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
-                  % (description, cmd, exit_code, stdout, stderr)
+        message = _("%(description)s\nCommand: %(cmd)s\n"
+                "Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
+                "Stderr: %(stderr)r") % locals()
         IOError.__init__(self, message)

@@ -80,6 +81,24 @@
     pass


+class DBError(Error):
+    """Wraps an implementation specific exception"""
+    def __init__(self, inner_exception):
+        self.inner_exception = inner_exception
+        super(DBError, self).__init__(str(inner_exception))
+
+
+def wrap_db_error(f):
+    def _wrap(*args, **kwargs):
+        try:
+            return f(*args, **kwargs)
+        except Exception, e:
+            LOG.exception(_('DB exception wrapped'))
+            raise DBError(e)
+    _wrap.func_name = f.func_name
+    return _wrap
+
+
 def wrap_exception(f):
     def _wrap(*args, **kw):
         try:
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 7c2d7177b..dd82a9366 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -45,8 +45,9 @@
         self._routes = {}

     def publish(self, message, routing_key=None):
-        LOG.debug(_('(%s) publish (key: %s) %s'),
-                  self.name, routing_key, message)
+        nm = self.name
+        LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
+                ' %(message)s') % locals())
         routing_key = routing_key.split('.')[0]
         if routing_key in self._routes:
             for f in self._routes[routing_key]:
@@ -92,8 +93,8 @@
     def queue_bind(self, queue, exchange, routing_key, **kwargs):
         global EXCHANGES
         global QUEUES
-        LOG.debug(_('Binding %s to %s with key %s'),
-                  queue, exchange, routing_key)
+        LOG.debug(_('Binding %(queue)s to %(exchange)s with'
+                ' key %(routing_key)s') % locals())
         EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)

     def declare_consumer(self, queue, callback, *args, **kwargs):
@@ -117,7 +118,7 @@
                           content_type=content_type,
                           content_encoding=content_encoding)
         message.result = True
-        LOG.debug(_('Getting from %s: %s'), queue, message)
+        LOG.debug(_('Getting from %(queue)s: %(message)s') % locals())
         return message

     def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 5d7589090..fe99f2612 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -83,7 +83,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
                     'Floating IP address block')
 flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
 flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
-flags.DEFINE_integer('cnt_vpn_clients', 5,
+flags.DEFINE_integer('cnt_vpn_clients', 0,
                      'Number of addresses reserved for vpn clients')
 flags.DEFINE_string('network_driver', 'nova.network.linux_net',
                     'Driver to use for network creation')
@@ -198,8 +198,9 @@
             raise exception.Error(_("IP %s leased that isn't associated") %
                                   address)
         if instance_ref['mac_address'] != mac:
-            raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
-                                  (address, instance_ref['mac_address'], mac))
+            inst_addr = instance_ref['mac_address']
+            raise exception.Error(_("IP %(address)s leased to bad"
+                    " mac %(inst_addr)s vs %(mac)s") % locals())
         now = datetime.datetime.utcnow()
         self.db.fixed_ip_update(context,
                                 fixed_ip_ref['address'],
@@ -218,8 +219,9 @@
             raise exception.Error(_("IP %s released that isn't associated") %
                                   address)
         if instance_ref['mac_address'] != mac:
-            raise exception.Error(_("IP %s released from bad mac %s vs %s") %
-                                  (address, instance_ref['mac_address'], mac))
+            inst_addr = instance_ref['mac_address']
+            raise exception.Error(_("IP %(address)s released from"
+                    " bad mac %(inst_addr)s vs %(mac)s") % locals())
         if not fixed_ip_ref['leased']:
             LOG.warn(_("IP %s released that was not leased"), address,
                      context=context)
@@ -393,6 +395,7 @@
         standalone service.
         """
         super(FlatDHCPManager, self).init_host()
+        self.driver.init_host()
         self.driver.metadata_forward()

     def setup_compute_network(self, context, instance_id):
@@ -458,8 +461,8 @@
         standalone service.
         """
         super(VlanManager, self).init_host()
-        self.driver.metadata_forward()
         self.driver.init_host()
+        self.driver.metadata_forward()

     def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
         """Gets a fixed ip from the pool."""
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 43ed7ffe7..05ddace4b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -180,7 +180,7 @@
     def render_GET(self, request):  # pylint: disable-msg=R0201
         """Renders the GET request for a list of buckets as XML"""
         LOG.debug(_('List of buckets requested'), context=request.context)
-        buckets = [b for b in bucket.Bucket.all() \
+        buckets = [b for b in bucket.Bucket.all()
                    if b.is_authorized(request.context)]

         render_xml(request, {"ListAllMyBucketsResult": {
@@ -268,12 +268,14 @@
         Raises NotAuthorized if user in request context is not
         authorized to delete the object.
         """
-        LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
+        bname = self.bucket.name
+        nm = self.name
+        LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())

         if not self.bucket.is_authorized(request.context):
-            LOG.audit(_("Unauthorized attempt to get object %s from bucket "
-                        "%s"), self.name, self.bucket.name,
-                      context=request.context)
+            LOG.audit(_("Unauthorized attempt to get object %(nm)s"
+                    " from bucket %(bname)s") % locals(),
+                    context=request.context)
             raise exception.NotAuthorized()

         obj = self.bucket[urllib.unquote(self.name)]
@@ -289,12 +291,13 @@
         Raises NotAuthorized if user in request context is not
         authorized to delete the object.
         """
-        LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
+        nm = self.name
+        bname = self.bucket.name
+        LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())

         if not self.bucket.is_authorized(request.context):
-            LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
-                        "%s"),
-                      self.name, self.bucket.name, context=request.context)
+            LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
+                    " bucket %(bname)s") % locals(), context=request.context)
             raise exception.NotAuthorized()

         key = urllib.unquote(self.name)
@@ -310,16 +313,14 @@
         Raises NotAuthorized if user in request context is not
         authorized to delete the object.
""" - - LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + nm = self.name + bname = self.bucket.name + LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(), context=request.context) if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to delete object " - "%(object)s from bucket %(bucket)s") % - {'object': self.name, - 'bucket': self.bucket.name}, - context=request.context) + LOG.audit(_("Unauthorized attempt to delete object %(nm)s from " + "bucket %(bname)s") % locals(), context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -390,10 +391,10 @@ class ImagesResource(resource.Resource): image_location = get_argument(request, 'image_location', u'') image_path = os.path.join(FLAGS.images_path, image_id) - if not image_path.startswith(FLAGS.images_path) or \ - os.path.exists(image_path): + if ((not image_path.startswith(FLAGS.images_path)) or + os.path.exists(image_path)): LOG.audit(_("Not authorized to upload image: invalid directory " - "%s"), + "%s"), image_path, context=request.context) raise exception.NotAuthorized() @@ -427,8 +428,8 @@ class ImagesResource(resource.Resource): if operation: # operation implies publicity toggle newstatus = (operation == 'add') - LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, - newstatus, context=request.context) + LOG.audit(_("Toggling publicity flag of image %(image_id)s" + " %(newstatus)r") % locals(), context=request.context) image_object.set_public(newstatus) else: # other attributes imply update diff --git a/nova/rpc.py b/nova/rpc.py index bbfa71138..01fc6d44b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -89,15 +89,16 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - LOG.exception(_("AMQP server on %s:%d is unreachable." - " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + fl_host = FLAGS.rabbit_host + fl_port = FLAGS.rabbit_port + fl_intv = FLAGS.rabbit_retry_interval + LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is" + " unreachable. Trying again in %(fl_intv)d seconds.") + % locals()) self.failed_connection = True if self.failed_connection: LOG.exception(_("Unable to connect to AMQP server " - "after %d tries. Shutting down."), + "after %d tries. 
Shutting down."), FLAGS.rabbit_max_retries) sys.exit(1) @@ -152,7 +153,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -167,7 +168,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug(_('received %s') % (message_data)) + LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -180,7 +181,7 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn(_('no method for message: %s') % (message_data)) + LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a4d6dd574..e9b47512e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - LOG.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals()) diff --git a/nova/service.py b/nova/service.py index 91e00d3d1..2c30997f2 100644 --- a/nova/service.py +++ b/nova/service.py @@ -221,10 +221,10 @@ def serve(*services): name = '_'.join(x.binary for x in services) logging.debug(_("Serving %s"), name) - logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + flag_get = FLAGS.get(flag, None) + logging.debug("%(flag)s : %(flag_get)s" % locals()) for x in services: x.start() diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 85593ab46..4820e04fb 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -86,7 +86,8 @@ class RpcTestCase(test.TestCase): @staticmethod def echo(context, queue, value): """Calls echo in the passed queue""" - LOG.debug(_("Nested received %s, %s"), queue, value) + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) ret = rpc.call(context, queue, {"method": "echo", diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 12fb01596..95282dbcf 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -256,7 +256,7 @@ class IptablesFirewallTestCase(test.TestCase): ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [349256:75777230]', 'COMMIT', - '# Completed on Tue Jan 18 23:47:56 2011' + '# Completed on Tue Jan 18 23:47:56 2011', ] def test_static_filters(self): diff --git a/nova/utils.py b/nova/utils.py index 108824143..f71a4d880 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -138,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - LOG.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % obj.returncode) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f8b3c7807..161445b86 100644 --- 
a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -76,9 +76,10 @@ class FakeConnection(object): cls._instance = cls() return cls._instance - def init_host(self): + def init_host(self, host): """ - Initialize anything that is necessary for the driver to function + Initialize anything that is necessary for the driver to function, + including catching up with currently running VM's on the given host. """ return diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 30dc1c79b..5afa3221d 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -113,7 +113,7 @@ class HyperVConnection(object): self._conn = wmi.WMI(moniker='//./root/virtualization') self._cim_conn = wmi.WMI(moniker='//./root/cimv2') - def init_host(self): + def init_host(self, host): #FIXME(chiradeep): implement this LOG.debug(_('In init host')) pass @@ -129,7 +129,7 @@ class HyperVConnection(object): vm = self._lookup(instance.name) if vm is not None: raise exception.Duplicate(_('Attempt to create duplicate vm %s') % - instance.name) + instance.name) user = manager.AuthManager().get_user(instance['user_id']) project = manager.AuthManager().get_project(instance['project_id']) @@ -159,7 +159,7 @@ class HyperVConnection(object): vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() vs_gs_data.ElementName = instance['name'] (job, ret_val) = vs_man_svc.DefineVirtualSystem( - [], None, vs_gs_data.GetText_(1))[1:] + [], None, vs_gs_data.GetText_(1))[1:] if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) else: @@ -184,40 +184,40 @@ class HyperVConnection(object): memsetting.Limit = mem (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [memsetting.GetText_(1)]) + vm.path_(), [memsetting.GetText_(1)]) LOG.debug(_('Set memory for vm %s...'), instance.name) procsetting = vmsetting.associators( - wmi_result_class='Msvm_ProcessorSettingData')[0] + wmi_result_class='Msvm_ProcessorSettingData')[0] vcpus = long(instance['vcpus']) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus procsetting.Limit = vcpus (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [procsetting.GetText_(1)]) + vm.path_(), [procsetting.GetText_(1)]) LOG.debug(_('Set vcpus for vm %s...'), instance.name) def _create_disk(self, vm_name, vhdfile): """Create a disk and attach it to the vm""" - LOG.debug(_('Creating disk for %s by attaching disk file %s'), - vm_name, vhdfile) + LOG.debug(_('Creating disk for %(vm_name)s by attaching' + ' disk file %(vhdfile)s') % locals()) #Find the IDE controller for the vm. vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) vm = vms[0] vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') + wmi_result_class='Msvm_VirtualSystemSettingData') rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') + wmi_result_class='MSVM_ResourceAllocationSettingData') ctrller = [r for r in rasds if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ - and r.Address == "0"] + and r.Address == "0"] #Find the default disk drive object for the vm and clone it. 
         diskdflt = self._conn.query(
-            "SELECT * FROM Msvm_ResourceAllocationSettingData \
-            WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
-            AND InstanceID LIKE '%Default%'")[0]
+                "SELECT * FROM Msvm_ResourceAllocationSettingData \
+                WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+                AND InstanceID LIKE '%Default%'")[0]
         diskdrive = self._clone_wmi_obj(
-            'Msvm_ResourceAllocationSettingData', diskdflt)
+                'Msvm_ResourceAllocationSettingData', diskdflt)
         #Set the IDE ctrller as parent.
         diskdrive.Parent = ctrller[0].path_()
         diskdrive.Address = 0
@@ -263,17 +263,18 @@
         default_nic_data = [n for n in emulatednics_data
                             if n.InstanceID.rfind('Default') > 0]
         new_nic_data = self._clone_wmi_obj(
-            'Msvm_EmulatedEthernetPortSettingData',
-            default_nic_data[0])
+                'Msvm_EmulatedEthernetPortSettingData',
+                default_nic_data[0])
         #Create a port on the vswitch.
         (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
                                                           "", extswitch.path_())
         if ret_val != 0:
             LOG.error(_('Failed creating a port on the external vswitch'))
             raise Exception(_('Failed creating port for %s'),
-                vm_name)
-        LOG.debug(_("Created switch port %s on switch %s"),
-                  vm_name, extswitch.path_())
+                            vm_name)
+        ext_path = extswitch.path_()
+        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+                % locals())
         #Connect the new nic to the new port.
         new_nic_data.Connection = [new_port]
         new_nic_data.ElementName = vm_name + ' nic'
@@ -283,7 +284,7 @@
         new_resources = self._add_virt_resource(new_nic_data, vm)
         if new_resources is None:
             raise Exception(_('Failed to add nic to VM %s'),
-                vm_name)
+                            vm_name)
         LOG.info(_("Created nic for %s "), vm_name)

     def _add_virt_resource(self, res_setting_data, target_vm):
@@ -319,8 +320,10 @@
         if job.JobState != WMI_JOB_STATE_COMPLETED:
             LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
             return False
-        LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
-                  job.ElapsedTime)
+        desc = job.Description
+        elap = job.ElapsedTime
+        LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+                % locals())
         return True

     def _find_external_network(self):
@@ -386,7 +389,9 @@
             vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
             for vf in vhdfile:
                 vf.Delete()
-                LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+                instance_name = instance.name
+                LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
+                        % locals())

     def get_info(self, instance_id):
         """Get information about the VM"""
@@ -402,12 +407,14 @@
         summary_info = vs_man_svc.GetSummaryInformation(
                                        [4, 100, 103, 105], settings_paths)[1]
         info = summary_info[0]
-        LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
-            cpu_time=%s"), instance_id,
-            str(HYPERV_POWER_STATE[info.EnabledState]),
-            str(info.MemoryUsage),
-            str(info.NumberOfProcessors),
-            str(info.UpTime))
+        state = str(HYPERV_POWER_STATE[info.EnabledState])
+        memusage = str(info.MemoryUsage)
+        numprocs = str(info.NumberOfProcessors)
+        uptime = str(info.UpTime)
+
+        LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
+                " mem=%(memusage)s, num_cpu=%(numprocs)s,"
+                " cpu_time=%(uptime)s") % locals())

         return {'state': HYPERV_POWER_STATE[info.EnabledState],
                 'max_mem': info.MemoryUsage,
@@ -441,22 +448,22 @@
             #already in the state requested
             success = True
         if success:
-            LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
-                     req_state)
+            LOG.info(_("Successfully changed vm state of %(vm_name)s"
+                    " to %(req_state)s") % locals())
         else:
-            LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
-                      req_state)
-            raise Exception(_("Failed to change vm state of %s to %s"),
-                vm_name, req_state)
+            msg = _("Failed to change vm state of %(vm_name)s"
+                    " to %(req_state)s") % locals()
+            LOG.error(msg)
+            raise Exception(msg)

     def attach_volume(self, instance_name, device_path, mountpoint):
         vm = self._lookup(instance_name)
         if vm is None:
-            raise exception.NotFound('Cannot attach volume to missing %s vm' %
-                instance_name)
+            raise exception.NotFound('Cannot attach volume to missing %s vm'
+                    % instance_name)

     def detach_volume(self, instance_name, mountpoint):
         vm = self._lookup(instance_name)
         if vm is None:
-            raise exception.NotFound('Cannot detach volume from missing %s ' %
-                instance_name)
+            raise exception.NotFound('Cannot detach volume from missing %s '
+                    % instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index ecf0e5efb..9c987e14d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -67,7 +67,7 @@
     urlopened = urllib2.urlopen(request)
     urlretrieve(urlopened, path)
-    LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
+    LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())


 def _fetch_s3_image(image, path, user, project):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 36733764a..0a0bbfb59 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -157,8 +157,31 @@
         else:
             self.firewall_driver = utils.import_object(FLAGS.firewall_driver)

-    def init_host(self):
-        pass
+    def init_host(self, host):
+        # Adopt existing VMs running here
+        ctxt = context.get_admin_context()
+        for instance in db.instance_get_all_by_host(ctxt, host):
+            try:
+                LOG.debug(_('Checking state of %s'), instance['name'])
+                state = self.get_info(instance['name'])['state']
+            except exception.NotFound:
+                state = power_state.SHUTOFF
+
+            LOG.debug(_('Current state of %(name)s was %(state)s.'),
+                      {'name': instance['name'], 'state': state})
+            db.instance_set_state(ctxt, instance['id'], state)
+
+            if state == power_state.SHUTOFF:
+                # TODO(soren): This is what the compute manager does when you
+                # terminate an instance. At some point I figure we'll have a
+                # "terminated" state and some sort of cleanup job that runs
+                # occasionally, cleaning them out.
+                db.instance_destroy(ctxt, instance['id'])
+
+            if state != power_state.RUNNING:
+                continue
+            self.firewall_driver.prepare_instance_filter(instance)
+            self.firewall_driver.apply_instance_filter(instance)

     def _get_connection(self):
         if not self._wrapped_conn or not self._test_connection():
@@ -236,8 +259,9 @@
     def _cleanup(self, instance):
         target = os.path.join(FLAGS.instances_path, instance['name'])
-        LOG.info(_('instance %s: deleting instance files %s'),
-                 instance['name'], target)
+        instance_name = instance['name']
+        LOG.info(_('instance %(instance_name)s: deleting instance files'
+                ' %(target)s') % locals())
         if os.path.exists(target):
             shutil.rmtree(target)
@@ -418,7 +442,7 @@
         virsh_output = virsh_output[0].strip()

         if virsh_output.startswith('/dev/'):
-            LOG.info(_('cool, it\'s a device'))
+            LOG.info(_("cool, it's a device"))
             out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
                                      virsh_output, check_exit_code=False)
             return out
@@ -426,7 +450,7 @@
             return ''

     def _append_to_file(self, data, fpath):
-        LOG.info(_('data: %r, fpath: %r'), data, fpath)
+        LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
         fp = open(fpath, 'a+')
         fp.write(data)
         return fpath
@@ -434,7 +458,7 @@
     def _dump_file(self, fpath):
         fp = open(fpath, 'r+')
         contents = fp.read()
-        LOG.info(_('Contents of file %s: %r'), fpath, contents)
+        LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
         return contents

     @exception.wrap_exception
@@ -621,21 +645,22 @@
                    'dns': network_ref['dns'],
                    'ra_server': ra_server}
         if key or net:
+            inst_name = inst['name']
+            img_id = inst.image_id
             if key:
-                LOG.info(_('instance %s: injecting key into image %s'),
-                         inst['name'], inst.image_id)
+                LOG.info(_('instance %(inst_name)s: injecting key into'
+                        ' image %(img_id)s') % locals())
             if net:
-                LOG.info(_('instance %s: injecting net into image %s'),
-                         inst['name'], inst.image_id)
+                LOG.info(_('instance %(inst_name)s: injecting net into'
+                        ' image %(img_id)s') % locals())
             try:
                 disk.inject_data(basepath('disk'), key, net,
                                  partition=target_partition,
                                  nbd=FLAGS.use_cow_images)
             except Exception as e:
                 # This could be a windows image, or a vmdk format disk
-                LOG.warn(_('instance %s: ignoring error injecting data'
-                           ' into image %s (%s)'),
-                         inst['name'], inst.image_id, e)
+                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
+                        ' data into image %(img_id)s (%(e)s)') % locals())

         if FLAGS.libvirt_type == 'uml':
             utils.execute('sudo chown root %s' % basepath('disk'))
@@ -1234,6 +1259,7 @@
         our_chains += [':nova-local - [0:0]']
         our_rules += ['-A FORWARD -j nova-local']
+        our_rules += ['-A OUTPUT -j nova-local']

         security_groups = {}
         # Add our chains
@@ -1274,14 +1300,23 @@
         if(ip_version == 4):
             # Allow DHCP responses
             dhcp_server = self._dhcp_server_for_instance(instance)
-            our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
-                          (chain_name, dhcp_server)]
+            our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+                          '-j ACCEPT ' % (chain_name, dhcp_server)]
+            #Allow project network traffic
+            if (FLAGS.allow_project_net_traffic):
+                cidr = self._project_cidr_for_instance(instance)
+                our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
         elif(ip_version == 6):
             # Allow RA responses
             ra_server = self._ra_server_for_instance(instance)
             if ra_server:
                 our_rules += ['-A %s -s %s -p icmpv6' %
                               (chain_name, ra_server + "/128")]
+            #Allow project network traffic
+            if (FLAGS.allow_project_net_traffic):
+                cidrv6 = self._project_cidrv6_for_instance(instance)
+                our_rules += ['-A %s -s %s -j ACCEPT' %
+                              (chain_name, cidrv6)]

         # If nothing matches, jump to the fallback chain
         our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
@@ -1371,3 +1406,18 @@
         network = db.network_get_by_instance(context.get_admin_context(),
                                              instance['id'])
         return network['gateway']
+
+    def _ra_server_for_instance(self, instance):
+        network = db.network_get_by_instance(context.get_admin_context(),
+                                             instance['id'])
+        return network['ra_server']
+
+    def _project_cidr_for_instance(self, instance):
+        network = db.network_get_by_instance(context.get_admin_context(),
+                                             instance['id'])
+        return network['cidr']
+
+    def _project_cidrv6_for_instance(self, instance):
+        network = db.network_get_by_instance(context.get_admin_context(),
+                                             instance['id'])
+        return network['cidr_v6']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4bfaf4b57..e8352771c 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -69,7 +69,9 @@
 LOG = logging.getLogger("nova.virt.xenapi.fake")


 def log_db_contents(msg=None):
-    LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+    text = msg or ""
+    content = pformat(_db_content)
+    LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())


 def reset():
@@ -331,7 +333,8 @@
         if impl is not None:

             def callit(*params):
-                LOG.debug(_('Calling %s %s'), name, impl)
+                localname = name
+                LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
                 self._check_session(params)
                 return impl(*params)
             return callit
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6a9c96fc6..4afd28dd8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -134,7 +134,8 @@
                'pae': 'true', 'viridian': 'true'}
         LOG.debug(_('Created VM %s...'), instance.name)
         vm_ref = session.call_xenapi('VM.create', rec)
-        LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+        instance_name = instance.name
+        LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
         return vm_ref

     @classmethod
@@ -154,10 +155,11 @@
         vbd_rec['qos_algorithm_type'] = ''
         vbd_rec['qos_algorithm_params'] = {}
         vbd_rec['qos_supported_algorithms'] = []
-        LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
+        LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+                ' VDI %(vdi_ref)s ... ') % locals())
         vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
-        LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
-                  vdi_ref)
+        LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+                ' VDI %(vdi_ref)s.') % locals())
         return vbd_ref

     @classmethod
@@ -209,11 +211,11 @@
         vif_rec['other_config'] = {}
         vif_rec['qos_algorithm_type'] = ''
         vif_rec['qos_algorithm_params'] = {}
-        LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
-                  network_ref)
+        LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
+                ' network %(network_ref)s.') % locals())
         vif_ref = session.call_xenapi('VIF.create', vif_rec)
-        LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
-                  vm_ref, network_ref)
+        LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+                ' network %(network_ref)s.') % locals())
         return vif_ref

     @classmethod
@@ -231,8 +233,9 @@
                 'other_config': {},
                 'sm_config': {},
                 'tags': []})
-        LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
-                  name_label, virtual_size, read_only, sr_ref)
+        LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+                ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
+                % locals())
         return vdi_ref

     @classmethod
@@ -242,7 +245,8 @@
         """
         #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added
-        LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
+        LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
+                % locals())

         vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
         vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -255,8 +259,8 @@
         template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
         template_vdi_uuid = template_vdi_rec["uuid"]

-        LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
-                  vm_ref)
+        LOG.debug(_('Created snapshot %(template_vm_ref)s from'
+                ' VM %(vm_ref)s.') % locals())

         parent_uuid = wait_for_vhd_coalesce(
             session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -269,8 +273,8 @@
         """ Requests that the Glance plugin bundle the specified VDIs and
         push them into Glance using the specified human-friendly name.
         """
-        logging.debug(_("Asking xapi to upload %s as ID %s"),
-                      vdi_uuids, image_id)
+        logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+                " ID %(image_id)s") % locals())

         params = {'vdi_uuids': vdi_uuids,
                   'image_id': image_id,
@@ -310,7 +314,7 @@
         meta, image_file = c.get_image(image)
         virtual_size = int(meta['size'])
         vdi_size = virtual_size
-        LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+        LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
         if type == ImageType.DISK:
             # Make room for MBR.
             vdi_size += MBR_SIZE_BYTES
@@ -344,7 +348,7 @@
     def _fetch_image_objectstore(cls, session, instance_id, image, access,
                                  secret, type):
         url = images.image_url(image)
-        LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
+        LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
         fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
         args = {}
         args['src_url'] = url
@@ -499,7 +503,8 @@
         parent_uuid = vdi_rec['sm_config']['vhd-parent']
         parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
         parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
-        LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+        vdi_uuid = vdi_rec['uuid']
+        LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
         return parent_ref, parent_rec
     else:
         return None
@@ -540,16 +545,17 @@
     def _poll_vhds():
         attempts['counter'] += 1
         if attempts['counter'] > max_attempts:
-            msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
-                   % (attempts['counter'], max_attempts))
+            counter = attempts['counter']
+            msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
+                    " %(max_attempts)d), giving up...") % locals())
             raise exception.Error(msg)

         scan_sr(session, instance_id, sr_ref)
         parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
         if original_parent_uuid and (parent_uuid != original_parent_uuid):
-            LOG.debug(_("Parent %s doesn't match original parent %s, "
-                        "waiting for coalesce..."), parent_uuid,
-                      original_parent_uuid)
+            LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
+                    " %(original_parent_uuid)s, waiting for coalesce...")
+                    % locals())
         else:
             # Breakout of the loop (normally) and return the parent_uuid
             raise utils.LoopingCallDone(parent_uuid)
@@ -567,8 +573,8 @@
     else:
         num_vdis = len(vdi_refs)
         if num_vdis != 1:
-            raise Exception(_("Unexpected number of VDIs (%s) found for "
-                              "VM %s") % (num_vdis, vm_ref))
+            raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+                    " for VM %(vm_ref)s") % locals())

     vdi_ref = vdi_refs[0]
     vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
@@ -708,8 +714,8 @@
     primary_first = MBR_SIZE_SECTORS
     primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1

-    LOG.debug(_('Writing partition table %d %d to %s...'),
-              primary_first, primary_last, dest)
+    LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
+            ' to %(dest)s...') % locals())

     def execute(cmd, process_input=None, check_exit_code=True):
         return utils.execute(cmd=cmd,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2ccd1ec24..628a171fa 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -104,7 +104,9 @@
                               network_ref, instance.mac_address)
         LOG.debug(_('Starting VM %s...'), vm_ref)
         self._session.call_xenapi('VM.start', vm_ref, False, False)
-        LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
+        instance_name = instance.name
+        LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
+                % locals())

         # NOTE(armando): Do we really need to do this in virt?
         timer = utils.LoopingCall(f=None)
@@ -196,7 +198,8 @@
             template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                 self._session, instance.id, vm_ref, label)
         except self.XenAPI.Failure, exc:
-            logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+            logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
+                    % locals())
             return

         try:
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0cd15b950..d5ebd29d5 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -71,7 +71,7 @@
                 session.get_xenapi_host(),
                 record,
                 '0', label, description, 'iscsi', '', False, {})
-            LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
+            LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
             return sr_ref
         except cls.XenAPI.Failure, exc:
             LOG.exception(exc)
@@ -98,20 +98,20 @@
         try:
             pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
         except cls.XenAPI.Failure, exc:
-            LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
-                     exc, sr_ref)
+            LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+                    ' for %(sr_ref)s') % locals())
         for pbd in pbds:
             try:
                 session.get_xenapi().PBD.unplug(pbd)
             except cls.XenAPI.Failure, exc:
-                LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
-                         exc, pbd)
+                LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+                        ' PBD %(pbd)s') % locals())
         try:
             session.get_xenapi().SR.forget(sr_ref)
             LOG.debug(_("Forgetting SR %s done."), sr_ref)
         except cls.XenAPI.Failure, exc:
-            LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
-                     sr_ref)
+            LOG.warn(_('Ignoring exception %(exc)s when forgetting'
+                    ' SR %(sr_ref)s') % locals())

     @classmethod
     def introduce_vdi(cls, session, sr_ref):
@@ -172,8 +172,8 @@
            (volume_id is None) or \
            (target_host is None) or \
            (target_iqn is None):
-            raise StorageError(_('Unable to obtain target information %s, %s')
-                               % (device_path, mountpoint))
+            raise StorageError(_('Unable to obtain target information'
+                    ' %(device_path)s, %(mountpoint)s') % locals())
         volume_info = {}
         volume_info['deviceNumber'] = device_number
         volume_info['volumeId'] = volume_id
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 189f968c6..d89a6f995 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -48,8 +48,8 @@
             raise exception.NotFound(_('Instance %s not found')
                                      % instance_name)
         # NOTE: No Resource Pool concept so far
-        LOG.debug(_("Attach_volume: %s, %s, %s"),
-                  instance_name, device_path, mountpoint)
+        LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
+                " %(mountpoint)s") % locals())
         # Create the iSCSI SR, and the PDB through which hosts access SRs.
         # But first, retrieve target info, like Host, IQN, LUN and SCSIID
         vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -66,9 +66,8 @@
             except StorageError, exc:
                 LOG.exception(exc)
                 VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
-                raise Exception(_('Unable to create VDI on SR %s for instance %s')
-                                % (sr_ref,
-                                instance_name))
+                raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
+                        ' instance %(instance_name)s') % locals())
         else:
             try:
                 vbd_ref = VMHelper.create_vbd(self._session,
@@ -78,9 +77,8 @@
             except self.XenAPI.Failure, exc:
                 LOG.exception(exc)
                 VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
-                raise Exception(_('Unable to use SR %s for instance %s')
-                                % (sr_ref,
-                                instance_name))
+                raise Exception(_('Unable to use SR %(sr_ref)s for'
+                        ' instance %(instance_name)s') % locals())
             else:
                 try:
                     task = self._session.call_xenapi('Async.VBD.plug',
@@ -92,8 +90,8 @@
                                                        sr_ref)
                     raise Exception(_('Unable to attach volume to instance %s')
                                     % instance_name)
-        LOG.info(_('Mountpoint %s attached to instance %s'),
-                 mountpoint, instance_name)
+        LOG.info(_('Mountpoint %(mountpoint)s attached to'
+                ' instance %(instance_name)s') % locals())

     def detach_volume(self, instance_name, mountpoint):
         """Detach volume storage to VM instance"""
@@ -103,7 +101,8 @@
             raise exception.NotFound(_('Instance %s not found')
                                      % instance_name)
         # Detach VBD from VM
-        LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+        LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+                % locals())
         device_number = VolumeHelper.mountpoint_to_number(mountpoint)
         try:
             vbd_ref = VMHelper.find_vbd_by_number(self._session,
@@ -125,5 +124,5 @@
             LOG.exception(exc)
         # Forget SR
         VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
-        LOG.info(_('Mountpoint %s detached from instance %s'),
-                 mountpoint, instance_name)
+        LOG.info(_('Mountpoint %(mountpoint)s detached from'
+                ' instance %(instance_name)s') % locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 927f5905b..a0b0499b8 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -141,7 +141,7 @@
         self._vmops = VMOps(session)
         self._volumeops = VolumeOps(session)

-    def init_host(self):
+    def init_host(self, host):
         #FIXME(armando): implement this
         #NOTE(armando): would we need a method
         #to call when shutting down the host?
@@ -298,19 +298,14 @@
                     return
                 elif status == "success":
                     result = self._session.xenapi.task.get_result(task)
-                    LOG.info(_("Task [%s] %s status: success %s") % (
-                        name,
-                        task,
-                        result))
+                    LOG.info(_("Task [%(name)s] %(task)s status:"
+                            " success %(result)s") % locals())
                     done.send(_parse_xmlrpc_value(result))
                 else:
                     error_info = self._session.xenapi.task.get_error_info(task)
                     action["error"] = str(error_info)
-                    LOG.warn(_("Task [%s] %s status: %s %s") % (
-                        name,
-                        task,
-                        status,
-                        error_info))
+                    LOG.warn(_("Task [%(name)s] %(task)s status:"
+                            " %(status)s %(error_info)s") % locals())
                     done.send_exception(self.XenAPI.Failure(error_info))
                 db.instance_action_create(context.get_admin_context(), action)
             except self.XenAPI.Failure, exc:
diff --git a/nova/volume/api.py b/nova/volume/api.py
index ce4831cc3..0bcd8a3b0 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,10 +41,11 @@
     def create(self, context, size, name, description):
         if quota.allowed_volumes(context, 1, size) < 1:
-            LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
-                     context.project_id, size)
+            pid = context.project_id
+            LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
+                    " %(size)sG volume") % locals())
             raise quota.QuotaError(_("Volume quota exceeded. You cannot "
-                                     "create a volume of size %s") % size)
+                                    "create a volume of size %s") % size)

         options = {
             'size': size,
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 82e3521a8..6f8e25e19 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -103,9 +103,10 @@
         volume_ref['host'] = self.host

         try:
-            LOG.debug(_("volume %s: creating lv of size %sG"),
-                      volume_ref['name'],
-                      volume_ref['size'])
+            vol_name = volume_ref['name']
+            vol_size = volume_ref['size']
+            LOG.debug(_("volume %(vol_name)s: creating lv of"
+                    " size %(vol_size)sG") % locals())
             self.driver.create_volume(volume_ref)

             LOG.debug(_("volume %s: creating export"), volume_ref['name'])
diff --git a/nova/wsgi.py b/nova/wsgi.py
index a48bede9c..e01cc1e1e 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -64,7 +64,8 @@
     def start(self, application, port, host='0.0.0.0', backlog=128):
         """Run a WSGI server with the given application."""
-        logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
+        arg0 = sys.argv[0]
+        logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals())
         socket = eventlet.listen((host, port), backlog=backlog)
         self.pool.spawn_n(self._run, application, socket)
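
A note on the pattern applied throughout this change: positional "%s"
arguments are replaced by named "%(var)s" placeholders interpolated with
% locals(), because gettext translators can reorder named placeholders but
not positional ones, and % locals() supplies the named values without an
explicit mapping. A minimal sketch of the idea, assuming a gettext-style
_() like nova's global one (the function and variable names below are
illustrative, not part of the patch):

    import gettext

    _ = gettext.NullTranslations().ugettext  # stand-in for nova's global _()

    def describe(instance_id, host):
        # Positional form: a translation cannot swap the two arguments.
        old_style = _("Instance %s is running on %s") % (instance_id, host)
        # Named form: a translation such as "On %(host)s runs instance
        # %(instance_id)s" remains valid, and % locals() picks up the
        # values from the enclosing scope.
        new_style = _("Instance %(instance_id)s is running"
                      " on %(host)s") % locals()
        return old_style, new_style

    print describe(42, 'node1')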