summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrey Morris <trey.morris@rackspace.com>2011-02-14 17:18:59 -0600
committerTrey Morris <trey.morris@rackspace.com>2011-02-14 17:18:59 -0600
commit9d1bdde7efc7b8cb6aa8db6a86777393e838fe8e (patch)
tree487b8c5ad9b654d6e1795dd72ef8e90f17663de4
parentee4cba7779daa5b2e7415fb69cabc698b7dd60da (diff)
parent799f5222e5eea2825f1e05a66e44eb4df709234e (diff)
fixed merge conflict
-rw-r--r--.mailmap1
-rw-r--r--Authors7
-rwxr-xr-xbin/nova-api2
-rwxr-xr-xbin/nova-dhcpbridge6
-rwxr-xr-xbin/nova-manage29
-rw-r--r--bin/nova-spoolsentry6
-rwxr-xr-xcontrib/nova.sh92
-rw-r--r--doc/ext/nova_todo.py5
-rw-r--r--doc/source/adminguide/multi.node.install.rst167
-rw-r--r--doc/source/adminguide/nova.manage.rst35
-rw-r--r--doc/source/adminguide/single.node.install.rst28
-rw-r--r--doc/source/man/novamanage.rst35
-rw-r--r--krm_mapping.json.sample3
-rw-r--r--nova/adminclient.py64
-rw-r--r--nova/api/ec2/__init__.py58
-rw-r--r--nova/api/ec2/admin.py63
-rw-r--r--nova/api/ec2/apirequest.py11
-rw-r--r--nova/api/ec2/cloud.py79
-rw-r--r--nova/api/openstack/__init__.py7
-rw-r--r--nova/api/openstack/common.py2
-rw-r--r--nova/api/openstack/servers.py29
-rw-r--r--nova/auth/ldapdriver.py11
-rw-r--r--nova/auth/manager.py69
-rw-r--r--nova/compute/api.py13
-rw-r--r--nova/compute/instance_types.py4
-rw-r--r--nova/compute/manager.py56
-rw-r--r--nova/compute/monitor.py10
-rw-r--r--nova/db/api.py11
-rw-r--r--nova/db/sqlalchemy/api.py107
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/001_austin.py24
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py24
-rw-r--r--nova/db/sqlalchemy/migration.py11
-rw-r--r--nova/db/sqlalchemy/models.py6
-rw-r--r--nova/db/sqlalchemy/session.py13
-rw-r--r--nova/exception.py36
-rw-r--r--nova/fakerabbit.py11
-rw-r--r--nova/flags.py8
-rw-r--r--nova/image/local.py30
-rw-r--r--nova/image/s3.py19
-rw-r--r--nova/log.py7
-rw-r--r--nova/network/linux_net.py93
-rw-r--r--nova/network/manager.py23
-rw-r--r--nova/objectstore/handler.py43
-rw-r--r--nova/rpc.py25
-rw-r--r--nova/scheduler/manager.py2
-rw-r--r--nova/scheduler/simple.py8
-rw-r--r--nova/service.py9
-rw-r--r--nova/test.py3
-rw-r--r--nova/tests/api/openstack/test_images.py1
-rw-r--r--nova/tests/api/openstack/test_servers.py2
-rw-r--r--nova/tests/db/nova.austin.sqlitebin0 -> 44032 bytes
-rw-r--r--nova/tests/test_api.py34
-rw-r--r--nova/tests/test_cloud.py10
-rw-r--r--nova/tests/test_compute.py55
-rw-r--r--nova/tests/test_localization.py100
-rw-r--r--nova/tests/test_rpc.py3
-rw-r--r--nova/tests/test_virt.py22
-rw-r--r--nova/utils.py52
-rw-r--r--nova/version.py2
-rw-r--r--nova/virt/fake.py5
-rw-r--r--nova/virt/hyperv.py89
-rw-r--r--nova/virt/images.py5
-rw-r--r--nova/virt/libvirt.xml.template6
-rw-r--r--nova/virt/libvirt_conn.py144
-rw-r--r--nova/virt/xenapi/fake.py7
-rw-r--r--nova/virt/xenapi/vm_utils.py70
-rw-r--r--nova/virt/xenapi/vmops.py28
-rw-r--r--nova/virt/xenapi/volume_utils.py18
-rw-r--r--nova/virt/xenapi/volumeops.py25
-rw-r--r--nova/virt/xenapi_conn.py15
-rw-r--r--nova/volume/api.py7
-rw-r--r--nova/volume/driver.py10
-rw-r--r--nova/volume/manager.py9
-rw-r--r--nova/volume/san.py335
-rw-r--r--nova/wsgi.py3
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/agent7
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/glance71
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore46
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py1
-rw-r--r--run_tests.py3
-rwxr-xr-xrun_tests.sh9
-rw-r--r--setup.py27
82 files changed, 1874 insertions, 752 deletions
diff --git a/.mailmap b/.mailmap
index d13219ab0..c6f6c9a8b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,3 +33,4 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<justin@fathomdb.com> <superstack@superstack.org>
diff --git a/Authors b/Authors
index 608fec523..b359fec22 100644
--- a/Authors
+++ b/Authors
@@ -3,11 +3,14 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Bilal Akhtar <bilalakhtar@ubuntu.com>
Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
+Christian Berendt <berendt@b1-systems.de>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
+Dan Prince <dan.prince@rackspace.com>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
@@ -21,7 +24,9 @@ Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
+John Dewey <john@dewey.ws>
Jonathan Bryce <jbryce@jbryce.com>
+Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
@@ -38,6 +43,7 @@ MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
Paul Voccio <paul@openstack.org>
+Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
Rob Kost <kost@isi.edu>
@@ -49,6 +55,7 @@ Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
+Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
diff --git a/bin/nova-api b/bin/nova-api
index 44bbfaf86..11176a021 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -56,7 +56,7 @@ def run_app(paste_config_file):
if config is None:
LOG.debug(_("No paste configuration for app: %s"), api)
continue
- LOG.debug(_("App Config: %s\n%r"), api, config)
+ LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 1a994d956..d38ba2543 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -120,9 +120,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
- LOG.debug(_("Called %s for mac %s with ip %s and "
- "hostname %s on interface %s"),
- action, mac, ip, hostname, interface)
+ msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and"
+ " hostname %(hostname)s on interface %(interface)s") % locals()
+ LOG.debug(msg)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
diff --git a/bin/nova-manage b/bin/nova-manage
index cf693140a..68036504d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -267,6 +267,14 @@ class RoleCommands(object):
self.manager.remove_role(user, role, project)
+def _db_error(caught_exception):
+ print caught_exception
+ print _("The above error may show that the database has not "
+ "been created.\nPlease create a database using "
+ "nova-manage sync db before running this command.")
+ exit(1)
+
+
class UserCommands(object):
"""Class for managing users."""
@@ -282,13 +290,19 @@ class UserCommands(object):
def admin(self, name, access=None, secret=None):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
- user = self.manager.create_user(name, access, secret, True)
+ try:
+ user = self.manager.create_user(name, access, secret, True)
+ except exception.DBError, e:
+ _db_error(e)
self._print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
- user = self.manager.create_user(name, access, secret, False)
+ try:
+ user = self.manager.create_user(name, access, secret, False)
+ except exception.DBError, e:
+ _db_error(e)
self._print_export(user)
def delete(self, name):
@@ -409,9 +423,14 @@ class ProjectCommands(object):
with open(filename, 'w') as f:
f.write(zip_file)
except db.api.NoMoreNetworks:
- print ('No more networks available. If this is a new '
- 'installation, you need\nto call something like this:\n\n'
- ' nova-manage network create 10.0.0.0/8 10 64\n\n')
+ print _('No more networks available. If this is a new '
+ 'installation, you need\nto call something like this:\n\n'
+ ' nova-manage network create 10.0.0.0/8 10 64\n\n')
+ except exception.ProcessExecutionError, e:
+ print e
+ print _("The above error may show that the certificate db has not "
+ "been created.\nPlease create a database by running a "
+ "nova-api server on this host.")
class FloatingIpCommands(object):
diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry
index ab20268a9..c53482852 100644
--- a/bin/nova-spoolsentry
+++ b/bin/nova-spoolsentry
@@ -74,10 +74,8 @@ class SpoolSentry(object):
return rv
def send_data(self, data):
- data = {
- 'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
- 'key': self.key
- }
+ data = {'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
+ 'key': self.key}
req = urllib2.Request(self.sentry_url)
res = urllib2.urlopen(req, urllib.urlencode(data))
if res.getcode() != 200:
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 08dc89bae..9259035ca 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -1,15 +1,14 @@
#!/usr/bin/env bash
DIR=`pwd`
CMD=$1
-SOURCE_BRANCH=lp:nova
-if [ -n "$2" ]; then
- SOURCE_BRANCH=$2
+if [ "$CMD" = "branch" ]; then
+ SOURCE_BRANCH=${2:-lp:nova}
+ DIRNAME=${3:-nova}
+else
+ DIRNAME=${2:-nova}
fi
-DIRNAME=nova
+
NOVA_DIR=$DIR/$DIRNAME
-if [ -n "$3" ]; then
- NOVA_DIR=$DIR/$3
-fi
if [ ! -n "$HOST_IP" ]; then
# NOTE(vish): This will just get the first ip in the list, so if you
@@ -24,6 +23,8 @@ TEST=${TEST:-0}
USE_LDAP=${USE_LDAP:-0}
# Use OpenDJ instead of OpenLDAP when using LDAP
USE_OPENDJ=${USE_OPENDJ:-0}
+# Use IPv6
+USE_IPV6=${USE_IPV6:-0}
LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu}
NET_MAN=${NET_MAN:-VlanManager}
# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface
@@ -43,30 +44,17 @@ else
AUTH=dbdriver.DbDriver
fi
-mkdir -p /etc/nova
-cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
---verbose
---nodaemon
---dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
---network_manager=nova.network.manager.$NET_MAN
---cc_host=$HOST_IP
---routing_source_ip=$HOST_IP
---sql_connection=$SQL_CONN
---auth_driver=nova.auth.$AUTH
---libvirt_type=$LIBVIRT_TYPE
-NOVA_CONF_EOF
-
-if [ -n "$FLAT_INTERFACE" ]; then
- echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf
-fi
-
if [ "$CMD" == "branch" ]; then
sudo apt-get install -y bzr
+ if [ ! -e "$DIR/.bzr" ]; then
+ bzr init-repo $DIR
+ fi
rm -rf $NOVA_DIR
bzr branch $SOURCE_BRANCH $NOVA_DIR
cd $NOVA_DIR
mkdir -p $NOVA_DIR/instances
mkdir -p $NOVA_DIR/networks
+ exit
fi
# You should only have to run this once
@@ -74,7 +62,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y python-software-properties
sudo add-apt-repository ppa:nova-core/trunk
sudo apt-get update
- sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables
+ sudo apt-get install -y dnsmasq-base kpartx kvm gawk iptables ebtables
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
@@ -85,16 +73,16 @@ if [ "$CMD" == "install" ]; then
sudo /etc/init.d/libvirt-bin restart
sudo modprobe nbd
sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
- sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy
+ sudo apt-get install -y python-migrate python-eventlet python-gflags python-ipy python-tempita
sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
- sudo apt-get install -y python-paste python-pastedeploy
-#For IPV6
- sudo apt-get install -y python-netaddr
- sudo apt-get install -y radvd
-#(Nati) Note that this configuration is only needed for nova-network node.
- sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding"
- sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"
-
+ sudo apt-get install -y python-netaddr python-paste python-pastedeploy python-glance
+
+ if [ "$USE_IPV6" == 1 ]; then
+ sudo apt-get install -y radvd
+ sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding"
+ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"
+ fi
+
if [ "$USE_MYSQL" == 1 ]; then
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
@@ -103,8 +91,10 @@ mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
apt-get install -y mysql-server python-mysqldb
fi
- wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
- tar -C $DIR -zxf images.tgz
+ mkdir -p $DIR/images
+ wget -c http://images.ansolabs.com/tty.tgz
+ tar -C $DIR/images -zxf tty.tgz
+ exit
fi
NL=`echo -ne '\015'`
@@ -115,9 +105,31 @@ function screen_it {
}
if [ "$CMD" == "run" ]; then
+
+ cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
+--verbose
+--nodaemon
+--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
+--network_manager=nova.network.manager.$NET_MAN
+--cc_host=$HOST_IP
+--routing_source_ip=$HOST_IP
+--sql_connection=$SQL_CONN
+--auth_driver=nova.auth.$AUTH
+--libvirt_type=$LIBVIRT_TYPE
+NOVA_CONF_EOF
+
+ if [ -n "$FLAT_INTERFACE" ]; then
+ echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf
+ fi
+
+ if [ "$USE_IPV6" == 1 ]; then
+ echo "--use_ipv6" >>$NOVA_DIR/bin/nova.conf
+ fi
+
killall dnsmasq
- #For IPv6
- killall radvd
+ if [ "$USE_IPV6" == 1 ]; then
+ killall radvd
+ fi
screen -d -m -S nova -t nova
sleep 1
if [ "$USE_MYSQL" == 1 ]; then
@@ -150,6 +162,8 @@ if [ "$CMD" == "run" ]; then
cd $DIR
fi
+ # create the database
+ $NOVA_DIR/bin/nova-manage db sync
# create an admin user called 'admin'
$NOVA_DIR/bin/nova-manage user admin admin admin admin
# create a project called 'admin' with project manager of 'admin'
@@ -178,6 +192,7 @@ if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then
sleep 2
# delete volumes
. $NOVA_DIR/novarc; euca-describe-volumes | grep vol- | cut -f2 | xargs -n1 euca-delete-volume
+ sleep 2
fi
if [ "$CMD" == "run" ] || [ "$CMD" == "clean" ]; then
@@ -192,5 +207,4 @@ if [ "$CMD" == "scrub" ]; then
else
virsh list | grep i- | awk '{print \$1}' | xargs -n1 virsh destroy
fi
- vblade-persist ls | grep vol- | awk '{print \$1\" \"\$2}' | xargs -n2 vblade-persist destroy
fi
diff --git a/doc/ext/nova_todo.py b/doc/ext/nova_todo.py
index efc0c3edd..67bbfd2e0 100644
--- a/doc/ext/nova_todo.py
+++ b/doc/ext/nova_todo.py
@@ -26,7 +26,7 @@ def process_todo_nodes(app, doctree, fromdocname):
# reading through docutils for the proper way to construct an empty list
lists = []
for i in xrange(5):
- lists.append(nodes.bullet_list("", nodes.Text('','')));
+ lists.append(nodes.bullet_list("", nodes.Text('','')))
lists[i].remove(lists[i][0])
lists[i].set_class('todo_list')
@@ -42,7 +42,8 @@ def process_todo_nodes(app, doctree, fromdocname):
# Create a reference
newnode = nodes.reference('', '')
- link = _('%s, line %d') % (filename, todo_info['lineno']);
+ line_info = todo_info['lineno']
+ link = _('%(filename)s, line %(line_info)d') % locals()
innernode = nodes.emphasis(link, link)
newnode['refdocname'] = todo_info['docname']
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index df7078180..c53455e3e 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -1,84 +1,87 @@
Installing Nova on Multiple Servers
===================================
-
+
When you move beyond evaluating the technology and into building an actual
production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
-
+
You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
-
+
This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
For a starting architecture, these instructions describing installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node.
Requirements for a multi-node installation
------------------------------------------
-
+
* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
+
Assumptions
-----------
-
+
* Networking is configured between/through the physical machines on a single subnet.
* Installation and execution are both performed by ROOT user.
+
Scripted Installation
---------------------
-A script available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
+A script available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
-You must run these scripts with root permissions.
+You must run these scripts with root permissions.
From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
::
-
- wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1
+
+ wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/nova-CC-install-v1.1.sh
Ensure you can execute the script by modifying the permissions on the script file.
::
-
- sudo chmod 755 Nova_CC_Installer_v0.1
+
+ sudo chmod 755 nova-CC-install-v1.1.sh
::
-
- sudo ./Nova_CC_Installer_v0.1
-Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node.
+ sudo ./nova-CC-install-v1.1.sh
+
+Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. You can use the nova-NODE-installer.sh script from the above github-hosted project for the compute node installation.
+
+Copy the nova.conf from the cloud controller node to the compute node.
Restart related services::
-
+
libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
You can go to the `Configuration section`_ for next steps.
Manual Installation - Step-by-Step
----------------------------------
-The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
+The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
Cloud Controller Installation
`````````````````````````````
On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
-
+
Step 1 - Use apt-get to get the latest code
-------------------------------------------
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a pre-requisite for setting up the nova package repo:
::
-
+
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:nova-core/trunk
-
+
2. Run update.
::
-
+
sudo apt-get update
3. Install python required packages, nova-packages, and helper apps.
@@ -93,15 +96,15 @@ Step 2 Set up configuration file (installed in /etc/nova)
---------------------------------------------------------
1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
-
+
::
-
+
--daemonize=1
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova
-
+
The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
--sql_connection ### Location of Nova SQL DB
@@ -131,31 +134,31 @@ The following code can be cut and paste, and edited to your setup:
Note: CC_ADDR=<the external IP address of your cloud controller>
Detailed explanation of the following example is available above.
-
+
::
-
+
--sql_connection=mysql://root:nova@<CC_ADDR>/nova
--s3_host=<CC_ADDR>
--rabbit_host=<CC_ADDR>
---cc_host=<CC_ADDR>
---verbose
+--cc_host=<CC_ADDR>
+--verbose
--ec2_url=http://<CC_ADDR>:8773/services/Cloud
--network_manager=nova.network.manager.VlanManager
--fixed_range=<network/prefix>
---network_size=<# of addrs>
-
+--network_size=<# of addrs>
+
2. Create a “nova” group, and set permissions::
addgroup nova
-
+
The Nova config file should have its owner set to root:nova, and mode set to 0644, since they contain your MySQL server's root password. ::
chown -R root:nova /etc/nova
- chmod 644 /etc/nova/nova.conf
-
+ chmod 644 /etc/nova/nova.conf
+
Step 3 - Setup the SQL DB (MySQL for this setup)
------------------------------------------------
-
+
1. First you 'preseed' to bypass all the installation prompts::
bash
@@ -165,59 +168,59 @@ Step 3 - Setup the SQL DB (MySQL for this setup)
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
-
+
2. Install MySQL::
-
+
apt-get install -y mysql-server
-
+
3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any::
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
service mysql restart
4. MySQL DB configuration:
-
+
Create NOVA database::
mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
-
+
Update the DB to include user 'root'@'%' with super user privileges::
mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
-
+
Set mySQL root password::
- mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
-
+ mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
+
Compute Node Installation
`````````````````````````
-
+
Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
-
+
Network Configuration
---------------------
If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
-
+
Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
< begin /etc/network/interfaces >
# The loopback network interface
auto lo
iface lo inet loopback
-
+
# Networking for NOVA
auto br100
-
+
iface br100 inet dhcp
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
< end /etc/network/interfaces >
-
+
Next, restart networking to apply the changes::
-
+
sudo /etc/init.d/networking restart
Configuration
@@ -234,32 +237,32 @@ These are the commands you run to update the database if needed, and then set up
/usr/bin/python /usr/bin/nova-manage user admin <user_name>
/usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
/usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
-
+
Here is an example of what this looks like with real data::
/usr/bin/python /usr/bin/nova-manage db sync
/usr/bin/python /usr/bin/nova-manage user admin dub
/usr/bin/python /usr/bin/nova-manage project create dubproject dub
/usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
-
+
(I chose a /24 since that falls inside my /12 range I set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and I am using the max IP’s available in a /24. You can choose to use any valid amount that you would like.)
-
+
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o.
-
-On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
-
-
+
+On running the "nova-manage network create" command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. You only need to mark the network as a bridge if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
+
+
Step 2 - Create Nova certifications
-----------------------------------
-
-1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions.
+
+1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions.
::
mkdir –p /root/creds
/usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
-
-2. Unzip them in your home directory, and add them to your environment.
+
+2. Unzip them in your home directory, and add them to your environment.
::
@@ -271,7 +274,7 @@ Step 3 - Restart all relevant services
--------------------------------------
Restart all six services in total, just to cover the entire spectrum::
-
+
libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
Step 4 - Closing steps, and cleaning up
@@ -287,26 +290,52 @@ Another common issue is you cannot ping or SSH your instances after issusing the
killall dnsmasq
service nova-network restart
+To avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VM's that are running optimally::
+
+ chgrp kvm /dev/kvm
+ chmod g+rwx /dev/kvm
+
+If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step::
+
+ # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773
+
Testing the Installation
````````````````````````
+You can confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query::
+
+ mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'
+
+In return, you should see something similar to this::
+ +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+ | created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone |
+ +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+ | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova |
+ | 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL | 0 | 2 | osdemo02 | nova-compute | compute | 46056 | 0 | nova |
+ | 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL | 0 | 3 | osdemo02 | nova-scheduler | scheduler | 46065 | 0 | nova |
+ | 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL | 0 | 4 | osdemo01 | nova-compute | compute | 37050 | 0 | nova |
+ | 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova |
+ | 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova |
+ +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+You can see that 'osdemo0{1,2,4,5} are all running 'nova-compute.' When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.
+
You can then use `euca2ools` to test some items::
euca-describe-images
euca-describe-instances
-
+
If you have issues with the API key, you may need to re-source your creds file::
. /root/creds/novarc
-
+
If you don’t get any immediate errors, you’re successfully making calls to your cloud!
-Spinning up a VM for Testing
+Spinning up a VM for Testing
````````````````````````````
-(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
+(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
-The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
+The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.
@@ -332,13 +361,13 @@ Boot your instance:
::
- euca-run-instances $emi -k mykey -t m1.tiny
+ euca-run-instances $emi -k mykey -t m1.tiny
($emi is replaced with the output from the previous command)
Checking status, and confirming communication:
-Once you have booted the instance, you can check the status the the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
+Once you have booted the instance, you can check the status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
::
@@ -360,4 +389,4 @@ You can determine the instance-id with `euca-describe-instances`, and the format
For more information in creating you own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages for more information!
-Enjoy your new private cloud, and play responsibly! \ No newline at end of file
+Enjoy your new private cloud, and play responsibly!
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/adminguide/nova.manage.rst
index 0ec67c69c..0e9a29b6b 100644
--- a/doc/source/adminguide/nova.manage.rst
+++ b/doc/source/adminguide/nova.manage.rst
@@ -42,6 +42,17 @@ You can also run with a category argument such as user to see a list of all comm
These sections describe the available categories and arguments for nova-manage.
+Nova Db
+~~~~~~~
+
+``nova-manage db version``
+
+ Print the current database version.
+
+``nova-manage db sync``
+
+ Sync the database up to the most recent version. This is the standard way to create the db as well.
+
Nova User
~~~~~~~~~
@@ -68,7 +79,7 @@ Nova User
``nova-manage user modify <accesskey> <secretkey> <admin?T/F>``
Updates the indicated user keys, indicating with T or F if the user is an admin user. Leave any argument blank if you do not want to update it.
-
+
Nova Project
~~~~~~~~~~~~
@@ -79,7 +90,7 @@ Nova Project
``nova-manage project create <projectname>``
Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database).
-
+
``nova-manage project delete <projectname>``
Delete a nova project with the name <projectname>.
@@ -87,7 +98,7 @@ Nova Project
``nova-manage project environment <projectname> <username>``
Exports environment variables for the named project to a file named novarc.
-
+
``nova-manage project list``
Outputs a list of all the projects to the screen.
@@ -103,27 +114,27 @@ Nova Project
``nova-manage project zipfile``
Compresses all related files for a created project into a zip file nova.zip.
-
+
Nova Role
~~~~~~~~~
-nova-manage role <action> [<argument>]
+nova-manage role <action> [<argument>]
``nova-manage role add <username> <rolename> <(optional) projectname>``
- Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
+ Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
``nova-manage role has <username> <projectname>``
Checks the user or project and responds with True if the user has a global role with a particular project.
``nova-manage role remove <username> <rolename>``
- Remove the indicated role from the user.
+ Remove the indicated role from the user.
Nova Shell
~~~~~~~~~~
``nova-manage shell bpython``
- Starts a new bpython shell.
+ Starts a new bpython shell.
``nova-manage shell ipython``
@@ -150,12 +161,12 @@ Nova VPN
``nova-manage vpn run <projectname>``
- Starts the VPN for the named project.
+ Starts the VPN for the named project.
``nova-manage vpn spawn``
Runs all VPNs.
-
+
Nova Floating IPs
~~~~~~~~~~~~~~~~~
@@ -165,8 +176,8 @@ Nova Floating IPs
``nova-manage floating delete <ip_range>``
- Deletes floating IP addresses in the range given.
-
+ Deletes floating IP addresses in the range given.
+
``nova-manage floating list``
Displays a list of all floating IP addresses.
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst
index 119e3855b..ff43aa90b 100644
--- a/doc/source/adminguide/single.node.install.rst
+++ b/doc/source/adminguide/single.node.install.rst
@@ -52,7 +52,13 @@ When the installation is complete, you'll see the following lines:
Finished processing dependencies for nova==2010.1
-Step 4: Create a Nova administrator
+Step 4: Create the Nova Database
+--------------------------------
+Type or copy/paste in the following line to create your nova db::
+
+ sudo nova-manage db sync
+
+Step 5: Create a Nova administrator
-----------------------------------
Type or copy/paste in the following line to create a user named "anne."::
@@ -63,10 +69,10 @@ You see an access key and a secret key export, such as these made-up ones:::
export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
-Step 5: Create the network
+Step 6: Create the network
--------------------------
-Type or copy/paste in the following line to create a network prior to creating a project.
+Type or copy/paste in the following line to create a network prior to creating a project.
::
@@ -76,7 +82,7 @@ For this command, the IP address is the cidr notation for your netmask, such as
After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database.
-Step 6: Create a project with the user you created
+Step 7: Create a project with the user you created
--------------------------------------------------
Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
@@ -106,7 +112,7 @@ Type or copy/paste in the following line to create a project named IRT (for Ice
Data Base Updated
-Step 7: Unzip the nova.zip
+Step 8: Unzip the nova.zip
--------------------------
You should have a nova.zip file in your current working directory. Unzip it with this command:
@@ -128,7 +134,7 @@ You'll see these files extract.
extracting: cacert.pem
-Step 8: Source the rc file
+Step 9: Source the rc file
--------------------------
Type or copy/paste the following to source the novarc file in your current working directory.
@@ -137,14 +143,14 @@ Type or copy/paste the following to source the novarc file in your current worki
. novarc
-Step 9: Pat yourself on the back :)
+Step 10: Pat yourself on the back :)
-----------------------------------
Congratulations, your cloud is up and running, you’ve created an admin user, created a network, retrieved the user's credentials and put them in your environment.
Now you need an image.
-Step 9: Get an image
+Step 11: Get an image
--------------------
To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server.
@@ -168,7 +174,7 @@ To make things easier, we've provided a small image on the Rackspace CDN. Use th
-Step 10: Decompress the image file
+Step 12: Decompress the image file
----------------------------------
Use this command to extract the image files:::
@@ -187,7 +193,7 @@ You get a directory listing like so:::
|-- image
`-- info.json
-Step 11: Send commands to upload sample image to the cloud
+Step 13: Send commands to upload sample image to the cloud
----------------------------------------------------------
Type or copy/paste the following commands to create a manifest for the kernel.::
@@ -340,7 +346,7 @@ You should see this in response:::
Type or copy/paste the following commands to ssh to the instance using your private key.::
ssh -i mykey.priv root@10.0.0.3
-
+
Troubleshooting Installation
----------------------------
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index 0cb6c7c90..bb9d7a7fe 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -42,6 +42,17 @@ You can also run with a category argument such as user to see a list of all comm
These sections describe the available categories and arguments for nova-manage.
+Nova Db
+~~~~~~~
+
+``nova-manage db version``
+
+ Print the current database version.
+
+``nova-manage db sync``
+
+ Sync the database up to the most recent version. This is the standard way to create the db as well.
+
Nova User
~~~~~~~~~
@@ -68,7 +79,7 @@ Nova User
``nova-manage user modify <accesskey> <secretkey> <admin?T/F>``
Updates the indicated user keys, indicating with T or F if the user is an admin user. Leave any argument blank if you do not want to update it.
-
+
Nova Project
~~~~~~~~~~~~
@@ -79,7 +90,7 @@ Nova Project
``nova-manage project create <projectname>``
Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database).
-
+
``nova-manage project delete <projectname>``
Delete a nova project with the name <projectname>.
@@ -87,7 +98,7 @@ Nova Project
``nova-manage project environment <projectname> <username>``
Exports environment variables for the named project to a file named novarc.
-
+
``nova-manage project list``
Outputs a list of all the projects to the screen.
@@ -103,27 +114,27 @@ Nova Project
``nova-manage project zipfile``
Compresses all related files for a created project into a zip file nova.zip.
-
+
Nova Role
~~~~~~~~~
-nova-manage role <action> [<argument>]
+nova-manage role <action> [<argument>]
``nova-manage role add <username> <rolename> <(optional) projectname>``
- Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
+ Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
``nova-manage role has <username> <projectname>``
Checks the user or project and responds with True if the user has a global role with a particular project.
``nova-manage role remove <username> <rolename>``
- Remove the indicated role from the user.
+ Remove the indicated role from the user.
Nova Shell
~~~~~~~~~~
``nova-manage shell bpython``
- Starts a new bpython shell.
+ Starts a new bpython shell.
``nova-manage shell ipython``
@@ -150,20 +161,20 @@ Nova VPN
``nova-manage vpn run <projectname>``
- Starts the VPN for the named project.
+ Starts the VPN for the named project.
``nova-manage vpn spawn``
Runs all VPNs.
-
+
Nova Floating IPs
~~~~~~~~~~~~~~~~~
``nova-manage floating create <host> <ip_range>``
Creates floating IP addresses for the named host by the given range.
- floating delete <ip_range> Deletes floating IP addresses in the range given.
-
+ floating delete <ip_range> Deletes floating IP addresses in the range given.
+
``nova-manage floating list``
Displays a list of all floating IP addresses.
diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample
deleted file mode 100644
index 1ecfba635..000000000
--- a/krm_mapping.json.sample
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "machine" : ["kernel", "ramdisk"]
-}
diff --git a/nova/adminclient.py b/nova/adminclient.py
index b2609c8c4..c614b274c 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -21,6 +21,7 @@ Nova User API client library.
import base64
import boto
+import boto.exception
import httplib
from boto.ec2.regioninfo import RegionInfo
@@ -190,6 +191,45 @@ class HostInfo(object):
setattr(self, name, value)
+class InstanceType(object):
+ """
+ Information about a Nova instance type, as parsed through SAX.
+
+ **Fields include**
+
+ * name
+ * vcpus
+ * disk_gb
+ * memory_mb
+ * flavor_id
+
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.name = None
+ self.vcpus = None
+ self.disk_gb = None
+ self.memory_mb = None
+ self.flavor_id = None
+
+ def __repr__(self):
+ return 'InstanceType:%s' % self.name
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == "memoryMb":
+ self.memory_mb = str(value)
+ elif name == "flavorId":
+ self.flavor_id = str(value)
+ elif name == "diskGb":
+ self.disk_gb = str(value)
+ else:
+ setattr(self, name, str(value))
+
+
class NovaAdminClient(object):
def __init__(
@@ -249,10 +289,14 @@ class NovaAdminClient(object):
def get_user(self, name):
"""Grab a single user by name."""
- user = self.apiconn.get_object('DescribeUser', {'Name': name},
- UserInfo)
- if user.username != None:
- return user
+ try:
+ return self.apiconn.get_object('DescribeUser',
+ {'Name': name},
+ UserInfo)
+ except boto.exception.BotoServerError, e:
+ if e.status == 400 and e.error_code == 'NotFound':
+ return None
+ raise
def has_user(self, username):
"""Determine if user exists."""
@@ -337,6 +381,13 @@ class NovaAdminClient(object):
'MemberUsers': member_users}
return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
+ def modify_project(self, projectname, manager_user=None, description=None):
+ """Modifies an existing project."""
+ params = {'Name': projectname,
+ 'ManagerUser': manager_user,
+ 'Description': description}
+ return self.apiconn.get_status('ModifyProject', params)
+
def delete_project(self, projectname):
"""Permanently deletes the specified project."""
return self.apiconn.get_object('DeregisterProject',
@@ -373,3 +424,8 @@ class NovaAdminClient(object):
def get_hosts(self):
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
+
+ def get_instance_types(self):
+ """Grabs the list of all users."""
+ return self.apiconn.get_list('DescribeInstanceTypes', {},
+ [('item', InstanceType)])
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 238cb0f38..1a06b3f01 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -21,7 +21,6 @@ Starting point for routing EC2 requests.
"""
import datetime
-import routes
import webob
import webob.dec
import webob.exc
@@ -33,6 +32,7 @@ from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
+from nova.api.ec2 import cloud
from nova.auth import manager
@@ -131,9 +131,11 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- LOG.warn(_('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.'),
- access_key, failures, FLAGS.lockout_minutes)
+ lock_mins = FLAGS.lockout_minutes
+ msg = _('Access key %(access_key)s has had %(failures)d'
+ ' failed authentications and will be locked out'
+ ' for %(lock_mins)d minutes.') % locals()
+ LOG.warn(msg)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@@ -168,7 +170,7 @@ class Authenticate(wsgi.Middleware):
req.path)
# Be explicit for what exceptions are 403, the rest bubble as 500
except (exception.NotFound, exception.NotAuthorized) as ex:
- LOG.audit(_("Authentication Failure: %s"), str(ex))
+ LOG.audit(_("Authentication Failure: %s"), unicode(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -179,8 +181,10 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
- LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
- project.name, context=req.environ['ec2.context'])
+ uname = user.name
+ pname = project.name
+ msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals()
+ LOG.audit(msg, context=req.environ['ec2.context'])
return self.application
@@ -206,10 +210,11 @@ class Requestify(wsgi.Middleware):
LOG.debug(_('action: %s'), action)
for key, value in args.items():
- LOG.debug(_('arg: %s\t\tval: %s'), key, value)
+ LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
# Success!
- api_request = apirequest.APIRequest(self.controller, action, args)
+ api_request = apirequest.APIRequest(self.controller, action,
+ req.params['Version'], args)
req.environ['ec2.request'] = api_request
req.environ['ec2.action_args'] = args
return self.application
@@ -227,7 +232,7 @@ class Authorizer(wsgi.Middleware):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
- 'DescribeAvailabilityzones': ['all'],
+ 'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
@@ -277,8 +282,8 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
- LOG.audit(_("Unauthorized request for controller=%s "
- "and action=%s"), controller, action, context=context)
+ LOG.audit(_('Unauthorized request for controller=%(controller)s '
+ 'and action=%(action)s') % locals(), context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@@ -309,18 +314,32 @@ class Executor(wsgi.Application):
result = None
try:
result = api_request.invoke(context)
+ except exception.InstanceNotFound as ex:
+ LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
+ context=context)
+ ec2_id = cloud.id_to_ec2_id(ex.instance_id)
+ message = _('Instance %s not found') % ec2_id
+ return self._error(req, context, type(ex).__name__, message)
+ except exception.VolumeNotFound as ex:
+ LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
+ context=context)
+ ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x')
+ message = _('Volume %s not found') % ec2_id
+ return self._error(req, context, type(ex).__name__, message)
except exception.NotFound as ex:
- LOG.info(_('NotFound raised: %s'), str(ex), context=context)
- return self._error(req, context, type(ex).__name__, str(ex))
+ LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
+ return self._error(req, context, type(ex).__name__, unicode(ex))
except exception.ApiError as ex:
- LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
+ LOG.exception(_('ApiError raised: %s'), unicode(ex),
+ context=context)
if ex.code:
- return self._error(req, context, ex.code, str(ex))
+ return self._error(req, context, ex.code, unicode(ex))
else:
- return self._error(req, context, type(ex).__name__, str(ex))
+ return self._error(req, context, type(ex).__name__,
+ unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
- LOG.exception(_('Unexpected error raised: %s'), str(ex),
+ LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
extra=extra, context=context)
return self._error(req,
context,
@@ -343,7 +362,8 @@ class Executor(wsgi.Application):
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>%s</RequestID></Response>' %
- (code, message, context.request_id))
+ (utils.utf8(code), utils.utf8(message),
+ utils.utf8(context.request_id)))
return resp
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 758b612e8..735951082 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -26,6 +26,7 @@ from nova import db
from nova import exception
from nova import log as logging
from nova.auth import manager
+from nova.compute import instance_types
LOG = logging.getLogger('nova.api.ec2.admin')
@@ -62,6 +63,14 @@ def host_dict(host):
return {}
+def instance_dict(name, inst):
+ return {'name': name,
+ 'memory_mb': inst['memory_mb'],
+ 'vcpus': inst['vcpus'],
+ 'disk_gb': inst['local_gb'],
+ 'flavor_id': inst['flavorid']}
+
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -70,6 +79,10 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
+ def describe_instance_types(self, _context, **_kwargs):
+ return {'instanceTypeSet': [instance_dict(n, v) for n, v in
+ instance_types.INSTANCE_TYPES.iteritems()]}
+
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
return user_dict(manager.AuthManager().get_user(name))
@@ -111,19 +124,23 @@ class AdminController(object):
"""Add or remove a role for a user and project."""
if operation == 'add':
if project:
- LOG.audit(_("Adding role %s to user %s for project %s"), role,
- user, project, context=context)
+ msg = _("Adding role %(role)s to user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
- context=context)
+ msg = _("Adding sitewide role %(role)s to"
+ " user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
if project:
- LOG.audit(_("Removing role %s from user %s for project %s"),
- role, user, project, context=context)
+ msg = _("Removing role %(role)s from user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- user, context=context)
+ msg = _("Removing sitewide role %(role)s"
+ " from user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
@@ -139,8 +156,9 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
- LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
- project, context=context)
+ msg = _("Getting x509 for user: %(name)s"
+ " on project: %(project)s") % locals()
+ LOG.audit(msg, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -156,8 +174,9 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
- LOG.audit(_("Create project %s managed by %s"), name, manager_user,
- context=context)
+ msg = _("Create project %(name)s managed by"
+ " %(manager_user)s") % locals()
+ LOG.audit(msg, context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -165,6 +184,17 @@ class AdminController(object):
description=None,
member_users=None))
+ def modify_project(self, context, name, manager_user, description=None,
+ **kwargs):
+ """Modifies a project"""
+ msg = _("Modify project: %(name)s managed by"
+ " %(manager_user)s") % locals()
+ LOG.audit(msg, context=context)
+ manager.AuthManager().modify_project(name,
+ manager_user=manager_user,
+ description=description)
+ return True
+
def deregister_project(self, context, name):
"""Permanently deletes a project."""
LOG.audit(_("Delete project: %s"), name, context=context)
@@ -181,12 +211,13 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
- LOG.audit(_("Adding user %s to project %s"), user, project,
- context=context)
+ msg = _("Adding user %(user)s to project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
- LOG.audit(_("Removing user %s from project %s"), user, project,
- context=context)
+ msg = _("Removing user %(user)s from"
+ " project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 78576470a..7e72d67fb 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -83,9 +83,10 @@ def _try_convert(value):
class APIRequest(object):
- def __init__(self, controller, action, args):
+ def __init__(self, controller, action, version, args):
self.controller = controller
self.action = action
+ self.version = version
self.args = args
def invoke(self, context):
@@ -93,8 +94,10 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
- _error = _('Unsupported API request: controller = %s,'
- 'action = %s') % (self.controller, self.action)
+ controller = self.controller
+ action = self.action
+ _error = _('Unsupported API request: controller = %(controller)s,'
+ ' action = %(action)s') % locals()
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
@@ -130,7 +133,7 @@ class APIRequest(object):
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
- 'http://ec2.amazonaws.com/doc/2009-11-30/')
+ 'http://ec2.amazonaws.com/doc/%s/' % self.version)
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index f63ec9085..16a3a4521 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -252,18 +252,18 @@ class CloudController(object):
regions = []
for region in FLAGS.region_list:
name, _sep, host = region.partition('=')
- endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)
+ FLAGS.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
- 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
FLAGS.ec2_host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)}]
+ FLAGS.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
@@ -327,7 +327,9 @@ class CloudController(object):
if not group_name is None:
groups = [g for g in groups if g.name in group_name]
- return {'securityGroupInfo': groups}
+ return {'securityGroupInfo':
+ list(sorted(groups,
+ key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
@@ -512,8 +514,11 @@ class CloudController(object):
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
- # instance_id is passed in as a list of instances
- ec2_id = instance_id[0]
+ # instance_id may be passed in as a list of instances
+ if type(instance_id) == list:
+ ec2_id = instance_id[0]
+ else:
+ ec2_id = instance_id
instance_id = ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
@@ -529,11 +534,14 @@ class CloudController(object):
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
- volume_id = [ec2_id_to_id(x) for x in volume_id]
- volumes = self.volume_api.get_all(context)
- # NOTE(vish): volume_id is an optional list of volume ids to filter by.
- volumes = [self._format_volume(context, v) for v in volumes
- if volume_id is None or v['id'] in volume_id]
+ volumes = []
+ for ec2_id in volume_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ volume = self.volume_api.get(context, internal_id)
+ volumes.append(volume)
+ else:
+ volumes = self.volume_api.get_all(context)
+ volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
@@ -601,8 +609,9 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
- LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
- instance_id, device, context=context)
+ msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
+ " at %(device)s") % locals()
+ LOG.audit(msg, context=context)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
@@ -657,8 +666,11 @@ class CloudController(object):
reservations = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
- instance_id = [ec2_id_to_id(x) for x in instance_id]
- instances = [self.compute_api.get(context, x) for x in instance_id]
+ instances = []
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, internal_id)
+ instances.append(instance)
else:
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
@@ -707,7 +719,12 @@ class CloudController(object):
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
- r['groupSet'] = self._convert_to_set([], 'groups')
+ security_group_names = []
+ if instance.get('security_groups'):
+ for security_group in instance['security_groups']:
+ security_group_names.append(security_group['name'])
+ r['groupSet'] = self._convert_to_set(security_group_names,
+ 'groupId')
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
@@ -751,8 +768,8 @@ class CloudController(object):
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
- LOG.audit(_("Associate address %s to instance %s"), public_ip,
- instance_id, context=context)
+ LOG.audit(_("Associate address %(public_ip)s to"
+ " instance %(instance_id)s") % locals(), context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
@@ -824,11 +841,26 @@ class CloudController(object):
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
+ def _format_image(self, context, image):
+ """Convert from format defined by BaseImageService to S3 format."""
+ i = {}
+ i['imageId'] = image.get('id')
+ i['kernelId'] = image.get('kernel_id')
+ i['ramdiskId'] = image.get('ramdisk_id')
+ i['imageOwnerId'] = image.get('owner_id')
+ i['imageLocation'] = image.get('location')
+ i['imageState'] = image.get('status')
+ i['type'] = image.get('type')
+ i['isPublic'] = image.get('is_public')
+ i['architecture'] = image.get('architecture')
+ return i
+
def describe_images(self, context, image_id=None, **kwargs):
- # Note: image_id is a list!
+ # NOTE: image_id is a list!
images = self.image_service.index(context)
if image_id:
- images = filter(lambda x: x['imageId'] in image_id, images)
+ images = filter(lambda x: x['id'] in image_id, images)
+ images = [self._format_image(context, i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
@@ -840,8 +872,9 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- LOG.audit(_("Registered image %s with id %s"), image_location,
- image_id, context=context)
+ msg = _("Registered image %(image_location)s with"
+ " id %(image_id)s") % locals()
+ LOG.audit(msg, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index f2caac483..056c7dd27 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -38,9 +38,6 @@ from nova.api.openstack import shared_ip_groups
LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
-flags.DEFINE_string('os_krm_mapping_file',
- 'krm_mapping.json',
- 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -54,8 +51,8 @@ class FaultWrapper(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- LOG.exception(_("Caught error: %s"), str(ex))
- exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
+ LOG.exception(_("Caught error: %s"), unicode(ex))
+ exc = webob.exc.HTTPInternalServerError(explanation=unicode(ex))
return faults.Fault(exc)
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 037ed47a0..6d2fa16e8 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -54,7 +54,7 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
except NotImplementedError:
items = image_service.index(context)
for image in items:
- image_id = image['imageId']
+ image_id = image['id']
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index c604bd215..8b72704ba 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -124,17 +124,23 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, image_id):
- mapping_filename = FLAGS.os_krm_mapping_file
-
- with open(mapping_filename) as f:
- mapping = json.load(f)
- if image_id in mapping:
- return mapping[image_id]
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """
+ Machine images are associated with Kernels and Ramdisk images via
+ metadata stored in Glance as 'image_properties'
+ """
+ def lookup(param):
+ _image_id = image_id
+ try:
+ return image['properties'][param]
+ except KeyError:
+ raise exception.NotFound(
+ _("%(param)s property not found for image %(_image_id)s") %
+ locals())
- raise exception.NotFound(
- _("No entry for image '%s' in mapping file '%s'") %
- (image_id, mapping_filename))
+ image_id = str(image_id)
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+ return lookup('kernel_id'), lookup('ramdisk_id')
def create(self, req):
""" Creates a new server for a given user """
@@ -146,7 +152,8 @@ class Controller(wsgi.Controller):
req.environ['nova.context'])[0]
image_id = common.get_image_id_from_image_hash(self._image_service,
req.environ['nova.context'], env['server']['imageId'])
- kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
+ req, image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index a6915ce03..e652f1caa 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -473,8 +473,8 @@ class LdapDriver(object):
raise exception.NotFound(_("The group at dn %s doesn't exist") %
group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate(_("User %s is already a member of "
- "the group %s") % (uid, group_dn))
+ raise exception.Duplicate(_("User %(uid)s is already a member of "
+ "the group %(group_dn)s") % locals())
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
@@ -585,10 +585,11 @@ class LdapDriver(object):
else:
return None
- @staticmethod
- def __dn_to_uid(dn):
+ def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
- return dn.split(',')[0].split('=')[1]
+ query = '(objectclass=novaUser)'
+ user = self.__find_object(dn, query)
+ return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 1652e24e1..450ab803a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -272,16 +272,22 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
- LOG.audit(_("failed authorization: no project named %s (user=%s)"),
- project_id, user.name)
+ pjid = project_id
+ uname = user.name
+ LOG.audit(_("failed authorization: no project named %(pjid)s"
+ " (user=%(uname)s)") % locals())
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
- LOG.audit(_("Failed authorization: user %s not admin and not "
- "member of project %s"), user.name, project.name)
- raise exception.NotFound(_('User %s is not a member of project %s')
- % (user.id, project.id))
+ uname = user.name
+ uid = user.id
+ pjname = project.name
+ pjid = project.id
+ LOG.audit(_("Failed authorization: user %(uname)s not admin"
+ " and not member of project %(pjname)s") % locals())
+ raise exception.NotFound(_('User %(uid)s is not a member of'
+ ' project %(pjid)s') % locals())
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@@ -408,14 +414,16 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Adding role %s to user %s in project %s"), role,
- User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Adding role %(role)s to user %(uid)s"
+ " in project %(pid)s") % locals())
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
+ % locals())
with self.driver() as drv:
- drv.add_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
@@ -434,14 +442,16 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Removing role %s from user %s on project %s"),
- role, User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Removing role %(role)s from user %(uid)s"
+ " on project %(pid)s") % locals())
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Removing sitewide role %(role)s"
+ " from user %(uid)s") % locals())
with self.driver() as drv:
- drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
@@ -502,8 +512,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
- LOG.audit(_("Created project %s with manager %s"), name,
- manager_user)
+ LOG.audit(_("Created project %(name)s with"
+ " manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
@@ -530,11 +540,12 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
- LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
- Project.safe_id(project))
+ Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
@@ -550,11 +561,11 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
- LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
- return drv.remove_from_project(User.safe_id(user),
- Project.safe_id(project))
+ return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
@@ -634,7 +645,10 @@ class AuthManager(object):
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
- LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ rvname = rv.name
+ rvadmin = rv.admin
+ LOG.audit(_("Created user %(rvname)s"
+ " (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
@@ -656,7 +670,8 @@ class AuthManager(object):
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
- LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
+ LOG.audit(_("Admin status set to %(admin)r"
+ " for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 43332ed27..4703b580c 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,8 +92,9 @@ class API(base.Base):
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
- LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
- context.project_id, min_count)
+ pid = context.project_id
+ LOG.warn(_("Quota exceeded for %(pid)s,"
+ " tried to run %(min_count)s instances") % locals())
raise quota.QuotaError(_("Instance quota exceeded. You can only "
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
@@ -183,8 +184,10 @@ class API(base.Base):
instance = self.update(context, instance_id, **updates)
instances.append(instance)
- LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
- context.project_id, context.user_id, instance_id)
+ pid = context.project_id
+ uid = context.user_id
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " instance %(instance_id)s") % locals())
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
@@ -315,7 +318,7 @@ class API(base.Base):
def get(self, context, instance_id):
"""Get a single instance with the given ID."""
- rv = self.db.instance_get_by_id(context, instance_id)
+ rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
def get_all(self, context, project_id=None, reservation_id=None,
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 196d6a8df..309313fd0 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -38,8 +38,8 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError(_("Unknown instance type: %s"),
- instance_type)
+ raise exception.ApiError(_("Unknown instance type: %s") % \
+ instance_type, "Invalid")
return instance_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b03f58693..490dff5ba 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -37,7 +37,6 @@ terminating it.
import datetime
import random
import string
-import logging
import socket
import functools
@@ -77,8 +76,8 @@ def checks_instance_lock(function):
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
- LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
- self, context, instance_id, context=context)
+ LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
+ " |%(instance_id)s|") % locals(), context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
@@ -118,7 +117,7 @@ class ComputeManager(manager.Manager):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
- self.driver.init_host()
+ self.driver.init_host(host=self.host)
def _update_state(self, context, instance_id):
"""Update the state of an instance from the driver info."""
@@ -231,22 +230,25 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Terminating instance %s"), instance_id, context=context)
- if not FLAGS.stub_network:
- address = self.db.instance_get_floating_address(context,
- instance_ref['id'])
- if address:
- LOG.debug(_("Disassociating address %s"), address,
+ fixed_ip = instance_ref.get('fixed_ip')
+ if not FLAGS.stub_network and fixed_ip:
+ floating_ips = fixed_ip.get('floating_ips') or []
+ for floating_ip in floating_ips:
+ address = floating_ip['address']
+ LOG.debug("Disassociating address %s", address,
context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
+ network_topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ floating_ip['host'])
rpc.cast(context,
- self.get_network_topic(context),
+ network_topic,
{"method": "disassociate_floating_ip",
"args": {"floating_address": address}})
- address = self.db.instance_get_fixed_address(context,
- instance_ref['id'])
+ address = fixed_ip['address']
if address:
LOG.debug(_("Deallocating address %s"), address,
context=context)
@@ -256,7 +258,7 @@ class ComputeManager(manager.Manager):
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- volumes = instance_ref.get('volumes', []) or []
+ volumes = instance_ref.get('volumes') or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
@@ -278,11 +280,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id,
- instance_ref['state'],
- power_state.RUNNING,
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals(),
context=context)
self.db.instance_set_state(context,
@@ -307,9 +309,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id, instance_ref['state'], power_state.RUNNING)
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals())
self.driver.snapshot(instance_ref, image_id)
@@ -529,8 +533,8 @@ class ComputeManager(manager.Manager):
"""Attach a volume to an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint, context=context)
+ LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s"
+ " to %(mountpoint)s") % locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@@ -545,8 +549,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- LOG.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint, context=context)
+ LOG.exception(_("instance %(instance_id)s: attach failed"
+ " %(mountpoint)s, removing") % locals(), context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -560,9 +564,9 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
- LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
- volume_id, volume_ref['mountpoint'], instance_id,
- context=context)
+ mp = volume_ref['mountpoint']
+ LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
+ " on instance %(instance_id)s") % locals(), context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 14d0e8ca1..04e08a235 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -352,8 +352,9 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get blockstats for "%(disk)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rd, wr)
@@ -374,8 +375,9 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get ifstats for "%(interface)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rx, tx)
diff --git a/nova/db/api.py b/nova/db/api.py
index a2c1dbdce..850a5126f 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,7 +71,6 @@ class NoMoreTargets(exception.Error):
"""No more available blades"""
pass
-
###################
@@ -356,6 +355,11 @@ def instance_get_all_by_project(context, project_id):
return IMPL.instance_get_all_by_project(context, project_id)
+def instance_get_all_by_host(context, host):
+ """Get all instances belonging to a host."""
+ return IMPL.instance_get_all_by_host(context, host)
+
+
def instance_get_all_by_reservation(context, reservation_id):
"""Get all instance belonging to a reservation."""
return IMPL.instance_get_all_by_reservation(context, reservation_id)
@@ -380,11 +384,6 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_get_by_id(context, instance_id):
- """Get an instance by id."""
- return IMPL.instance_get_by_id(context, instance_id)
-
-
def instance_is_vpn(context, instance_id):
"""True if instance is a vpn."""
return IMPL.instance_is_vpn(context, instance_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 827f81ae2..329ad3cef 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,6 +19,7 @@
Implementation of SQLAlchemy backend.
"""
+import datetime
import warnings
from nova import db
@@ -247,7 +248,8 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No service for %s, %s') % (host, binary))
+ raise exception.NotFound(_('No service for %(host)s, %(binary)s')
+ % locals())
return result
@@ -577,7 +579,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
'AND instance_id IS NOT NULL '
'AND allocated = 0',
{'host': host,
- 'time': time.isoformat()})
+ 'time': time})
return result.rowcount
@@ -680,8 +682,14 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- instance_ref = instance_get(context, instance_id, session=session)
- instance_ref.delete(session=session)
+ session.execute('update instances set deleted=1,'
+ 'deleted_at=:at where id=:id',
+ {'id': instance_id,
+ 'at': datetime.datetime.utcnow()})
+ session.execute('update security_group_instance_association '
+ 'set deleted=1,deleted_at=:at where instance_id=:id',
+ {'id': instance_id,
+ 'at': datetime.datetime.utcnow()})
@require_context
@@ -695,6 +703,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -708,7 +717,9 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No instance for id %s') % instance_id)
+ raise exception.InstanceNotFound(_('Instance %s not found')
+ % instance_id,
+ instance_id)
return result
@@ -719,6 +730,7 @@ def instance_get_all(context):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -729,11 +741,24 @@ def instance_get_all_by_user(context, user_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
all()
+@require_admin_context
+def instance_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
+ filter_by(host=host).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
@require_context
def instance_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -742,6 +767,7 @@ def instance_get_all_by_project(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -755,6 +781,7 @@ def instance_get_all_by_reservation(context, reservation_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -762,6 +789,7 @@ def instance_get_all_by_reservation(context, reservation_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=False).\
@@ -781,33 +809,6 @@ def instance_get_project_vpn(context, project_id):
@require_context
-def instance_get_by_id(context, instance_id):
- session = get_session()
-
- if is_admin_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
- filter_by(id=instance_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
- elif is_user_context(context):
- result = session.query(models.Instance).\
- options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload_all('fixed_ip.network')).\
- filter_by(project_id=context.project_id).\
- filter_by(id=instance_id).\
- filter_by(deleted=False).\
- first()
- if not result:
- raise exception.NotFound(_('Instance %s not found') % (instance_id))
-
- return result
-
-
-@require_context
def instance_get_fixed_address(context, instance_id):
session = get_session()
with session.begin():
@@ -946,8 +947,8 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('no keypair for user %s, name %s') %
- (user_id, name))
+ raise exception.NotFound(_('no keypair for user %(user_id)s,'
+ ' name %(name)s') % locals())
return result
@@ -1419,17 +1420,20 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No volume for id %s') % volume_id)
+ raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
+ volume_id)
return result
@@ -1474,7 +1478,8 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound(_('Volume %s not found') % ec2_id)
+ raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
+ volume_id)
return result.instance
@@ -1561,8 +1566,8 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
- _('No security group named %s for project: %s')
- % (group_name, project_id))
+ _('No security group named %(group_name)s'
+ ' for project: %(project_id)s') % locals())
return result
@@ -1615,6 +1620,11 @@ def security_group_destroy(context, security_group_id):
# TODO(vish): do we have to use sql here?
session.execute('update security_groups set deleted=1 where id=:id',
{'id': security_group_id})
+ session.execute('update security_group_instance_association '
+ 'set deleted=1,deleted_at=:at '
+ 'where security_group_id=:id',
+ {'id': security_group_id,
+ 'at': datetime.datetime.utcnow()})
session.execute('update security_group_rules set deleted=1 '
'where group_id=:id',
{'id': security_group_id})
@@ -1946,8 +1956,8 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
- raise exception.NotFound(_("No console pool with id %(pool_id)s") %
- {'pool_id': pool_id})
+ raise exception.NotFound(_("No console pool with id %(pool_id)s")
+ % locals())
return result
@@ -1963,12 +1973,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
- raise exception.NotFound(_('No console pool of type %(type)s '
+ raise exception.NotFound(_('No console pool of type %(console_type)s '
'for compute host %(compute_host)s '
- 'on proxy host %(host)s') %
- {'type': console_type,
- 'compute_host': compute_host,
- 'host': host})
+ 'on proxy host %(host)s') % locals())
return result
@@ -2006,9 +2013,7 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
first()
if not result:
raise exception.NotFound(_('No console for instance %(instance_id)s '
- 'in pool %(pool_id)s') %
- {'instance_id': instance_id,
- 'pool_id': pool_id})
+ 'in pool %(pool_id)s') % locals())
return result
@@ -2029,9 +2034,7 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
- idesc = (_("on instance %s") % instance_id) if instance_id else ""
+ idesc = (_("on instance %s") % instance_id) if instance_id else ""
raise exception.NotFound(_("No console with id %(console_id)s"
- " %(instance)s") %
- {'instance': idesc,
- 'console_id': console_id})
+ " %(idesc)s") % locals())
return result
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
index a312a7190..366944591 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -134,6 +134,9 @@ instances = Table('instances', meta,
Column('ramdisk_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
+ Column('server_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
Column('launch_index', Integer()),
Column('key_name',
String(length=255, convert_unicode=False, assert_unicode=None,
@@ -178,23 +181,6 @@ instances = Table('instances', meta,
)
-iscsi_targets = Table('iscsi_targets', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('target_num', Integer()),
- Column('host',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=True),
- )
-
-
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
@@ -523,7 +509,7 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
- instances, iscsi_targets, key_pairs, networks,
+ instances, key_pairs, networks,
projects, quotas, security_groups, security_group_inst_assoc,
security_group_rules, services, users,
user_project_association, user_project_role_association,
@@ -539,7 +525,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
- instances, iscsi_targets, key_pairs, networks,
+ instances, key_pairs, networks,
projects, quotas, security_groups, security_group_inst_assoc,
security_group_rules, services, users,
user_project_association, user_project_role_association,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index bd3a3e6f8..699b837f8 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -41,6 +41,10 @@ networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
#
# New Tables
@@ -131,6 +135,23 @@ instance_actions = Table('instance_actions', meta,
)
+iscsi_targets = Table('iscsi_targets', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('target_num', Integer()),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=True),
+ )
+
+
#
# Tables to alter
#
@@ -188,7 +209,8 @@ def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
- for table in (certificates, consoles, console_pools, instance_actions):
+ for table in (certificates, consoles, console_pools, instance_actions,
+ iscsi_targets):
try:
table.create()
except Exception:
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 33d14827b..2a13c5466 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -46,12 +46,15 @@ def db_version():
meta.reflect(bind=engine)
try:
for table in ('auth_tokens', 'export_devices', 'fixed_ips',
- 'floating_ips', 'instances', 'iscsi_targets',
+ 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
- 'security_group_rules',
- 'security_group_instance_association', 'services',
+ 'security_group_instance_association',
+ 'security_group_rules', 'security_groups',
+ 'services',
'users', 'user_project_association',
- 'user_project_role_association', 'volumes'):
+ 'user_project_role_association',
+ 'user_role_association',
+ 'volumes'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index dc476acf4..e151bee18 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -311,10 +311,14 @@ class SecurityGroup(BASE, NovaBase):
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
- 'SecurityGroupInstanceAssociation.security_group_id,'
+ 'SecurityGroupInstanceAssociation.security_group_id,'
+ 'SecurityGroupInstanceAssociation.deleted == False,'
'SecurityGroup.deleted == False)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
+ # (anthony) the condition below shouldn't be necessary now that the
+ # association is being marked as deleted. However, removing this
+ # may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == False)',
backref='security_groups')
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index c3876c02a..4a9a28f43 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -20,8 +20,10 @@ Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
+from sqlalchemy import pool
from sqlalchemy.orm import sessionmaker
+from nova import exception
from nova import flags
FLAGS = flags.FLAGS
@@ -36,11 +38,18 @@ def get_session(autocommit=True, expire_on_commit=False):
global _MAKER
if not _MAKER:
if not _ENGINE:
+ kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
+ 'echo': False}
+
+ if FLAGS.sql_connection.startswith('sqlite'):
+ kwargs['poolclass'] = pool.NullPool
+
_ENGINE = create_engine(FLAGS.sql_connection,
- pool_recycle=FLAGS.sql_idle_timeout,
- echo=False)
+ **kwargs)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
+ session.query = exception.wrap_db_error(session.query)
+ session.flush = exception.wrap_db_error(session.flush)
return session
diff --git a/nova/exception.py b/nova/exception.py
index ecd814e5d..7d65bd6a5 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -33,8 +33,9 @@ class ProcessExecutionError(IOError):
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
- message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
- % (description, cmd, exit_code, stdout, stderr)
+ message = _("%(description)s\nCommand: %(cmd)s\n"
+ "Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
+ "Stderr: %(stderr)r") % locals()
IOError.__init__(self, message)
@@ -45,7 +46,6 @@ class Error(Exception):
class ApiError(Error):
-
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
@@ -56,6 +56,18 @@ class NotFound(Error):
pass
+class InstanceNotFound(NotFound):
+ def __init__(self, message, instance_id):
+ self.instance_id = instance_id
+ super(InstanceNotFound, self).__init__(message)
+
+
+class VolumeNotFound(NotFound):
+ def __init__(self, message, volume_id):
+ self.volume_id = volume_id
+ super(VolumeNotFound, self).__init__(message)
+
+
class Duplicate(Error):
pass
@@ -80,6 +92,24 @@ class TimeoutException(Error):
pass
+class DBError(Error):
+ """Wraps an implementation specific exception"""
+ def __init__(self, inner_exception):
+ self.inner_exception = inner_exception
+ super(DBError, self).__init__(str(inner_exception))
+
+
+def wrap_db_error(f):
+    def _wrap(*args, **kwargs):
+        try:
+            return f(*args, **kwargs)
+        except Exception, e:
+            LOG.exception(_('DB exception wrapped'))
+            raise DBError(e)
+    _wrap.func_name = f.func_name
+    return _wrap
+
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 7c2d7177b..dd82a9366 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -45,8 +45,9 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- LOG.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ nm = self.name
+ LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
+ ' %(message)s') % locals())
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
@@ -92,8 +93,8 @@ class Backend(base.BaseBackend):
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- LOG.debug(_('Binding %s to %s with key %s'),
- queue, exchange, routing_key)
+ LOG.debug(_('Binding %(queue)s to %(exchange)s with'
+ ' key %(routing_key)s') % locals())
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
def declare_consumer(self, queue, callback, *args, **kwargs):
@@ -117,7 +118,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- LOG.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %(queue)s: %(message)s') % locals())
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 81e2e36f9..3ba3fe6fa 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -208,7 +208,7 @@ def _get_my_ip():
(addr, port) = csock.getsockname()
csock.close()
return addr
- except socket.gaierror as ex:
+ except socket.error as ex:
return "127.0.0.1"
@@ -218,7 +218,7 @@ def _get_my_ip():
DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
- 'list of region=url pairs separated by commas')
+ 'list of region=fqdn pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
@@ -286,8 +286,8 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite',
'connection string for sql database')
-DEFINE_string('sql_idle_timeout',
- '3600',
+DEFINE_integer('sql_idle_timeout',
+ 3600,
'timeout for idle sql database connections')
DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
diff --git a/nova/image/local.py b/nova/image/local.py
index b44593221..f78b9aa89 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -18,6 +18,7 @@
import cPickle as pickle
import os.path
import random
+import tempfile
from nova import exception
from nova.image import service
@@ -26,15 +27,12 @@ from nova.image import service
class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
+ It assumes that image_ids are integers.
- It assumes that image_ids are integers."""
+ """
def __init__(self):
- self._path = "/tmp/nova/images"
- try:
- os.makedirs(self._path)
- except OSError: # Exists
- pass
+ self._path = tempfile.mkdtemp()
def _path_to(self, image_id):
return os.path.join(self._path, str(image_id))
@@ -56,9 +54,7 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def create(self, context, data):
- """
- Store the image data and return the new image id.
- """
+ """Store the image data and return the new image id."""
id = random.randint(0, 2 ** 31 - 1)
data['id'] = id
self.update(context, id, data)
@@ -72,8 +68,9 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def delete(self, context, image_id):
- """
- Delete the given image. Raises OSError if the image does not exist.
+ """Delete the given image.
+ Raises OSError if the image does not exist.
+
"""
try:
os.unlink(self._path_to(image_id))
@@ -81,8 +78,13 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def delete_all(self):
- """
- Clears out all images in local directory
- """
+ """Clears out all images in local directory."""
for id in self._ids():
os.unlink(self._path_to(id))
+
+ def delete_imagedir(self):
+ """Deletes the local directory.
+ Raises OSError if directory is not empty.
+
+ """
+ os.rmdir(self._path)
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 7b04aa072..71304cdd6 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -36,6 +36,22 @@ from nova.image import service
FLAGS = flags.FLAGS
+def map_s3_to_base(image):
+ """Convert from S3 format to format defined by BaseImageService."""
+ i = {}
+ i['id'] = image.get('imageId')
+ i['name'] = image.get('imageId')
+ i['kernel_id'] = image.get('kernelId')
+ i['ramdisk_id'] = image.get('ramdiskId')
+ i['location'] = image.get('imageLocation')
+ i['owner_id'] = image.get('imageOwnerId')
+ i['status'] = image.get('imageState')
+ i['type'] = image.get('type')
+ i['is_public'] = image.get('isPublic')
+ i['architecture'] = image.get('architecture')
+ return i
+
+
class S3ImageService(service.BaseImageService):
def modify(self, context, image_id, operation):
@@ -70,7 +86,8 @@ class S3ImageService(service.BaseImageService):
response = self._conn(context).make_request(
method='GET',
bucket='_images')
- return json.loads(response.read())
+ images = json.loads(response.read())
+ return [map_s3_to_base(i) for i in images]
def show(self, context, image_id):
"""return a image object if the context has permissions"""
diff --git a/nova/log.py b/nova/log.py
index e1c9f46f4..b541488bd 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -31,6 +31,7 @@ import cStringIO
import json
import logging
import logging.handlers
+import sys
import traceback
from nova import flags
@@ -191,6 +192,12 @@ class NovaLogger(logging.Logger):
kwargs.pop('exc_info')
self.error(message, **kwargs)
+
+def handle_exception(type, value, tb):
+ logging.root.critical(str(value), exc_info=(type, value, tb))
+
+
+sys.excepthook = handle_exception
logging.setLoggerClass(NovaLogger)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index d29e17603..ed37e8ba7 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -20,6 +20,7 @@ Implements vlans, bridges, and iptables rules using linux utilities.
import os
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -37,6 +38,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
+flags.DEFINE_string('dhcp_domain',
+ 'novalocal',
+ 'domain to use for building the hostnames')
flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
@@ -152,6 +156,8 @@ def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
_confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
+ _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
_confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
@@ -160,6 +166,8 @@ def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
_remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
+ _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
_remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
@@ -177,32 +185,77 @@ def ensure_vlan(vlan_num):
LOG.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
- _execute("sudo ifconfig %s up" % interface)
+ _execute("sudo ip link set %s up" % interface)
return interface
def ensure_bridge(bridge, interface, net_attrs=None):
- """Create a bridge unless it already exists"""
+ """Create a bridge unless it already exists.
+
+ :param interface: the interface to create the bridge on.
+ :param net_attrs: dictionary with attributes used to create the bridge.
+
+ If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+ using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
+ the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+ The code will attempt to move any ips that already exist on the interface
+ onto the bridge and reset the default gateway if necessary.
+ """
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
- if interface:
- _execute("sudo brctl addif %s %s" % (bridge, interface))
+ _execute("sudo ip link set %s up" % bridge)
if net_attrs:
- _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
- (bridge,
- net_attrs['gateway'],
- net_attrs['broadcast'],
- net_attrs['netmask']))
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+        # bridge for it to respond to requests properly
+ suffix = net_attrs['cidr'].rpartition('/')[2]
+ out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
+ (net_attrs['gateway'],
+ suffix,
+ net_attrs['broadcast'],
+ bridge),
+ check_exit_code=False)
+ if err and err != "RTNETLINK answers: File exists\n":
+ raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
- _execute("sudo ifconfig %s add %s up" % \
- (bridge,
- net_attrs['cidr_v6']))
- else:
- _execute("sudo ifconfig %s up" % bridge)
+ _execute("sudo ip -f inet6 addr change %s dev %s" %
+ (net_attrs['cidr_v6'], bridge))
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+ # to forward packets properly.
+ if(FLAGS.public_interface == bridge):
+ _execute("sudo ip link set dev %s promisc on" % bridge)
+ if interface:
+ # NOTE(vish): This will break if there is already an ip on the
+ # interface, so we move any ips to the bridge
+ gateway = None
+ out, err = _execute("sudo route -n")
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+ gateway = fields[1]
+ out, err = _execute("sudo ip addr show dev %s scope global" %
+ interface)
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "inet":
+ params = ' '.join(fields[1:-1])
+ _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
+ _execute("sudo ip addr add %s dev %s" % (params, bridge))
+ if gateway:
+ _execute("sudo route add 0.0.0.0 gw %s" % gateway)
+ out, err = _execute("sudo brctl addif %s %s" %
+ (bridge, interface),
+ check_exit_code=False)
+
+ if (err and err != "device %s is already a member of a bridge; can't "
+ "enslave it to bridge %s.\n" % (interface, bridge)):
+ raise exception.Error("Failed to add interface: %s" % err)
+
if FLAGS.use_nova_chains:
(out, err) = _execute("sudo iptables -N nova_forward",
check_exit_code=False)
@@ -298,10 +351,9 @@ interface %s
% pid, check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill -HUP %d' % pid)
- return
+ _execute('sudo kill %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- LOG.debug(_("Hupping radvd threw %s"), exc)
+ LOG.debug(_("killing radvd threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
command = _ra_cmd(network_ref)
@@ -314,8 +366,9 @@ interface %s
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address"""
instance_ref = fixed_ip_ref['instance']
- return "%s,%s.novalocal,%s" % (instance_ref['mac_address'],
+ return "%s,%s.%s,%s" % (instance_ref['mac_address'],
instance_ref['hostname'],
+ FLAGS.dhcp_domain,
fixed_ip_ref['address'])
@@ -330,7 +383,8 @@ def _execute(cmd, *args, **kwargs):
def _device_exists(device):
"""Check if ethernet device exists"""
- (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False)
+ (_out, err) = _execute("ip link show dev %s" % device,
+ check_exit_code=False)
return not err
@@ -360,6 +414,7 @@ def _dnsmasq_cmd(net):
' --strict-order',
' --bind-interfaces',
' --conf-file=',
+ ' --domain=%s' % FLAGS.dhcp_domain,
' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
' --listen-address=%s' % net['gateway'],
' --except-interface=lo',
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8420bc28c..981f5ec80 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -83,7 +83,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
-flags.DEFINE_integer('cnt_vpn_clients', 5,
+flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
'Driver to use for network creation')
@@ -118,6 +118,10 @@ class NetworkManager(manager.Manager):
super(NetworkManager, self).__init__(*args, **kwargs)
def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ self.driver.init_host()
# Set up networking for the projects for which we're already
# the designated network host.
ctxt = context.get_admin_context()
@@ -198,8 +202,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s leased to bad"
+ " mac %(inst_addr)s vs %(mac)s") % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@@ -218,8 +223,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s released from bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s released from"
+ " bad mac %(inst_addr)s vs %(mac)s") % locals())
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address,
context=context)
@@ -431,6 +437,10 @@ class FlatDHCPManager(FlatManager):
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface,
network_ref)
+ if not FLAGS.fake_network:
+ self.driver.update_dhcp(context, network_id)
+ if(FLAGS.use_ipv6):
+ self.driver.update_ra(context, network_id)
class VlanManager(NetworkManager):
@@ -465,7 +475,6 @@ class VlanManager(NetworkManager):
"""
super(VlanManager, self).init_host()
self.driver.metadata_forward()
- self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
@@ -500,7 +509,7 @@ class VlanManager(NetworkManager):
network_ref['bridge'])
def create_networks(self, context, cidr, num_networks, network_size,
- vlan_start, vpn_start, cidr_v6):
+ cidr_v6, vlan_start, vpn_start):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 43ed7ffe7..05ddace4b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -180,7 +180,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all() \
+ buckets = [b for b in bucket.Bucket.all()
if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@@ -268,12 +268,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
+ bname = self.bucket.name
+ nm = self.name
+ LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %s from bucket "
- "%s"), self.name, self.bucket.name,
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to get object %(nm)s"
+ " from bucket %(bname)s") % locals(),
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -289,12 +291,13 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
- "%s"),
- self.name, self.bucket.name, context=request.context)
+ LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
+ " bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -310,16 +313,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
-
- LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
context=request.context)
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete object "
- "%(object)s from bucket %(bucket)s") %
- {'object': self.name,
- 'bucket': self.bucket.name},
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
+ "bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -390,10 +391,10 @@ class ImagesResource(resource.Resource):
image_location = get_argument(request, 'image_location', u'')
image_path = os.path.join(FLAGS.images_path, image_id)
- if not image_path.startswith(FLAGS.images_path) or \
- os.path.exists(image_path):
+ if ((not image_path.startswith(FLAGS.images_path)) or
+ os.path.exists(image_path)):
LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
+ "%s"),
image_path, context=request.context)
raise exception.NotAuthorized()
@@ -427,8 +428,8 @@ class ImagesResource(resource.Resource):
if operation:
# operation implies publicity toggle
newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
- newstatus, context=request.context)
+ LOG.audit(_("Toggling publicity flag of image %(image_id)s"
+ " %(newstatus)r") % locals(), context=request.context)
image_object.set_public(newstatus)
else:
# other attributes imply update
diff --git a/nova/rpc.py b/nova/rpc.py
index bbfa71138..2b1f7298b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -46,7 +46,7 @@ LOG = logging.getLogger('nova.rpc')
class Connection(carrot_connection.BrokerConnection):
"""Connection instance object"""
@classmethod
- def instance(cls, new=False):
+ def instance(cls, new=True):
"""Returns the instance"""
if new or not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
@@ -89,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- LOG.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ fl_host = FLAGS.rabbit_host
+ fl_port = FLAGS.rabbit_port
+ fl_intv = FLAGS.rabbit_retry_interval
+ LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable. Trying again in %(fl_intv)d seconds.")
+ % locals())
self.failed_connection = True
if self.failed_connection:
LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
+ "after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
@@ -152,7 +153,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
- LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
+ LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@@ -167,7 +168,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
- LOG.debug(_('received %s') % (message_data))
+ LOG.debug(_('received %s') % message_data)
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@@ -180,7 +181,7 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
- LOG.warn(_('no method for message: %s') % (message_data))
+ LOG.warn(_('no method for message: %s') % message_data)
msg_reply(msg_id, _('No method for message: %s') % message_data)
return
@@ -245,7 +246,7 @@ def msg_reply(msg_id, reply=None, failure=None):
LOG.error(_("Returning exception %s to caller"), message)
LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
- conn = Connection.instance(True)
+ conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
try:
publisher.send({'result': reply, 'failure': failure})
@@ -318,7 +319,7 @@ def call(context, topic, msg):
self.result = data['result']
wait_msg = WaitMessage()
- conn = Connection.instance(True)
+ conn = Connection.instance()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
consumer.register_callback(wait_msg)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a4d6dd574..e9b47512e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index baf4966d4..0191ceb3d 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -43,7 +43,9 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
- if instance_ref['availability_zone'] and context.is_admin:
+ if (instance_ref['availability_zone']
+ and ':' in instance_ref['availability_zone']
+ and context.is_admin):
zone, _x, host = instance_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
@@ -75,7 +77,9 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
- if (':' in volume_ref['availability_zone']) and context.is_admin:
+ if (volume_ref['availability_zone']
+ and ':' in volume_ref['availability_zone']
+ and context.is_admin):
zone, _x, host = volume_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
diff --git a/nova/service.py b/nova/service.py
index 91e00d3d1..59648adf2 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -157,8 +157,9 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.audit(_("Starting %s node (version %s)"), topic,
- version.version_string_with_vcs())
+ vcs_string = version.version_string_with_vcs()
+ logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
+ % locals())
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -221,10 +222,10 @@ def serve(*services):
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
-
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ flag_get = FLAGS.get(flag, None)
+ logging.debug("%(flag)s : %(flag_get)s" % locals())
for x in services:
x.start()
diff --git a/nova/test.py b/nova/test.py
index 881baccd5..a12cf9d32 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -69,9 +69,10 @@ class TestCase(unittest.TestCase):
network_manager.VlanManager().create_networks(ctxt,
FLAGS.fixed_range,
5, 16,
+ FLAGS.fixed_range_v6,
FLAGS.vlan_start,
FLAGS.vpn_start,
- FLAGS.fixed_range_v6)
+ )
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 5d9ddefbe..8ab4d7569 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -143,6 +143,7 @@ class LocalImageServiceTest(unittest.TestCase,
def tearDown(self):
self.service.delete_all()
+ self.service.delete_imagedir()
self.stubs.UnsetAll()
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 29883e7c8..724f14f19 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -76,7 +76,7 @@ class ServersTest(unittest.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
diff --git a/nova/tests/db/nova.austin.sqlite b/nova/tests/db/nova.austin.sqlite
new file mode 100644
index 000000000..ad1326bce
--- /dev/null
+++ b/nova/tests/db/nova.austin.sqlite
Binary files differ
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 66a16b0cb..2569e262b 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -36,6 +36,7 @@ from nova.auth import manager
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
def __init__(self, response_string):
+ self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
@@ -66,13 +67,16 @@ class FakeHttplibConnection(object):
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
- sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(sock)
+ self.sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(self.sock)
self.http_response.begin()
def getresponse(self):
return self.http_response
+ def getresponsebody(self):
+ return self.sock.response_string
+
def close(self):
"""Required for compatibility with boto/tornado"""
pass
@@ -104,7 +108,7 @@ class ApiEc2TestCase(test.TestCase):
self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(),
'nova.api.ec2.cloud.CloudController'))
- def expect_http(self, host=None, is_secure=False):
+ def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
@@ -113,13 +117,31 @@ class ApiEc2TestCase(test.TestCase):
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
+ if api_version:
+ self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
- http = FakeHttplibConnection(
+ self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
- self.ec2.new_http_connection(host, is_secure).AndReturn(http)
- return http
+ self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
+ return self.http
+
+ def test_xmlns_version_matches_request_version(self):
+ self.expect_http(api_version='2010-10-30')
+ self.mox.ReplayAll()
+
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # Any request should be fine
+ self.ec2.get_all_instances()
+ self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
+ 'The version in the xmlns of the response does '
+ 'not match the API version given in the request.')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 771b1fcc0..445cc6e8b 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -87,6 +87,16 @@ class CloudTestCase(test.TestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_regions(self):
+ """Makes sure describe regions runs without raising an exception"""
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ regions = FLAGS.region_list
+ FLAGS.region_list = ["one=test_host1", "two=test_host2"]
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+ FLAGS.region_list = regions
+
def test_describe_addresses(self):
"""Makes sure describe addresses runs without raising an exception"""
address = "10.10.10.10"
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 09f6ee94a..2aa0690e7 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -49,7 +49,7 @@ class ComputeTestCase(test.TestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.get_admin_context()
+ self.context = context.RequestContext('fake', 'fake', False)
def tearDown(self):
self.manager.delete_user(self.user)
@@ -69,6 +69,13 @@ class ComputeTestCase(test.TestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+ def _create_group(self):
+ values = {'name': 'testgroup',
+ 'description': 'testgroup',
+ 'user_id': self.user.id,
+ 'project_id': self.project.id}
+ return db.security_group_create(self.context, values)
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -82,21 +89,53 @@ class ComputeTestCase(test.TestCase):
def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': self.user.id,
- 'project_id': self.project.id}
- group = db.security_group_create(self.context, values)
+ group = self._create_group()
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
image_id=None,
- security_group=['default'])
+ security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
- self.context, ref[0]['id'])), 1)
+ self.context, ref[0]['id'])), 1)
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 1)
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_destroy_instance_disassociates_security_groups(self):
+ """Make sure destroying disassociates security groups"""
+ group = self._create_group()
+
+ ref = self.compute_api.create(
+ self.context,
+ instance_type=FLAGS.default_instance_type,
+ image_id=None,
+ security_group=['testgroup'])
+ try:
+ db.instance_destroy(self.context, ref[0]['id'])
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 0)
finally:
db.security_group_destroy(self.context, group['id'])
+
+ def test_destroy_security_group_disassociates_instances(self):
+ """Make sure destroying security groups disassociates instances"""
+ group = self._create_group()
+
+ ref = self.compute_api.create(
+ self.context,
+ instance_type=FLAGS.default_instance_type,
+ image_id=None,
+ security_group=['testgroup'])
+
+ try:
+ db.security_group_destroy(self.context, group['id'])
+ group = db.security_group_get(context.get_admin_context(
+ read_deleted=True), group['id'])
+ self.assert_(len(group.instances) == 0)
+ finally:
db.instance_destroy(self.context, ref[0]['id'])
def test_run_terminate(self):
diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py
new file mode 100644
index 000000000..6992773f5
--- /dev/null
+++ b/nova/tests/test_localization.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import glob
+import logging
+import os
+import re
+import sys
+import unittest
+
+import nova
+
+
+class LocalizationTestCase(unittest.TestCase):
+ def test_multiple_positional_format_placeholders(self):
+ pat = re.compile("\W_\(")
+ single_pat = re.compile("\W%\W")
+ root_path = os.path.dirname(nova.__file__)
+ problems = {}
+ for root, dirs, files in os.walk(root_path):
+ for fname in files:
+ if not fname.endswith(".py"):
+ continue
+ pth = os.path.join(root, fname)
+ txt = fulltext = file(pth).read()
+ txt_lines = fulltext.splitlines()
+ if not pat.search(txt):
+ continue
+ problems[pth] = []
+ pos = txt.find("_(")
+ while pos > -1:
+ # Make sure that this isn't part of a dunder;
+ # e.g., __init__(...
+ # or something like 'self.assert_(...'
+ test_txt = txt[pos - 1: pos + 10]
+ if not (pat.search(test_txt)):
+ txt = txt[pos + 2:]
+ pos = txt.find("_(")
+ continue
+ pos += 2
+ txt = txt[pos:]
+ innerChars = []
+ # Count pairs of open/close parens until _() closing
+ # paren is found.
+ parenCount = 1
+ pos = 0
+ while parenCount > 0:
+ char = txt[pos]
+ if char == "(":
+ parenCount += 1
+ elif char == ")":
+ parenCount -= 1
+ innerChars.append(char)
+ pos += 1
+ inner_all = "".join(innerChars)
+ # Filter out '%%' and '%('
+ inner = inner_all.replace("%%", "").replace("%(", "")
+ # Filter out the single '%' operators
+ inner = single_pat.sub("", inner)
+ # Within the remaining content, count %
+ fmtCount = inner.count("%")
+ if fmtCount > 1:
+ inner_first = inner_all.splitlines()[0]
+ lns = ["%s" % (p + 1)
+ for p, t in enumerate(txt_lines)
+ if inner_first in t]
+ lnums = ", ".join(lns)
+ # Using ugly string concatenation to avoid having
+ # this test fail itself.
+ inner_all = "_" + "(" + "%s" % inner_all
+ problems[pth].append("Line: %s Text: %s" %
+ (lnums, inner_all))
+ # Look for more
+ pos = txt.find("_(")
+ if not problems[pth]:
+ del problems[pth]
+ if problems:
+ out = ["Problem(s) found in localized string formatting",
+ "(see http://www.gnu.org/software/hello/manual/"
+ "gettext/Python.html for more information)",
+ "",
+ " ------------ Files to fix ------------"]
+ for pth in problems:
+ out.append(" %s:" % pth)
+ for val in set(problems[pth]):
+ out.append(" %s" % val)
+ raise AssertionError("\n".join(out))
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 85593ab46..4820e04fb 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -86,7 +86,8 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
- LOG.debug(_("Nested received %s, %s"), queue, value)
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
ret = rpc.call(context,
queue,
{"method": "echo",
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f6800e3d9..6e5a0114b 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -221,7 +221,12 @@ class IptablesFirewallTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
- self.fw = libvirt_conn.IptablesFirewallDriver()
+
+ class FakeLibvirtConnection(object):
+ pass
+ self.fake_libvirt_connection = FakeLibvirtConnection()
+ self.fw = libvirt_conn.IptablesFirewallDriver(
+ get_connection=lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
@@ -256,7 +261,7 @@ class IptablesFirewallTestCase(test.TestCase):
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
- '# Completed on Tue Jan 18 23:47:56 2011'
+ '# Completed on Tue Jan 18 23:47:56 2011',
]
def test_static_filters(self):
@@ -474,6 +479,19 @@ class NWFilterTestCase(test.TestCase):
'project_id': 'fake'})
inst_id = instance_ref['id']
+ ip = '10.11.12.13'
+
+ network_ref = db.project_get_network(self.context,
+ 'fake')
+
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id']}
+
+ admin_ctxt = context.get_admin_context()
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
def _ensure_all_called():
instance_filter = 'nova-instance-%s' % instance_ref['name']
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
diff --git a/nova/utils.py b/nova/utils.py
index 6d3ddd092..8d7ff1f64 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -138,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- LOG.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -152,6 +152,42 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
return result
+def ssh_execute(ssh, cmd, process_input=None,
+ addl_env=None, check_exit_code=True):
+ LOG.debug(_("Running cmd (SSH): %s"), cmd)
+ if addl_env:
+ raise exception.Error("Environment not supported over SSH")
+
+ if process_input:
+ # This is (probably) fixable if we need it...
+ raise exception.Error("process_input not supported over SSH")
+
+ stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
+ channel = stdout_stream.channel
+
+ #stdin.write('process_input would go here')
+ #stdin.flush()
+
+ # NOTE(justinsb): This seems suspicious...
+ # ...other SSH clients have buffering issues with this approach
+ stdout = stdout_stream.read()
+ stderr = stderr_stream.read()
+ stdin_stream.close()
+
+ exit_status = channel.recv_exit_status()
+
+ # exit_status == -1 if no exit code was returned
+ if exit_status != -1:
+ LOG.debug(_("Result was %s") % exit_status)
+ if check_exit_code and exit_status != 0:
+ raise exception.ProcessExecutionError(exit_code=exit_status,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=cmd)
+
+ return (stdout, stderr)
+
+
def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
@@ -206,19 +242,17 @@ def last_octet(address):
def get_my_linklocal(interface):
try:
if_str = execute("ip -f inet6 -o addr show %s" % interface)
- condition = "\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link"
+ condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
- return 'fe00::'
- except IndexError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
- except ProcessExecutionError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
- except:
- return 'fe00::'
+ raise exception.Error(_("Link Local address is not found: %s")
+ % if_str)
+ except Exception as ex:
+ raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
+ " :%(ex)s") % locals())
def to_global_ipv6(prefix, mac):
diff --git a/nova/version.py b/nova/version.py
index 7b27acb6a..c3ecc2245 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -21,7 +21,7 @@ except ImportError:
'revision_id': 'LOCALREVISION',
'revno': 0}
-NOVA_VERSION = ['2011', '1']
+NOVA_VERSION = ['2011', '2']
YEAR, COUNT = NOVA_VERSION
FINAL = False # This becomes true at Release Candidate time
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f8b3c7807..161445b86 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -76,9 +76,10 @@ class FakeConnection(object):
cls._instance = cls()
return cls._instance
- def init_host(self):
+ def init_host(self, host):
"""
- Initialize anything that is necessary for the driver to function
+ Initialize anything that is necessary for the driver to function,
+ including catching up with currently running VM's on the given host.
"""
return
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 30dc1c79b..29d18dac5 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -113,7 +113,7 @@ class HyperVConnection(object):
self._conn = wmi.WMI(moniker='//./root/virtualization')
self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
- def init_host(self):
+ def init_host(self, host):
#FIXME(chiradeep): implement this
LOG.debug(_('In init host'))
pass
@@ -129,7 +129,7 @@ class HyperVConnection(object):
vm = self._lookup(instance.name)
if vm is not None:
raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -159,7 +159,7 @@ class HyperVConnection(object):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance['name']
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
- [], None, vs_gs_data.GetText_(1))[1:]
+ [], None, vs_gs_data.GetText_(1))[1:]
if ret_val == WMI_JOB_STATUS_STARTED:
success = self._check_job_status(job)
else:
@@ -184,40 +184,40 @@ class HyperVConnection(object):
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [memsetting.GetText_(1)])
+ vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
- procsetting.Limit = vcpus
+ procsetting.Limit = 100000 # static assignment to 100%
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [procsetting.GetText_(1)])
+ vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(vhdfile)s') % locals())
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
- and r.Address == "0"]
+ and r.Address == "0"]
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
diskdrive = self._clone_wmi_obj(
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
#Set the IDE ctrller as parent.
diskdrive.Parent = ctrller[0].path_()
diskdrive.Address = 0
@@ -263,17 +263,18 @@ class HyperVConnection(object):
default_nic_data = [n for n in emulatednics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._clone_wmi_obj(
- 'Msvm_EmulatedEthernetPortSettingData',
- default_nic_data[0])
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
- vm_name)
- LOG.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -283,7 +284,7 @@ class HyperVConnection(object):
new_resources = self._add_virt_resource(new_nic_data, vm)
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
- vm_name)
+ vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
@@ -319,8 +320,10 @@ class HyperVConnection(object):
if job.JobState != WMI_JOB_STATE_COMPLETED:
LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ desc = job.Description
+ elap = job.ElapsedTime
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ % locals())
return True
def _find_external_network(self):
@@ -386,7 +389,9 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ instance_name = instance.name
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
+ % locals())
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -402,12 +407,14 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
- cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ state = str(HYPERV_POWER_STATE[info.EnabledState])
+ memusage = str(info.MemoryUsage)
+ numprocs = str(info.NumberOfProcessors)
+ uptime = str(info.UpTime)
+
+ LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
+ " mem=%(memusage)s, num_cpu=%(numprocs)s,"
+ " cpu_time=%(uptime)s") % locals())
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -441,22 +448,22 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
- req_state)
+ LOG.info(_("Successfully changed vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
else:
- LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
- req_state)
- raise Exception(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ msg = _("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s") % locals()
+ LOG.error(msg)
+ raise Exception(msg)
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm' %
- instance_name)
+ raise exception.NotFound('Cannot attach volume to missing %s vm'
+ % instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s ' %
- instance_name)
+ raise exception.NotFound('Cannot detach volume from missing %s '
+ % instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index ecf0e5efb..7a6fef330 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
+ LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())
def _fetch_s3_image(image, path, user, project):
@@ -111,5 +111,8 @@ def _image_path(path):
def image_url(image):
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ return "http://%s:%s/images/%s" % (FLAGS.glance_host,
+ FLAGS.glance_port, image)
return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 8139c3620..88bfbc668 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -75,11 +75,13 @@
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<filterref filter="nova-instance-${name}">
<parameter name="IP" value="${ip_address}" />
- <parameter name="DHCPSERVER" value="${dhcp_server}" />
- <parameter name="RASERVER" value="${ra_server}" />
+ <parameter name="DHCPSERVER" value="${dhcp_server}" />
#if $getVar('extra_params', False)
${extra_params}
#end if
+#if $getVar('ra_server', False)
+ <parameter name="RASERVER" value="${ra_server}" />
+#end if
</filterref>
</interface>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index d8c1bf48a..4e0fd106f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -149,16 +149,34 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
- self.nwfilter = NWFilterFirewall(self._get_connection)
+ fw_class = utils.import_class(FLAGS.firewall_driver)
+ self.firewall_driver = fw_class(get_connection=self._get_connection)
- if not FLAGS.firewall_driver:
- self.firewall_driver = self.nwfilter
- self.nwfilter.handle_security_groups = True
- else:
- self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+ def init_host(self, host):
+ # Adopt existing VM's running here
+ ctxt = context.get_admin_context()
+ for instance in db.instance_get_all_by_host(ctxt, host):
+ try:
+ LOG.debug(_('Checking state of %s'), instance['name'])
+ state = self.get_info(instance['name'])['state']
+ except exception.NotFound:
+ state = power_state.SHUTOFF
- def init_host(self):
- pass
+ LOG.debug(_('Current state of %(name)s was %(state)s.'),
+ {'name': instance['name'], 'state': state})
+ db.instance_set_state(ctxt, instance['id'], state)
+
+ if state == power_state.SHUTOFF:
+ # TODO(soren): This is what the compute manager does when you
+ # terminate an instance. At some point I figure we'll have a
+ # "terminated" state and some sort of cleanup job that runs
+ # occasionally, cleaning them out.
+ db.instance_destroy(ctxt, instance['id'])
+
+ if state != power_state.RUNNING:
+ continue
+ self.firewall_driver.prepare_instance_filter(instance)
+ self.firewall_driver.apply_instance_filter(instance)
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
@@ -236,8 +254,9 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- LOG.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ instance_name = instance['name']
+ LOG.info(_('instance %(instance_name)s: deleting instance files'
+ ' %(target)s') % locals())
if os.path.exists(target):
shutil.rmtree(target)
@@ -385,7 +404,7 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- self.nwfilter.setup_basic_filtering(instance)
+ self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
@@ -418,7 +437,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- LOG.info(_('cool, it\'s a device'))
+ LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -426,7 +445,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %r, fpath: %r'), data, fpath)
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -434,7 +453,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- LOG.info(_('Contents of file %s: %r'), fpath, contents)
+ LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
@exception.wrap_exception
@@ -621,21 +640,22 @@ class LibvirtConnection(object):
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
+ inst_name = inst['name']
+ img_id = inst.image_id
if key:
- LOG.info(_('instance %s: injecting key into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting key into'
+ ' image %(img_id)s') % locals())
if net:
- LOG.info(_('instance %s: injecting net into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting net into'
+ ' image %(img_id)s') % locals())
try:
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
+ ' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
@@ -643,9 +663,6 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -656,8 +673,7 @@ class LibvirtConnection(object):
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
ra_server = network['ra_server']
- if not ra_server:
- ra_server = 'fd00::'
+
if FLAGS.allow_project_net_traffic:
if FLAGS.use_ipv6:
net, mask = _get_net_and_mask(network['cidr'])
@@ -696,11 +712,13 @@ class LibvirtConnection(object):
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server,
- 'ra_server': ra_server,
'extra_params': extra_params,
'rescue': rescue,
'local': instance_type['local_gb'],
'driver_type': driver_type}
+
+ if ra_server:
+ xml_info['ra_server'] = ra_server + "/128"
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
@@ -883,6 +901,20 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
+ def setup_basic_filtering(self, instance):
+ """Create rules to block spoofing and allow dhcp.
+
+ This gets called when spawning an instance, before
+ :method:`prepare_instance_filter`.
+
+ """
+ raise NotImplementedError()
+
+ def _ra_server_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['ra_server']
+
class NWFilterFirewall(FirewallDriver):
"""
@@ -930,11 +962,15 @@ class NWFilterFirewall(FirewallDriver):
"""
- def __init__(self, get_connection):
+ def __init__(self, get_connection, **kwargs):
self._libvirt_get_connection = get_connection
self.static_filters_configured = False
self.handle_security_groups = False
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
def _get_connection(self):
return self._libvirt_get_connection()
_conn = property(_get_connection)
@@ -1093,7 +1129,9 @@ class NWFilterFirewall(FirewallDriver):
'nova-base-ipv6',
'nova-allow-dhcp-server']
if FLAGS.use_ipv6:
- instance_secgroup_filter_children += ['nova-allow-ra-server']
+ ra_server = self._ra_server_for_instance(instance)
+ if ra_server:
+ instance_secgroup_filter_children += ['nova-allow-ra-server']
ctxt = context.get_admin_context()
@@ -1120,10 +1158,6 @@ class NWFilterFirewall(FirewallDriver):
return
- def apply_instance_filter(self, instance):
- """No-op. Everything is done in prepare_instance_filter"""
- pass
-
def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -1171,9 +1205,14 @@ class NWFilterFirewall(FirewallDriver):
class IptablesFirewallDriver(FirewallDriver):
- def __init__(self, execute=None):
+ def __init__(self, execute=None, **kwargs):
self.execute = execute or utils.execute
self.instances = {}
+ self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+
+ def setup_basic_filtering(self, instance):
+ """Use NWFilter from libvirt for this."""
+ return self.nwfilter.setup_basic_filtering(instance)
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
@@ -1229,6 +1268,7 @@ class IptablesFirewallDriver(FirewallDriver):
our_chains += [':nova-local - [0:0]']
our_rules += ['-A FORWARD -j nova-local']
+ our_rules += ['-A OUTPUT -j nova-local']
security_groups = {}
# Add our chains
@@ -1269,13 +1309,23 @@ class IptablesFirewallDriver(FirewallDriver):
if(ip_version == 4):
# Allow DHCP responses
dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
- (chain_name, dhcp_server)]
+ our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT ' % (chain_name, dhcp_server)]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidr = self._project_cidr_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
elif(ip_version == 6):
# Allow RA responses
ra_server = self._ra_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p icmpv6' %
- (chain_name, ra_server)]
+ if ra_server:
+ our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
+ (chain_name, ra_server + "/128")]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidrv6 = self._project_cidrv6_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' %
+ (chain_name, cidrv6)]
# If nothing matches, jump to the fallback chain
our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
@@ -1362,11 +1412,21 @@ class IptablesFirewallDriver(FirewallDriver):
instance['id'])
def _dhcp_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['gateway']
def _ra_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['ra_server']
+
+ def _project_cidr_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr']
+
+ def _project_cidrv6_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr_v6']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4bfaf4b57..e8352771c 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake")
def log_db_contents(msg=None):
- LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
@@ -331,7 +333,8 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- LOG.debug(_('Calling %s %s'), name, impl)
+ localname = name
+ LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
self._check_session(params)
return impl(*params)
return callit
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6a9c96fc6..f5c19099a 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -134,7 +134,8 @@ class VMHelper(HelperBase):
'pae': 'true', 'viridian': 'true'}
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
@@ -154,10 +155,11 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
- vdi_ref)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
@@ -209,11 +211,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
@@ -231,8 +233,9 @@ class VMHelper(HelperBase):
'other_config': {},
'sm_config': {},
'tags': []})
- LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
- name_label, virtual_size, read_only, sr_ref)
+ LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+ ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
+ % locals())
return vdi_ref
@classmethod
@@ -242,7 +245,8 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
+ LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
+ % locals())
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -255,8 +259,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %(template_vm_ref)s from'
+ ' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -269,8 +273,8 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as ID %s"),
- vdi_uuids, image_id)
+ logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+ " ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
@@ -310,7 +314,7 @@ class VMHelper(HelperBase):
meta, image_file = c.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+ LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
@@ -344,7 +348,7 @@ class VMHelper(HelperBase):
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, type):
url = images.image_url(image)
- LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
+ LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -436,6 +440,14 @@ class VMHelper(HelperBase):
return None
@classmethod
+ def lookup_kernel_ramdisk(cls, session, vm):
+ vm_rec = session.get_xenapi().VM.get_record(vm)
+ if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
+ return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
+ else:
+ return (None, None)
+
+ @classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
@@ -499,7 +511,8 @@ def get_vhd_parent(session, vdi_rec):
parent_uuid = vdi_rec['sm_config']['vhd-parent']
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ vdi_uuid = vdi_rec['uuid']
+ LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
return parent_ref, parent_rec
else:
return None
@@ -540,16 +553,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
- msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
- % (attempts['counter'], max_attempts))
+ counter = attempts['counter']
+ msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
+ " %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- LOG.debug(_("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."), parent_uuid,
- original_parent_uuid)
+ LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
+ " %(original_parent_uuid)s, waiting for coalesce...")
+ % locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
@@ -567,8 +581,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%s) found for "
- "VM %s") % (num_vdis, vm_ref))
+ raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
@@ -634,7 +648,7 @@ def with_vdi_attached_here(session, vdi, read_only, f):
session.get_xenapi().VBD.plug(vbd)
LOG.debug(_('Plugging VBD %s done.'), vbd)
orig_dev = session.get_xenapi().VBD.get_device(vbd)
- LOG.debug(_('VBD %s plugged as %s'), vbd, orig_dev)
+ LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
dev = remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
@@ -708,8 +722,8 @@ def _write_partition(virtual_size, dev):
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
- LOG.debug(_('Writing partition table %d %d to %s...'),
- primary_first, primary_last, dest)
+ LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
+ ' to %(dest)s...') % locals())
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index db05a24ff..00028cdaa 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -139,7 +139,9 @@ class VMOps(object):
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
+ % locals())
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -182,7 +184,7 @@ class VMOps(object):
if isinstance(instance_or_vm, (int, long)):
ctx = context.get_admin_context()
try:
- instance_obj = db.instance_get_by_id(ctx, instance_or_vm)
+ instance_obj = db.instance_get(ctx, instance_or_vm)
instance_name = instance_obj.name
except exception.NotFound:
# The unit tests screw this up, as they use an integer for
@@ -231,7 +233,8 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
- logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
+ % locals())
return
try:
@@ -318,8 +321,23 @@ class VMOps(object):
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
try:
- task = self._session.call_xenapi('Async.VM.destroy', vm)
- self._session.wait_for_task(instance.id, task)
+ kernel = None
+ ramdisk = None
+ if instance.kernel_id or instance.ramdisk_id:
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+ task1 = self._session.call_xenapi('Async.VM.destroy', vm)
+ LOG.debug(_("Removing kernel/ramdisk files"))
+ fn = "remove_kernel_ramdisk"
+ args = {}
+ if kernel:
+ args['kernel-file'] = kernel
+ if ramdisk:
+ args['ramdisk-file'] = ramdisk
+ task2 = self._session.async_call_plugin('glance', fn, args)
+ self._session.wait_for_task(instance.id, task1)
+ self._session.wait_for_task(instance.id, task2)
+ LOG.debug(_("kernel/ramdisk files removed"))
except self.XenAPI.Failure, exc:
LOG.exception(exc)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0cd15b950..d5ebd29d5 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -71,7 +71,7 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -98,20 +98,20 @@ class VolumeHelper(HelperBase):
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+ ' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
- exc, pbd)
+ LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+ ' PBD %(pbd)s') % locals())
try:
session.get_xenapi().SR.forget(sr_ref)
LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
- sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when forgetting'
+ ' SR %(sr_ref)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -172,8 +172,8 @@ class VolumeHelper(HelperBase):
(volume_id is None) or \
(target_host is None) or \
(target_iqn is None):
- raise StorageError(_('Unable to obtain target information %s, %s')
- % (device_path, mountpoint))
+ raise StorageError(_('Unable to obtain target information'
+ ' %(device_path)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['deviceNumber'] = device_number
volume_info['volumeId'] = volume_id
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 189f968c6..d89a6f995 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -48,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- LOG.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
+ " %(mountpoint)s") % locals())
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -66,9 +66,8 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to create VDI on SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
vbd_ref = VMHelper.create_vbd(self._session,
@@ -78,9 +77,8 @@ class VolumeOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to use SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to use SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
task = self._session.call_xenapi('Async.VBD.plug',
@@ -92,8 +90,8 @@ class VolumeOps(object):
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- LOG.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s attached to'
+ ' instance %(instance_name)s') % locals())
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -103,7 +101,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
@@ -125,5 +124,5 @@ class VolumeOps(object):
LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- LOG.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s detached from'
+ ' instance %(instance_name)s') % locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 4e5442aa6..2720d175f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -141,7 +141,7 @@ class XenAPIConnection(object):
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
- def init_host(self):
+ def init_host(self, host):
#FIXME(armando): implement this
#NOTE(armando): would we need a method
#to call when shutting down the host?
@@ -302,19 +302,14 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%s] %s status: success %s") % (
- name,
- task,
- result))
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- LOG.warn(_("Task [%s] %s status: %s %s") % (
- name,
- task,
- status,
- error_info))
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
diff --git a/nova/volume/api.py b/nova/volume/api.py
index ce4831cc3..478c83486 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,10 +41,11 @@ class API(base.Base):
def create(self, context, size, name, description):
if quota.allowed_volumes(context, 1, size) < 1:
- LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
- context.project_id, size)
+ pid = context.project_id
+ LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
+ " %(size)sG volume") % locals())
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
- "create a volume of size %s") % size)
+ "create a volume of size %sG") % size)
options = {
'size': size,
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index da7307733..82f4c2f54 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -294,8 +294,10 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
- def _get_name_and_portal(self, volume_name, host):
+ def _get_name_and_portal(self, volume):
"""Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+ host = volume['host']
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
"sendtargets -p %s" % host)
for target in out.splitlines():
@@ -307,8 +309,7 @@ class ISCSIDriver(VolumeDriver):
def discover_volume(self, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
(iscsi_name, iscsi_portal))
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
@@ -319,8 +320,7 @@ class ISCSIDriver(VolumeDriver):
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
"-n node.startup -v manual" %
(iscsi_name, iscsi_portal))
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 82e3521a8..6e70ec881 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -87,7 +87,7 @@ class VolumeManager(manager.Manager):
if volume['status'] in ['available', 'in-use']:
self.driver.ensure_export(ctxt, volume)
else:
- LOG.info(_("volume %s: skipping export"), volume_ref['name'])
+ LOG.info(_("volume %s: skipping export"), volume['name'])
def create_volume(self, context, volume_id):
"""Creates and exports the volume."""
@@ -103,9 +103,10 @@ class VolumeManager(manager.Manager):
volume_ref['host'] = self.host
try:
- LOG.debug(_("volume %s: creating lv of size %sG"),
- volume_ref['name'],
- volume_ref['size'])
+ vol_name = volume_ref['name']
+ vol_size = volume_ref['size']
+ LOG.debug(_("volume %(vol_name)s: creating lv of"
+ " size %(vol_size)sG") % locals())
self.driver.create_volume(volume_ref)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
diff --git a/nova/volume/san.py b/nova/volume/san.py
new file mode 100644
index 000000000..26d6125e7
--- /dev/null
+++ b/nova/volume/san.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Drivers for san-stored volumes.
+The unique thing about a SAN is that we don't expect that we can run the volume
+ controller on the SAN hardware. We expect to access it over SSH or some API.
+"""
+
+import os
+import paramiko
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.utils import ssh_execute
+from nova.volume.driver import ISCSIDriver
+
+LOG = logging.getLogger("nova.volume.driver")
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('san_thin_provision', 'true',
+ 'Use thin provisioning for SAN volumes?')
+flags.DEFINE_string('san_ip', '',
+ 'IP address of SAN controller')
+flags.DEFINE_string('san_login', 'admin',
+ 'Username for SAN controller')
+flags.DEFINE_string('san_password', '',
+ 'Password for SAN controller')
+flags.DEFINE_string('san_privatekey', '',
+ 'Filename of private key to use for SSH authentication')
+
+
+class SanISCSIDriver(ISCSIDriver):
+ """ Base class for SAN-style storage volumes
+ (storage providers we access over SSH)"""
+ #Override because SAN ip != host ip
+ def _get_name_and_portal(self, volume):
+ """Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+
+ # TODO(justinsb): store in volume, remerge with generic iSCSI code
+ host = FLAGS.san_ip
+
+ (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" % host)
+
+ location = None
+ find_iscsi_name = self._build_iscsi_target_name(volume)
+ for target in out.splitlines():
+ if find_iscsi_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ if not location:
+ raise exception.Error(_("Could not find iSCSI export "
+ " for volume %s") %
+ volume_name)
+
+ iscsi_portal = location.split(",")[0]
+ LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
+ (iscsi_name, iscsi_portal))
+ return (iscsi_name, iscsi_portal)
+
+ def _build_iscsi_target_name(self, volume):
+ return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+
+ # discover_volume is still OK
+ # undiscover_volume is still OK
+
+ def _connect_to_ssh(self):
+ ssh = paramiko.SSHClient()
+ #TODO(justinsb): We need a better SSH key policy
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if FLAGS.san_password:
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ password=FLAGS.san_password)
+ elif FLAGS.san_privatekey:
+ privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
+ # It sucks that paramiko doesn't support DSA keys
+ privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ pkey=privatekey)
+ else:
+ raise exception.Error("Specify san_password or san_privatekey")
+ return ssh
+
+ def _run_ssh(self, command, check_exit_code=True):
+ #TODO(justinsb): SSH connection caching (?)
+ ssh = self._connect_to_ssh()
+
+ #TODO(justinsb): Reintroduce the retry hack
+ ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)
+
+ ssh.close()
+
+ return ret
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ pass
+
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ pass
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ pass
+
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met"""
+ if not (FLAGS.san_password or FLAGS.san_privatekey):
+ raise exception.Error("Specify san_password or san_privatekey")
+
+ if not (FLAGS.san_ip):
+ raise exception.Error("san_ip must be set")
+
+
+def _collect_lines(data):
+ """ Split lines from data into an array, trimming them """
+ matches = []
+ for line in data.splitlines():
+ match = line.strip()
+ matches.append(match)
+
+ return matches
+
+
+def _get_prefixed_values(data, prefix):
+ """Collect lines which start with prefix; with trimming"""
+ matches = []
+ for line in data.splitlines():
+ line = line.strip()
+ if line.startswith(prefix):
+ match = line[len(prefix):]
+ match = match.strip()
+ matches.append(match)
+
+ return matches
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to Solaris-hosted ISCSI volumes.
+ Basic setup for a Solaris iSCSI server:
+ pkg install storage-server SUNWiscsit
+ svcadm enable stmf
+ svcadm enable -r svc:/network/iscsi/target:default
+ pfexec itadm create-tpg e1000g0 ${MYIP}
+ pfexec itadm create-target -t e1000g0
+
+ Then grant the user that will be logging on lots of permissions.
+ I'm not sure exactly which though:
+ zfs allow justinsb create,mount,destroy rpool
+ usermod -P'File System Management' justinsb
+ usermod -P'Primary Administrator' justinsb
+
+ Also make sure you can login using san_login & san_password/san_privatekey
+ """
+
+ def _view_exists(self, luid):
+ cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
+ (out, _err) = self._run_ssh(cmd,
+ check_exit_code=False)
+ if "no views found" in out:
+ return False
+
+ if "View Entry:" in out:
+ return True
+
+ raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+ def _get_target_groups(self):
+ """Gets list of target groups from host."""
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
+ matches = _get_prefixed_values(out, 'Target group: ')
+ LOG.debug("target_groups=%s" % matches)
+ return matches
+
+ def _target_group_exists(self, target_group_name):
+ return target_group_name not in self._get_target_groups()
+
+ def _get_target_group_members(self, target_group_name):
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
+ (target_group_name))
+ matches = _get_prefixed_values(out, 'Member: ')
+ LOG.debug("members of %s=%s" % (target_group_name, matches))
+ return matches
+
+ def _is_target_group_member(self, target_group_name, iscsi_target_name):
+ return iscsi_target_name in (
+ self._get_target_group_members(target_group_name))
+
+ def _get_iscsi_targets(self):
+ cmd = ("pfexec /usr/sbin/itadm list-target | "
+ "awk '{print $1}' | grep -v ^TARGET")
+ (out, _err) = self._run_ssh(cmd)
+ matches = _collect_lines(out)
+ LOG.debug("_get_iscsi_targets=%s" % (matches))
+ return matches
+
+ def _iscsi_target_exists(self, iscsi_target_name):
+ return iscsi_target_name in self._get_iscsi_targets()
+
+ def _build_zfs_poolname(self, volume):
+ #TODO(justinsb): rpool should be configurable
+ zfs_poolname = 'rpool/%s' % (volume['name'])
+ return zfs_poolname
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ if int(volume['size']) == 0:
+ sizestr = '100M'
+ else:
+ sizestr = '%sG' % volume['size']
+
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
+ # Create a zfs volume
+ self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
+ (thin_provision_arg,
+ sizestr,
+ zfs_poolname))
+
+ def _get_luid(self, volume):
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
+ "grep -w %s | awk '{print $1}'" %
+ (zfs_poolname))
+
+ (stdout, _stderr) = self._run_ssh(cmd)
+
+ luid = stdout.strip()
+ return luid
+
+ def _is_lu_created(self, volume):
+ luid = self._get_luid(volume)
+ return luid
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ zfs_poolname = self._build_zfs_poolname(volume)
+ self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
+ (zfs_poolname))
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
+ escaped_group = FLAGS.volume_group.replace('-', '--')
+ escaped_name = volume['name'].replace('-', '--')
+ return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ #TODO(justinsb): On bootup, this is called for every volume.
+ # It then runs ~5 SSH commands for each volume,
+ # most of which fetch the same info each time
+ # This makes initial start stupid-slow
+ self._do_export(volume, force_create=False)
+
+ def create_export(self, context, volume):
+ self._do_export(volume, force_create=True)
+
+ def _do_export(self, volume, force_create):
+ # Create a Logical Unit (LU) backed by the zfs volume
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ if force_create or not self._is_lu_created(volume):
+ cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" %
+ (zfs_poolname))
+ self._run_ssh(cmd)
+
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ # Create a iSCSI target, mapped to just this volume
+ if force_create or not self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" %
+ (target_group_name))
+
+ # Yes, we add the initiator before we create it!
+ # Otherwise, it complains that the target is already active
+ if force_create or not self._is_target_group_member(target_group_name,
+ iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" %
+ (target_group_name, iscsi_name))
+ if force_create or not self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" %
+ (iscsi_name))
+ if force_create or not self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
+ (target_group_name, luid))
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+
+ # This is the reverse of _do_export
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ if self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" %
+ (luid))
+
+ if self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" %
+ (iscsi_name))
+ self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" %
+ (iscsi_name))
+
+ # We don't delete the tg-member; we delete the whole tg!
+
+ if self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" %
+ (target_group_name))
+
+ if self._is_lu_created(volume):
+ self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
+ (luid))
diff --git a/nova/wsgi.py b/nova/wsgi.py
index a48bede9c..e01cc1e1e 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -64,7 +64,8 @@ class Server(object):
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
- logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
+ arg0 = sys.argv[0]
+ logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index b4c742396..07c7e4df9 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -73,8 +73,8 @@ def key_init(self, arg_dict):
@jsonify
def password(self, arg_dict):
"""Writes a request to xenstore that tells the agent to set
- the root password for the given VM. The password should be
- encrypted using the shared secret key that was returned by a
+ the root password for the given VM. The password should be
+ encrypted using the shared secret key that was returned by a
previous call to key_init. The encrypted password value should
be passed as the value for the 'enc_pass' key in arg_dict.
"""
@@ -120,7 +120,8 @@ def _wait_for_agent(self, request_id, arg_dict):
# First, delete the request record
arg_dict["path"] = "data/host/%s" % request_id
xenstore.delete_record(self, arg_dict)
- raise TimeoutError("TIMEOUT: No response from agent within %s seconds." %
+ raise TimeoutError(
+ "TIMEOUT: No response from agent within %s seconds." %
AGENT_TIMEOUT)
ret = xenstore.read_record(self, arg_dict)
# Note: the response for None with be a string that includes
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index aadacce57..8cb439259 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -43,32 +43,47 @@ CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
FILE_SR_PATH = '/var/run/sr-mount'
-def copy_kernel_vdi(session,args):
+
+def remove_kernel_ramdisk(session, args):
+ """Removes kernel and/or ramdisk from dom0's file system"""
+ kernel_file = exists(args, 'kernel-file')
+ ramdisk_file = exists(args, 'ramdisk-file')
+ if kernel_file:
+ os.remove(kernel_file)
+ if ramdisk_file:
+ os.remove(ramdisk_file)
+ return "ok"
+
+
+def copy_kernel_vdi(session, args):
vdi = exists(args, 'vdi-ref')
- size = exists(args,'image-size')
+ size = exists(args, 'image-size')
#Use the uuid as a filename
- vdi_uuid=session.xenapi.VDI.get_uuid(vdi)
- copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)}
- filename=with_vdi_in_dom0(session, vdi, False,
+ vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
+ copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
+ filename = with_vdi_in_dom0(session, vdi, False,
lambda dev:
- _copy_kernel_vdi('/dev/%s' % dev,copy_args))
+ _copy_kernel_vdi('/dev/%s' % dev, copy_args))
return filename
-def _copy_kernel_vdi(dest,copy_args):
- vdi_uuid=copy_args['vdi_uuid']
- vdi_size=copy_args['vdi_size']
- logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid)
- filename=KERNEL_DIR + '/' + vdi_uuid
+
+def _copy_kernel_vdi(dest, copy_args):
+ vdi_uuid = copy_args['vdi_uuid']
+ vdi_size = copy_args['vdi_size']
+ logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
+ dest, vdi_uuid)
+ filename = KERNEL_DIR + '/' + vdi_uuid
#read data from /dev/ and write into a file on /boot/guest
- of=open(filename,'wb')
- f=open(dest,'rb')
+ of = open(filename, 'wb')
+ f = open(dest, 'rb')
#copy only vdi_size bytes
- data=f.read(vdi_size)
+ data = f.read(vdi_size)
of.write(data)
f.close()
- of.close()
- logging.debug("Done. Filename: %s",filename)
- return filename
+ of.close()
+ logging.debug("Done. Filename: %s", filename)
+ return filename
+
def put_vdis(session, args):
params = pickle.loads(exists(args, 'params'))
@@ -76,22 +91,23 @@ def put_vdis(session, args):
image_id = params["image_id"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
-
+
sr_path = get_sr_path(session)
#FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
- tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
+ tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
- paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ]
+ paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
tar_cmd.extend(paths)
logging.debug("Bundling image with cmd: %s", tar_cmd)
subprocess.call(tar_cmd)
- logging.debug("Writing to test file %s", tmp_file)
+ logging.debug("Writing to test file %s", tmp_file)
put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port)
- return "" # FIXME(sirp): return anything useful here?
+ # FIXME(sirp): return anything useful here?
+ return ""
def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
- size = os.path.getsize(tmp_file)
+ size = os.path.getsize(tmp_file)
basename = os.path.basename(tmp_file)
bundle = open(tmp_file, 'r')
@@ -112,12 +128,11 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
-
+
chunk = bundle.read(CHUNK_SIZE)
while chunk:
conn.send(chunk)
chunk = bundle.read(CHUNK_SIZE)
-
res = conn.getresponse()
#FIXME(sirp): should this be 201 Created?
@@ -126,6 +141,7 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
finally:
bundle.close()
+
def get_sr_path(session):
sr_ref = find_sr(session)
@@ -156,5 +172,6 @@ def find_sr(session):
if __name__ == '__main__':
- XenAPIPlugin.dispatch({'put_vdis': put_vdis,
- 'copy_kernel_vdi': copy_kernel_vdi})
+ XenAPIPlugin.dispatch({'put_vdis': put_vdis,
+ 'copy_kernel_vdi': copy_kernel_vdi,
+ 'remove_kernel_ramdisk': remove_kernel_ramdisk})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
index 8ee2f748d..d0313b4ed 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
@@ -43,34 +43,37 @@ SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
-def is_vdi_pv(session,args):
+
+def is_vdi_pv(session, args):
logging.debug("Checking wheter VDI has PV kernel")
vdi = exists(args, 'vdi-ref')
- pv=with_vdi_in_dom0(session, vdi, False,
+ pv = with_vdi_in_dom0(session, vdi, False,
lambda dev: _is_vdi_pv('/dev/%s' % dev))
if pv:
return 'true'
else:
return 'false'
+
def _is_vdi_pv(dest):
- logging.debug("Running pygrub against %s",dest)
- output=os.popen('pygrub -qn %s' % dest)
- pv=False
+ logging.debug("Running pygrub against %s", dest)
+ output = os.popen('pygrub -qn %s' % dest)
+ pv = False
for line in output.readlines():
#try to find kernel string
- m=re.search('(?<=kernel:)/.*(?:>)',line)
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
if m:
- if m.group(0).find('xen')!=-1:
- pv=True
- logging.debug("PV:%d",pv)
- return pv
-
+ if m.group(0).find('xen') != -1:
+ pv = True
+ logging.debug("PV:%d", pv)
+ return pv
+
+
def get_vdi(session, args):
src_url = exists(args, 'src_url')
username = exists(args, 'username')
password = exists(args, 'password')
- raw_image=validate_bool(args, 'raw', 'false')
+ raw_image = validate_bool(args, 'raw', 'false')
add_partition = validate_bool(args, 'add_partition', 'false')
(proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
sr = find_sr(session)
@@ -88,16 +91,17 @@ def get_vdi(session, args):
vdi = create_vdi(session, sr, src_url, vdi_size, False)
with_vdi_in_dom0(session, vdi, False,
lambda dev: get_vdi_(proto, netloc, url_path,
- username, password, add_partition,raw_image,
+ username, password,
+ add_partition, raw_image,
virtual_size, '/dev/%s' % dev))
return session.xenapi.VDI.get_uuid(vdi)
-def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image,
- virtual_size, dest):
+def get_vdi_(proto, netloc, url_path, username, password,
+ add_partition, raw_image, virtual_size, dest):
- #Salvatore: vdi should not be partitioned for raw images
- if (add_partition and not raw_image):
+ #vdi should not be partitioned for raw images
+ if add_partition and not raw_image:
write_partition(virtual_size, dest)
offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
@@ -144,7 +148,7 @@ def get_kernel(session, args):
password = exists(args, 'password')
(proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
-
+
dest = os.path.join(KERNEL_DIR, url_path[1:])
# Paranoid check against people using ../ to do rude things.
@@ -154,8 +158,8 @@ def get_kernel(session, args):
dirname = os.path.dirname(dest)
try:
os.makedirs(dirname)
- except os.error, e:
- if e.errno != errno.EEXIST:
+ except os.error, e:
+ if e.errno != errno.EEXIST:
raise
if not os.path.isdir(dirname):
raise Exception('Cannot make directory %s', dirname)
@@ -248,5 +252,5 @@ def download_all(response, length, dest_file, offset):
if __name__ == '__main__':
XenAPIPlugin.dispatch({'get_vdi': get_vdi,
- 'get_kernel': get_kernel,
+ 'get_kernel': get_kernel,
'is_vdi_pv': is_vdi_pv})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 7fea1136d..f51f5fce4 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -26,6 +26,7 @@ import logging
import logging.handlers
import re
import time
+import XenAPI
##### Logging setup
diff --git a/run_tests.py b/run_tests.py
index 5c8436aee..24786e8ad 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -26,6 +26,8 @@ from nose import config
from nose import result
from nose import core
+from nova import log as logging
+
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
@@ -58,6 +60,7 @@ class NovaTestRunner(core.TextTestRunner):
if __name__ == '__main__':
+ logging.basicConfig()
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
diff --git a/run_tests.sh b/run_tests.sh
index cf1affcea..4e21fe945 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -65,10 +65,15 @@ then
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
- wrapper=${with_venv}
+ wrapper=${with_venv}
fi
fi
fi
fi
-run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
+if [ -z "$noseargs" ];
+then
+ run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
+else
+ run_tests
+fi
diff --git a/setup.py b/setup.py
index 65adbe992..e3c45ce3e 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,6 @@ import subprocess
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
-from sphinx.setup_command import BuildDoc
from nova.utils import parse_mailmap, str_dict_replace
from nova import version
@@ -34,14 +33,6 @@ if os.path.isdir('.bzr'):
version_file.write(vcsversion)
-class local_BuildDoc(BuildDoc):
- def run(self):
- for builder in ['html', 'man']:
- self.builder = builder
- self.finalize_options()
- BuildDoc.run(self)
-
-
class local_sdist(sdist):
"""Customized sdist hook - builds the ChangeLog file from VC first"""
@@ -57,9 +48,23 @@ class local_sdist(sdist):
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(str_dict_replace(changelog, mailmap))
sdist.run(self)
+nova_cmdclass = {'sdist': local_sdist}
+
+
+try:
+ from sphinx.setup_command import BuildDoc
+
+ class local_BuildDoc(BuildDoc):
+ def run(self):
+ for builder in ['html', 'man']:
+ self.builder = builder
+ self.finalize_options()
+ BuildDoc.run(self)
+ nova_cmdclass['build_sphinx'] = local_BuildDoc
+
+except:
+ pass
-nova_cmdclass = {'sdist': local_sdist,
- 'build_sphinx': local_BuildDoc}
try:
from babel.messages import frontend as babel