| author | Ryan Lane <laner@controller> | 2010-12-22 23:44:05 +0000 |
|---|---|---|
| committer | Ryan Lane <laner@controller> | 2010-12-22 23:44:05 +0000 |
| commit | 3f37287c1adfe35756c58938ea8d826181bad2e2 | |
| tree | ebe0a7e1bc18fed15aa0eef26a16746f274ca1be | |
| parent | e55a8ffb862732726c6371ebb20ab3954a16a8e9 | |
| parent | 5f3f5acbddd66dfb3e8203724ed0ff9d0be3d5ae | |
Merge from trunk
83 files changed, 2338 insertions, 1189 deletions
@@ -19,6 +19,7 @@ <mordred@inaugust.com> <mordred@hudson> <paul@openstack.org> <pvoccio@castor.local> <paul@openstack.org> <paul.voccio@rackspace.com> +<soren.hansen@rackspace.com> <soren@linux2go.dk> <todd@ansolabs.com> <todd@lapex> <todd@ansolabs.com> <todd@rubidine.com> <vishvananda@gmail.com> <vishvananda@yahoo.com> @@ -6,6 +6,7 @@ Chris Behrens <cbehrens@codestud.com> Chmouel Boudjnah <chmouel@chmouel.com> Dean Troyer <dtroyer@gmail.com> Devin Carlen <devin.carlen@gmail.com> +Ed Leafe <ed@leafe.com> Eldar Nugaev <enugaev@griddynamics.com> Eric Day <eday@oddments.org> Ewan Mellor <ewan.mellor@citrix.com> @@ -14,6 +15,7 @@ Jay Pipes <jaypipes@gmail.com> Jesse Andrews <anotherjesse@gmail.com> Joe Heck <heckj@mac.com> Joel Moore <joelbm24@gmail.com> +Jonathan Bryce <jbryce@jbryce.com> Josh Kearney <josh.kearney@rackspace.com> Joshua McKenty <jmckenty@gmail.com> Justin Santa Barbara <justin@fathomdb.com> @@ -25,6 +27,7 @@ Rick Clark <rick@openstack.org> Ryan Lucio <rlucio@internap.com> Sandy Walsh <sandy.walsh@rackspace.com> Soren Hansen <soren.hansen@rackspace.com> +Thierry Carrez <thierry@openstack.org> Todd Willey <todd@ansolabs.com> Trey Morris <trey.morris@rackspace.com> Vishvananda Ishaya <vishvananda@gmail.com> diff --git a/CA/geninter.sh b/CA/geninter.sh index 7d6c280d5..1fbcc9e73 100755 --- a/CA/geninter.sh +++ b/CA/geninter.sh @@ -16,16 +16,24 @@ # License for the specific language governing permissions and limitations # under the License. -# ARG is the id of the user -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1" -mkdir INTER/$1 -cd INTER/$1 +# $1 is the id of the project and $2 is the subject of the cert +NAME=$1 +SUBJ=$2 +mkdir -p projects/$NAME +cd projects/$NAME cp ../../openssl.cnf.tmpl openssl.cnf -sed -i -e s/%USERNAME%/$1/g openssl.cnf +sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir certs crl newcerts private +openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes echo "10" > serial touch index.txt -openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes -openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ" -cd ../../ -openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch +# NOTE(vish): Disabling intermediate ca's because we don't actually need them. +# It makes more sense to have each project have its own root ca. +# openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes +# openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ" +openssl ca -gencrl -config ./openssl.cnf -out crl.pem +if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then + sudo chown -R nova:nogroup . 
+fi +# cd ../../ +# openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch diff --git a/CA/genrootca.sh b/CA/genrootca.sh index 31976092e..8f2c3ee3f 100755 --- a/CA/genrootca.sh +++ b/CA/genrootca.sh @@ -25,4 +25,5 @@ else openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes touch index.txt echo "10" > serial + openssl ca -gencrl -config ./openssl.cnf -out crl.pem fi diff --git a/CA/genvpn.sh b/CA/genvpn.sh new file mode 100755 index 000000000..7e7db185d --- /dev/null +++ b/CA/genvpn.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This gets zipped and run on the cloudpipe-managed OpenVPN server +NAME=$1 +SUBJ=$2 + +mkdir -p projects/$NAME +cd projects/$NAME + +# generate a server priv key +openssl genrsa -out server.key 2048 + +# generate a server CSR +openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ" + +novauid=`getent passwd nova | awk -F: '{print $3}'` +if [ ! -z "${novauid}" ] && [ "`id -u`" != "${novauid}" ]; then + sudo chown -R nova:nogroup . +fi diff --git a/CA/openssl.cnf.tmpl b/CA/openssl.cnf.tmpl index 639b8e80a..dd81f1c2b 100644 --- a/CA/openssl.cnf.tmpl +++ b/CA/openssl.cnf.tmpl @@ -24,7 +24,6 @@ dir = . 
[ ca ] default_ca = CA_default -unique_subject = no [ CA_default ] serial = $dir/serial @@ -32,6 +31,8 @@ database = $dir/index.txt new_certs_dir = $dir/newcerts certificate = $dir/cacert.pem private_key = $dir/private/cakey.pem +unique_subject = no +default_crl_days = 365 default_days = 365 default_md = md5 preserve = no diff --git a/CA/INTER/.gitignore b/CA/projects/.gitignore index 72e8ffc0d..72e8ffc0d 100644 --- a/CA/INTER/.gitignore +++ b/CA/projects/.gitignore diff --git a/CA/INTER/.placeholder b/CA/projects/.placeholder index e69de29bb..e69de29bb 100644 --- a/CA/INTER/.placeholder +++ b/CA/projects/.placeholder diff --git a/MANIFEST.in b/MANIFEST.in index 982b727aa..199ce30b6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template include nova/virt/interfaces.template -include nova/virt/libvirt.*.xml.template +include nova/virt/libvirt*.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ diff --git a/bin/nova-manage b/bin/nova-manage index 0c1b621ed..599e02a7e 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -72,6 +72,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) from nova import context +from nova import crypto from nova import db from nova import exception from nova import flags @@ -96,47 +97,43 @@ class VpnCommands(object): self.manager = manager.AuthManager() self.pipe = pipelib.CloudPipe() - def list(self): - """Print a listing of the VPNs for all projects.""" + def list(self, project=None): + """Print a listing of the VPN data for one or all projects. + + args: [project=all]""" print "%-12s\t" % 'project', print "%-20s\t" % 'ip:port', + print "%-20s\t" % 'private_ip', print "%s" % 'state' - for project in self.manager.get_projects(): + if project: + projects = [self.manager.get_project(project)] + else: + projects = self.manager.get_projects() + # NOTE(vish): This hits the database a lot. We could optimize + # by getting all networks in one query and all vpns + # in aother query, then doing lookups by project + for project in projects: print "%-12s\t" % project.name, - - try: - s = "%s:%s" % (project.vpn_ip, project.vpn_port) - except exception.NotFound: - s = "None" - print "%-20s\t" % s, - - vpn = self._vpn_for(project.id) + ipport = "%s:%s" % (project.vpn_ip, project.vpn_port) + print "%-20s\t" % ipport, + ctxt = context.get_admin_context() + vpn = db.instance_get_project_vpn(ctxt, project.id) if vpn: - command = "ping -c1 -w1 %s > /dev/null; echo $?" 
- out, _err = utils.execute(command % vpn['private_dns_name'], - check_exit_code=False) - if out.strip() == '0': - net = 'up' - else: - net = 'down' - print vpn['private_dns_name'], - print vpn['node_name'], - print vpn['instance_id'], + address = None + state = 'down' + if vpn.get('fixed_ip', None): + address = vpn['fixed_ip']['address'] + if project.vpn_ip and utils.vpn_ping(project.vpn_ip, + project.vpn_port): + state = 'up' + print address, + print vpn['host'], + print vpn['ec2_id'], print vpn['state_description'], - print net - + print state else: print None - def _vpn_for(self, project_id): - """Get the VPN instance for a project ID.""" - for instance in db.instance_get_all(context.get_admin_context()): - if (instance['image_id'] == FLAGS.vpn_image_id - and not instance['state_description'] in - ['shutting_down', 'shutdown'] - and instance['project_id'] == project_id): - return instance - def spawn(self): """Run all VPNs.""" for p in reversed(self.manager.get_projects()): @@ -149,6 +146,21 @@ class VpnCommands(object): """Start the VPN for a given project.""" self.pipe.launch_vpn_instance(project_id) + def change(self, project_id, ip, port): + """Change the ip and port for a vpn. + + args: project, ip, port""" + project = self.manager.get_project(project_id) + if not project: + print 'No project %s' % (project_id) + return + admin = context.get_admin_context() + network_ref = db.project_get_network(admin, project_id) + db.network_update(admin, + network_ref['id'], + {'vpn_public_address': ip, + 'vpn_public_port': int(port)}) + class ShellCommands(object): def bpython(self): @@ -295,6 +307,14 @@ class UserCommands(object): is_admin = False self.manager.modify_user(name, access_key, secret_key, is_admin) + def revoke(self, user_id, project_id=None): + """revoke certs for a user + arguments: user_id [project_id]""" + if project_id: + crypto.revoke_certs_by_user_and_project(user_id, project_id) + else: + crypto.revoke_certs_by_user(user_id) + class ProjectCommands(object): """Class for managing projects.""" diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py index 39aa2c2cf..5429bb656 100644 --- a/doc/ext/nova_autodoc.py +++ b/doc/ext/nova_autodoc.py @@ -1,5 +1,8 @@ +import gettext import os +gettext.install('nova') + from nova import utils def setup(app): diff --git a/nova/adminclient.py b/nova/adminclient.py index 5a62cce7d..6ae9f0c0f 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -194,6 +194,7 @@ class HostInfo(object): class NovaAdminClient(object): + def __init__( self, clc_url=DEFAULT_CLC_URL, diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 80f9f2109..803470570 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -29,9 +29,7 @@ import routes import webob.dec from nova import flags -from nova import utils from nova import wsgi -from nova.api import cloudpipe from nova.api import ec2 from nova.api import openstack from nova.api.ec2 import metadatarequesthandler @@ -41,6 +39,7 @@ flags.DEFINE_string('osapi_subdomain', 'api', 'subdomain running the OpenStack API') flags.DEFINE_string('ec2api_subdomain', 'ec2', 'subdomain running the EC2 API') + FLAGS = flags.FLAGS @@ -80,7 +79,6 @@ class API(wsgi.Router): mapper.connect('%s/{path_info:.*}' % s, controller=mrh, conditions=ec2api_subdomain) - mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API()) super(API, self).__init__(mapper) @webob.dec.wsgify diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py deleted file mode 100644 index 
6d40990a8..000000000 --- a/nova/api/cloudpipe/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -REST API Request Handlers for CloudPipe -""" - -import logging -import urllib -import webob -import webob.dec -import webob.exc - -from nova import crypto -from nova import wsgi -from nova.auth import manager -from nova.api.ec2 import cloud - - -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) - - -class API(wsgi.Application): - - def __init__(self): - self.controller = cloud.CloudController() - - @webob.dec.wsgify - def __call__(self, req): - if req.method == 'POST': - return self.sign_csr(req) - _log.debug("Cloudpipe path is %s" % req.path_info) - if req.path_info.endswith("/getca/"): - return self.send_root_ca(req) - return webob.exc.HTTPNotFound() - - def get_project_id_from_ip(self, ip): - # TODO(eday): This was removed with the ORM branch, fix! - instance = self.controller.get_instance_by_ip(ip) - return instance['project_id'] - - def send_root_ca(self, req): - _log.debug("Getting root ca") - project_id = self.get_project_id_from_ip(req.remote_addr) - res = webob.Response() - res.headers["Content-Type"] = "text/plain" - res.body = crypto.fetch_ca(project_id) - return res - - def sign_csr(self, req): - project_id = self.get_project_id_from_ip(req.remote_addr) - cert = self.str_params['cert'] - return crypto.sign_csr(urllib.unquote(cert), project_id) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..d1e2596c3 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -26,8 +26,8 @@ import webob import webob.dec import webob.exc -from nova import exception from nova import context +from nova import exception from nova import flags from nova import wsgi from nova.api.ec2 import apirequest @@ -37,16 +37,79 @@ from nova.auth import manager FLAGS = flags.FLAGS +flags.DEFINE_boolean('use_lockout', False, + 'Whether or not to use lockout middleware.') +flags.DEFINE_integer('lockout_attempts', 5, + 'Number of failed auths before lockout.') +flags.DEFINE_integer('lockout_minutes', 15, + 'Number of minutes to lockout if triggered.') +flags.DEFINE_integer('lockout_window', 15, + 'Number of minutes for lockout window.') +flags.DEFINE_list('lockout_memcached_servers', None, + 'Memcached servers or None for in process cache.') + + _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) class API(wsgi.Middleware): - """Routing for all EC2 API requests.""" def __init__(self): self.application = Authenticate(Router(Authorizer(Executor()))) + if FLAGS.use_lockout: + self.application = Lockout(self.application) + + +class Lockout(wsgi.Middleware): + """Lockout for x minutes on y failed auths in a z minute period. 
+ + x = lockout_timeout flag + y = lockout_window flag + z = lockout_attempts flag + + Uses memcached if lockout_memcached_servers flag is set, otherwise it + uses a very simple in-proccess cache. Due to the simplicity of + the implementation, the timeout window is started with the first + failed request, so it will block if there are x failed logins within + that period. + + There is a possible race condition where simultaneous requests could + sneak in before the lockout hits, but this is extremely rare and would + only result in a couple of extra failed attempts.""" + + def __init__(self, application): + """middleware can use fake for testing.""" + if FLAGS.lockout_memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + self.mc = memcache.Client(FLAGS.lockout_memcached_servers, + debug=0) + super(Lockout, self).__init__(application) + + @webob.dec.wsgify + def __call__(self, req): + access_key = str(req.params['AWSAccessKeyId']) + failures_key = "authfailures-%s" % access_key + failures = int(self.mc.get(failures_key) or 0) + if failures >= FLAGS.lockout_attempts: + detail = "Too many failed authentications." + raise webob.exc.HTTPForbidden(detail=detail) + res = req.get_response(self.application) + if res.status_int == 403: + failures = self.mc.incr(failures_key) + if failures is None: + # NOTE(vish): To use incr, failures has to be a string. + self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) + elif failures >= FLAGS.lockout_attempts: + _log.warn('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.' % + (access_key, failures, FLAGS.lockout_minutes)) + self.mc.set(failures_key, str(failures), + time=FLAGS.lockout_minutes * 60) + return res class Authenticate(wsgi.Middleware): @@ -77,7 +140,7 @@ class Authenticate(wsgi.Middleware): req.host, req.path) except exception.Error, ex: - logging.debug("Authentication Failure: %s" % ex) + logging.debug(_("Authentication Failure: %s") % ex) raise webob.exc.HTTPForbidden() # Authenticated! @@ -120,9 +183,9 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug('action: %s' % action) + _log.debug(_('action: %s') % action) for key, value in args.items(): - _log.debug('arg: %s\t\tval: %s' % (key, value)) + _log.debug(_('arg: %s\t\tval: %s') % (key, value)) # Success! req.environ['ec2.controller'] = controller diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 1c6ab688d..fac01369e 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -168,6 +168,7 @@ class AdminController(object): # FIXME(vish): these host commands don't work yet, perhaps some of the # required data can be retrieved from service objects? + def describe_hosts(self, _context, **_kwargs): """Returns status info for all nodes. Includes: * Disk Space diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 5758781b6..a90fbeb0c 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -92,8 +92,8 @@ class APIRequest(object): method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: - _error = ('Unsupported API request: controller = %s,' - 'action = %s') % (self.controller, self.action) + _error = _('Unsupported API request: controller = %s,' + 'action = %s') % (self.controller, self.action) _log.warning(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. 
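The Lockout middleware added to nova/api/ec2/__init__.py above counts failed authentications per access key and rejects further requests once lockout_attempts failures accumulate inside the lockout_window, keeping the key blocked for lockout_minutes; memcached is used when lockout_memcached_servers is set, otherwise the in-process fakememcache stand-in. The sketch below is a minimal, self-contained illustration of that counting pattern, not the middleware itself: it swaps the memcached client for a plain dict with expiry times, and the names FailedAuthCache and check_and_record are hypothetical.

```python
import time

# Example values mirroring the lockout_* flags defined above.
LOCKOUT_ATTEMPTS = 5               # failed auths before lockout
LOCKOUT_WINDOW_SECONDS = 15 * 60   # window in which failures are counted
LOCKOUT_SECONDS = 15 * 60          # how long a key stays locked out


class FailedAuthCache(object):
    """Stand-in for the memcached/fakememcache client: stores
    (value, expiry) pairs and forgets entries once they expire."""

    def __init__(self):
        self._data = {}

    def get(self, key):
        value, expires = self._data.get(key, (None, 0))
        if value is not None and time.time() > expires:
            del self._data[key]
            return None
        return value

    def set(self, key, value, ttl):
        self._data[key] = (value, time.time() + ttl)


def check_and_record(cache, access_key, auth_succeeded):
    """Return False if the key is locked out; otherwise record the
    outcome of this authentication attempt and return True."""
    failures_key = 'authfailures-%s' % access_key
    failures = int(cache.get(failures_key) or 0)
    if failures >= LOCKOUT_ATTEMPTS:
        return False  # too many failed authentications
    if not auth_succeeded:
        failures += 1
        if failures >= LOCKOUT_ATTEMPTS:
            # threshold reached: keep the counter alive for the full
            # lockout period so subsequent requests are rejected
            cache.set(failures_key, str(failures), LOCKOUT_SECONDS)
        else:
            # still inside the failure window: just keep counting
            cache.set(failures_key, str(failures), LOCKOUT_WINDOW_SECONDS)
    return True


if __name__ == '__main__':
    cache = FailedAuthCache()
    for attempt in range(7):
        allowed = check_and_record(cache, 'AKIAEXAMPLE', auth_succeeded=False)
        print('%d: %s' % (attempt + 1, 'allowed' if allowed else 'locked out'))
```

In the real middleware the expiry comes from the memcached key TTL, which is also why the failure count is stored as a string: incr only works on string values there, as the NOTE(vish) comment in the diff points out.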
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ebb13aedc..e09261f00 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,7 +27,6 @@ import datetime import logging import re import os -import time from nova import context import IPy @@ -114,7 +113,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis("Generating root CA: %s", "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): @@ -196,15 +195,19 @@ class CloudController(object): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: - name, _sep, url = region.partition('=') + name, _sep, host = region.partition('=') + endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix, + host, + FLAGS.cc_port, + FLAGS.ec2_suffix) regions.append({'regionName': name, - 'regionEndpoint': url}) + 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', - 'regionEndpoint': FLAGS.ec2_url}] - if region_name: - regions = [r for r in regions if r['regionName'] in region_name] - return {'regionInfo': regions} + 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + FLAGS.cc_host, + FLAGS.cc_port, + FLAGS.ec2_suffix)}] def describe_snapshots(self, context, @@ -318,11 +321,11 @@ class CloudController(object): ip_protocol = str(ip_protocol) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: - raise InvalidInputException('%s is not a valid ipProtocol' % + raise InvalidInputException(_('%s is not a valid ipProtocol') % (ip_protocol,)) if ((min(from_port, to_port) < -1) or (max(from_port, to_port) > 65535)): - raise InvalidInputException('Invalid port range') + raise InvalidInputException(_('Invalid port range')) values['protocol'] = ip_protocol values['from_port'] = from_port @@ -360,7 +363,8 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified " + "parameters.")) for rule in security_group.rules: match = True @@ -371,7 +375,7 @@ class CloudController(object): db.security_group_rule_destroy(context, rule['id']) self._trigger_refresh_security_group(context, security_group) return True - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. 
# Unfortunately, it seems Boto is using an old API @@ -387,8 +391,8 @@ class CloudController(object): values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): - raise exception.ApiError('This rule already exists in group %s' % - group_name) + raise exception.ApiError(_('This rule already exists in group %s') + % group_name) security_group_rule = db.security_group_rule_create(context, values) @@ -416,7 +420,7 @@ class CloudController(object): def create_security_group(self, context, group_name, group_description): self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): - raise exception.ApiError('group %s already exists' % group_name) + raise exception.ApiError(_('group %s already exists') % group_name) group = {'user_id': context.user.id, 'project_id': context.project_id, @@ -529,13 +533,13 @@ class CloudController(object): def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_ref = db.volume_get_by_ec2_id(context, volume_id) if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError("Invalid device specified: %s. " - "Example device: /dev/vdb" % device) + raise exception.ApiError(_("Invalid device specified: %s. " + "Example device: /dev/vdb") % device) # TODO(vish): abstract status checking? if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) if volume_ref['attach_status'] == "attached": - raise exception.ApiError("Volume is already attached") + raise exception.ApiError(_("Volume is already attached")) internal_id = ec2_id_to_internal_id(instance_id) instance_ref = self.compute_api.get_instance(context, internal_id) host = instance_ref['host'] @@ -557,10 +561,10 @@ class CloudController(object): instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) if not instance_ref: - raise exception.ApiError("Volume isn't attached to anything!") + raise exception.ApiError(_("Volume isn't attached to anything!")) # TODO(vish): abstract status checking? if volume_ref['status'] == "available": - raise exception.ApiError("Volume is already detached") + raise exception.ApiError(_("Volume is already detached")) try: host = instance_ref['host'] rpc.cast(context, @@ -689,23 +693,29 @@ class CloudController(object): def allocate_address(self, context, **kwargs): # check quota if quota.allowed_floating_ips(context, 1) < 1: - logging.warn("Quota exceeeded for %s, tried to allocate address", + logging.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), context.project_id) - raise quota.QuotaError("Address quota exceeded. You cannot " - "allocate any more addresses") - network_topic = self._get_network_topic(context) + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. public_ip = rpc.call(context, - network_topic, + FLAGS.network_topic, {"method": "allocate_floating_ip", "args": {"project_id": context.project_id}}) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): - # NOTE(vish): Should we make sure this works? 
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - network_topic = self._get_network_topic(context) + # NOTE(vish): We don't know which network host should get the ip + # when we deallocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. rpc.cast(context, - network_topic, + FLAGS.network_topic, {"method": "deallocate_floating_ip", "args": {"floating_address": floating_ip_ref['address']}}) return {'releaseResponse': ["Address released."]} @@ -716,7 +726,10 @@ class CloudController(object): fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - network_topic = self._get_network_topic(context) + # NOTE(vish): Perhaps we should just pass this on to compute and + # let compute communicate with network. + network_topic = self.compute_api.get_network_topic(context, + internal_id) rpc.cast(context, network_topic, {"method": "associate_floating_ip", @@ -726,24 +739,18 @@ class CloudController(object): def disassociate_address(self, context, public_ip, **kwargs): floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - network_topic = self._get_network_topic(context) + # NOTE(vish): Get the topic from the host name of the network of + # the associated fixed ip. + if not floating_ip_ref.get('fixed_ip'): + raise exception.ApiError('Address is not associated.') + host = floating_ip_ref['fixed_ip']['network']['host'] + topic = db.queue_get_for(context, FLAGS.network_topic, host) rpc.cast(context, - network_topic, + topic, {"method": "disassociate_floating_ip", "args": {"floating_address": floating_ip_ref['address']}}) return {'disassociateResponse': ["Address disassociated."]} - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - network_ref = self.network_manager.get_network(context) - host = network_ref['host'] - if not host: - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return db.queue_get_for(context, FLAGS.network_topic, host) - def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) instances = self.compute_api.create_instances(context, @@ -751,11 +758,12 @@ class CloudController(object): kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, - kernel_id=kwargs.get('kernel_id'), + kernel_id=kwargs.get('kernel_id', None), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), + user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), generate_hostname=internal_id_to_ec2_id) return self._format_run_instances(context, @@ -805,7 +813,7 @@ class CloudController(object): # TODO: return error if not authorized volume_ref = db.volume_get_by_ec2_id(context, volume_id) if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() db.volume_update(context, volume_ref['id'], {'status': 'deleting', 'terminated_at': now}) @@ -836,11 +844,12 @@ class CloudController(object): def describe_image_attribute(self, context, image_id, attribute, **kwargs): if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise 
exception.ApiError(_('attribute not supported: %s') + % attribute) try: image = self.image_service.show(context, image_id) except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) + raise exception.ApiError(_('invalid id: %s') % image_id) result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: result['launchPermission'].append({'group': 'all'}) @@ -850,13 +859,14 @@ class CloudController(object): operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise exception.ApiError(_('attribute not supported: %s') + % attribute) if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') + raise exception.ApiError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') + raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') + raise exception.ApiError(_('operation_type must be add or remove')) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414cc..0e9e686ff 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(object): cc = cloud.CloudController() meta_data = cc.get_metadata(req.remote_addr) if meta_data is None: - logging.error('Failed to get metadata for ip: %s' % + logging.error(_('Failed to get metadata for ip: %s') % req.remote_addr) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b9ecbd9b8..de95ee548 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -66,7 +66,7 @@ class API(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - logging.warn("Caught error: %s" % str(ex)) + logging.warn(_("Caught error: %s") % str(ex)) logging.debug(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -133,7 +133,7 @@ class RateLimitingMiddleware(wsgi.Middleware): if delay: # TODO(gundlach): Get the retry-after format correct. 
exc = webob.exc.HTTPRequestEntityTooLarge( - explanation='Too many requests.', + explanation=_('Too many requests.'), headers={'Retry-After': time.time() + delay}) raise faults.Fault(exc) return self.application @@ -170,9 +170,16 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() + + server_members = {'action': 'POST'} + if FLAGS.allow_admin_api: + logging.debug("Including admin operations in API.") + server_members['pause'] = 'POST' + server_members['unpause'] = 'POST' + mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, - member={'action': 'POST'}) + member=server_members) mapper.resource("backup_schedule", "backup_schedules", controller=backup_schedules.Controller(), @@ -186,10 +193,6 @@ class APIRouter(wsgi.Router): mapper.resource("sharedipgroup", "sharedipgroups", controller=sharedipgroups.Controller()) - if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") - # TODO: Place routes for admin operations here. - super(APIRouter, self).__init__(mapper) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 3ed691d7b..fc70b5c6c 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -24,6 +24,7 @@ import nova.image.service class Controller(wsgi.Controller): + def __init__(self): pass diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7704f48f1..5c3322f7c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import logging +import traceback + from webob import exc from nova import exception @@ -27,6 +30,10 @@ from nova.compute import power_state import nova.api.openstack +LOG = logging.getLogger('server') +LOG.setLevel(logging.DEBUG) + + def _entity_list(entities): """ Coerces a list of servers into proper dictionary format """ return dict(servers=entities) @@ -166,3 +173,25 @@ class Controller(wsgi.Controller): except: return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + + def pause(self, req, id): + """ Permit Admins to Pause the server. """ + ctxt = req.environ['nova.context'] + try: + self.compute_api.pause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::pause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unpause(self, req, id): + """ Permit Admins to Unpause the server. 
""" + ctxt = req.environ['nova.context'] + try: + self.compute_api.unpause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::unpause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index a1584322b..47e435cb6 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -37,7 +37,6 @@ class DbDriver(object): def __init__(self): """Imports the LDAP module""" pass - db def __enter__(self): return self @@ -83,7 +82,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate('User %s already exists' % name) + raise exception.Duplicate(_('User %s already exists') % name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -105,8 +104,9 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) # description is a required attribute if description is None: @@ -133,8 +133,8 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) for member in members: db.project_add_member(context.get_admin_context(), @@ -155,8 +155,8 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") % manager_uid) values['project_manager'] = manager['id'] if description: @@ -243,8 +243,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound('User "%s" not found' % user_id) + raise exception.NotFound(_('User "%s" not found') % user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound('Project "%s" not found' % project_id) + raise exception.NotFound(_('Project "%s" not found') % project_id) return user, project diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..33cd03430 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Fake LDAP server for test harness, backs to ReDIS. +"""Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap @@ -23,34 +23,65 @@ library to work with nova. 
""" +import fnmatch import json -import redis -from nova import flags -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') - - -class Redis(object): +class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') + raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst + cls._instance = _StorageDict() return cls._instance +class _StorageDict(dict): + def keys(self, pat=None): + ret = super(_StorageDict, self).keys() + if pat is not None: + ret = fnmatch.filter(ret, pat) + return ret + + def delete(self, key): + try: + del self[key] + except KeyError: + pass + + def flushdb(self): + self.clear() + + def hgetall(self, key): + """Returns the hash for the given key; creates + the hash if the key doesn't exist.""" + try: + return self[key] + except KeyError: + self[key] = {} + return self[key] + + def hget(self, key, field): + hashdict = self.hgetall(key) + try: + return hashdict[field] + except KeyError: + hashdict[field] = {} + return hashdict[field] + + def hset(self, key, field, val): + hashdict = self.hgetall(key) + hashdict[field] = val + + def hmset(self, key, value_dict): + hashdict = self.hgetall(key) + for field, val in value_dict.items(): + hashdict[field] = val + + SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -169,8 +200,6 @@ def _to_json(unencoded): class FakeLDAP(object): - #TODO(vish): refactor this class to use a wrapper instead of accessing - # redis directly """Fake LDAP connection.""" def simple_bind_s(self, dn, password): @@ -183,14 +212,13 @@ class FakeLDAP(object): def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" - key = "%s%s" % (self.__redis_prefix, dn) - + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) - Redis.instance().hmset(key, value_dict) + Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" - Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. @@ -201,18 +229,18 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = Redis.instance() - key = "%s%s" % (self.__redis_prefix, dn) + store = Store.instance() + key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: - values = _from_json(redis.hget(key, k)) + values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) - values = redis.hset(key, k, _to_json(values)) + values = store.hset(key, k, _to_json(values)) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. 
@@ -226,16 +254,17 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = Redis.instance() + store = Store.instance() if scope == SCOPE_BASE: - keys = ["%s%s" % (self.__redis_prefix, dn)] + keys = ["%s%s" % (self.__prefix, dn)] else: - keys = redis.keys("%s*%s" % (self.__redis_prefix, dn)) + keys = store.keys("%s*%s" % (self.__prefix, dn)) + objects = [] for key in keys: - # get the attributes from redis - attrs = redis.hgetall(key) - # turn the values from redis into lists + # get the attributes from the store + attrs = store.hgetall(key) + # turn the values from the store into lists # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) @@ -244,13 +273,13 @@ class FakeLDAP(object): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) - objects.append((key[len(self.__redis_prefix):], attrs)) + objects.append((key[len(self.__prefix):], attrs)) # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): # pylint: disable-msg=R0201 - """Get the prefix to use for all redis keys.""" + def __prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all keys.""" return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index c10939d74..e289ea5a2 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -159,7 +159,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound("LDAP object for %s doesn't exist" + raise exception.NotFound(_("LDAP object for %s doesn't exist") % name) else: attr = [ @@ -182,11 +182,12 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -195,8 +196,8 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -218,9 +219,9 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % - manager_uid) + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) if description: @@ -416,8 +417,9 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise 
exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -432,8 +434,9 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -444,28 +447,30 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) + raise exception.Duplicate(_("User %s is already a member of " + "the group %s") % (uid, group_dn)) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -479,15 +484,16 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead.", group_dn) + logging.debug(_("Attempted to remove the last member of a group. 
" + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -500,7 +506,8 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + raise exception.NotFound(_("Group at dn %s doesn't exist") + % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6df..d3e266952 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -64,12 +64,9 @@ flags.DEFINE_string('credential_key_file', 'pk.pem', 'Filename of private key in credentials zip') flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_rc_file', 'novarc', - 'Filename of rc in credentials zip') -flags.DEFINE_string('credential_cert_subject', - '/C=US/ST=California/L=MountainView/O=AnsoLabs/' - 'OU=NovaDev/CN=%s-%s', - 'Subject for certificate for users') +flags.DEFINE_string('credential_rc_file', '%src', + 'Filename of rc in credentials zip, %s will be ' + 'replaced by name of the region (nova by default)') flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') @@ -257,12 +254,12 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info('Looking up user: %r', access_key) + logging.info(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) logging.info('user: %r', user) if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) + raise exception.NotFound(_('No user found for access key %s') + % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -271,12 +268,12 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) + raise exception.NotFound(_('No project called %s could be found') + % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) + raise exception.NotFound(_('User %s is not a member of project %s') + % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -284,7 +281,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode @@ -294,7 +291,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) 
logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) return (user, project) def get_access_key(self, user, project): @@ -364,7 +361,7 @@ class AuthManager(object): with self.driver() as drv: if role == 'projectmanager': if not project: - raise exception.Error("Must specify project") + raise exception.Error(_("Must specify project")) return self.is_project_manager(user, project) global_role = drv.has_role(User.safe_id(user), @@ -398,9 +395,9 @@ class AuthManager(object): @param project: Project in which to add local role. """ if role not in FLAGS.allowed_roles: - raise exception.NotFound("The %s role can not be found" % role) + raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound("The %s role is global only" % role) + raise exception.NotFound(_("The %s role is global only") % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -543,10 +540,10 @@ class AuthManager(object): """ network_ref = db.project_get_network(context.get_admin_context(), - Project.safe_id(project)) + Project.safe_id(project), False) - if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + if not network_ref: + return (None, None) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) @@ -628,27 +625,37 @@ class AuthManager(object): def get_key_pairs(context): return db.key_pair_get_all_by_user(context.elevated(), context.user_id) - def get_credentials(self, user, project=None): + def get_credentials(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - rc = self.__generate_rc(user.access, user.secret, pid) - private_key, signed_cert = self._generate_x509_cert(user.id, pid) + private_key, signed_cert = crypto.generate_x509_cert(user.id, pid) tmpdir = tempfile.mkdtemp() zf = os.path.join(tmpdir, "temp.zip") zippy = zipfile.ZipFile(zf, 'w') - zippy.writestr(FLAGS.credential_rc_file, rc) + if use_dmz and FLAGS.region_list: + regions = {} + for item in FLAGS.region_list: + region, _sep, region_host = item.partition("=") + regions[region] = region_host + else: + regions = {'nova': FLAGS.cc_host} + for region, host in regions.iteritems(): + rc = self.__generate_rc(user.access, + user.secret, + pid, + use_dmz, + host) + zippy.writestr(FLAGS.credential_rc_file % region, rc) + zippy.writestr(FLAGS.credential_key_file, private_key) zippy.writestr(FLAGS.credential_cert_file, signed_cert) - try: - (vpn_ip, vpn_port) = self.get_project_vpn_data(project) - except exception.NotFound: - vpn_ip = None + (vpn_ip, vpn_port) = self.get_project_vpn_data(project) if vpn_ip: configfile = open(FLAGS.vpn_client_template, "r") s = string.Template(configfile.read()) @@ -659,10 +666,9 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + logging.warn(_("No vpn data for project %s"), pid) - zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) + zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() with open(zf, 'rb') as f: read_buffer = f.read() @@ -670,38 +676,38 @@ class AuthManager(object): shutil.rmtree(tmpdir) 
return read_buffer - def get_environment_rc(self, user, project=None): + def get_environment_rc(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid) + return self.__generate_rc(user.access, user.secret, pid, use_dmz) @staticmethod - def __generate_rc(access, secret, pid): + def __generate_rc(access, secret, pid, use_dmz=True, host=None): """Generate rc file for user""" + if use_dmz: + cc_host = FLAGS.cc_dmz + else: + cc_host = FLAGS.cc_host + # NOTE(vish): Always use the dmz since it is used from inside the + # instance + s3_host = FLAGS.s3_dmz + if host: + s3_host = host + cc_host = host rc = open(FLAGS.credentials_template).read() rc = rc % {'access': access, 'project': pid, 'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + cc_host, + FLAGS.cc_port, + FLAGS.ec2_suffix), + 's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port), 'nova': FLAGS.ca_file, 'cert': FLAGS.credential_cert_file, 'key': FLAGS.credential_key_file} return rc - - def _generate_x509_cert(self, uid, pid): - """Generate x509 cert for user""" - (private_key, csr) = crypto.generate_x509_cert( - self.__cert_subject(uid)) - # TODO(joshua): This should be async call back to the cloud controller - signed_cert = crypto.sign_csr(csr, pid) - return (private_key, signed_cert) - - @staticmethod - def __cert_subject(uid): - """Helper to generate cert subject""" - return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh deleted file mode 100755 index 30d9ad102..000000000 --- a/nova/cloudpipe/bootscript.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This gets zipped and run on the cloudpipe-managed OpenVPN server - -export SUPERVISOR="http://10.255.255.1:8773/cloudpipe" -export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $1}'` -export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'` -export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'` -export GATEWAY=`netstat -r | grep default | cut -d' ' -f10` -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP" - -DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'` -DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." 
$4 - 1 }'` - -# generate a server DH -openssl dhparam -out /etc/openvpn/dh1024.pem 1024 - -# generate a server priv key -openssl genrsa -out /etc/openvpn/server.key 2048 - -# generate a server CSR -openssl req -new -key /etc/openvpn/server.key -out /etc/openvpn/server.csr -batch -subj "$SUBJ" - -# URLEncode the CSR -CSRTEXT=`cat /etc/openvpn/server.csr` -CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')") - -# SIGN the csr and save as server.crt -# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file -curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt -curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt - -# Customize the server.conf.template -cd /etc/openvpn - -sed -e s/VPN_IP/$VPN_IP/g server.conf.template > server.conf -sed -i -e s/DHCP_SUBNET/$DHCP_MASK/g server.conf -sed -i -e s/DHCP_LOWER/$DHCP_LOWER/g server.conf -sed -i -e s/DHCP_UPPER/$DHCP_UPPER/g server.conf -sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf - -echo "\npush \"route 10.255.255.1 255.255.255.255 $GATEWAY\"\n" >> server.conf -echo "\npush \"route 10.255.255.253 255.255.255.255 $GATEWAY\"\n" >> server.conf -echo "\nduplicate-cn\n" >> server.conf - -/etc/init.d/openvpn start diff --git a/nova/cloudpipe/bootscript.template b/nova/cloudpipe/bootscript.template new file mode 100755 index 000000000..11578c134 --- /dev/null +++ b/nova/cloudpipe/bootscript.template @@ -0,0 +1,50 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This gets zipped and run on the cloudpipe-managed OpenVPN server + +export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $$1}'` +export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $$1}'` +export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $$1}'` +export GATEWAY=`netstat -r | grep default | cut -d' ' -f10` + +DHCP_LOWER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." $$4 - ${num_vpn} }'` +DHCP_UPPER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." 
$$4 - 1 }'` + +# generate a server DH +openssl dhparam -out /etc/openvpn/dh1024.pem 1024 + +cp crl.pem /etc/openvpn/ +cp server.key /etc/openvpn/ +cp ca.crt /etc/openvpn/ +cp server.crt /etc/openvpn/ +# Customize the server.conf.template +cd /etc/openvpn + +sed -e s/VPN_IP/$$VPN_IP/g server.conf.template > server.conf +sed -i -e s/DHCP_SUBNET/$$DHCP_MASK/g server.conf +sed -i -e s/DHCP_LOWER/$$DHCP_LOWER/g server.conf +sed -i -e s/DHCP_UPPER/$$DHCP_UPPER/g server.conf +sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf + +echo "push \"route ${dmz_net} ${dmz_mask} $$GATEWAY\"" >> server.conf +echo "duplicate-cn" >> server.conf +echo "crl-verify /etc/openvpn/crl.pem" >> server.conf + +/etc/init.d/openvpn start diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 3472201cd..09361828d 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -22,13 +22,15 @@ an instance with it. """ -import base64 import logging import os +import string import tempfile import zipfile from nova import context +from nova import crypto +from nova import db from nova import exception from nova import flags from nova import utils @@ -39,8 +41,17 @@ from nova.api.ec2 import cloud FLAGS = flags.FLAGS flags.DEFINE_string('boot_script_template', - utils.abspath('cloudpipe/bootscript.sh'), - 'Template for script to run on cloudpipe instance boot') + utils.abspath('cloudpipe/bootscript.template'), + _('Template for script to run on cloudpipe instance boot')) +flags.DEFINE_string('dmz_net', + '10.0.0.0', + _('Network to push into openvpn config')) +flags.DEFINE_string('dmz_mask', + '255.255.255.0', + _('Netmask to push into openvpn config')) + + +LOG = logging.getLogger('nova-cloudpipe') class CloudPipe(object): @@ -48,64 +59,96 @@ class CloudPipe(object): self.controller = cloud.CloudController() self.manager = manager.AuthManager() - def launch_vpn_instance(self, project_id): - logging.debug("Launching VPN for %s" % (project_id)) - project = self.manager.get_project(project_id) + def get_encoded_zip(self, project_id): # Make a payload.zip tmpfolder = tempfile.mkdtemp() filename = "payload.zip" zippath = os.path.join(tmpfolder, filename) z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED) - - z.write(FLAGS.boot_script_template, 'autorun.sh') + shellfile = open(FLAGS.boot_script_template, "r") + s = string.Template(shellfile.read()) + shellfile.close() + boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz, + cc_port=FLAGS.cc_port, + dmz_net=FLAGS.dmz_net, + dmz_mask=FLAGS.dmz_mask, + num_vpn=FLAGS.cnt_vpn_clients) + # genvpn, sign csr + crypto.generate_vpn_files(project_id) + z.writestr('autorun.sh', boot_script) + crl = os.path.join(crypto.ca_folder(project_id), 'crl.pem') + z.write(crl, 'crl.pem') + server_key = os.path.join(crypto.ca_folder(project_id), 'server.key') + z.write(server_key, 'server.key') + ca_crt = os.path.join(crypto.ca_path(project_id)) + z.write(ca_crt, 'ca.crt') + server_crt = os.path.join(crypto.ca_folder(project_id), 'server.crt') + z.write(server_crt, 'server.crt') z.close() - - key_name = self.setup_key_pair(project.project_manager_id, project_id) zippy = open(zippath, "r") - context = context.RequestContext(user=project.project_manager, - project=project) - - reservation = self.controller.run_instances(context, - # Run instances expects encoded userdata, it is decoded in the - # get_metadata_call. autorun.sh also decodes the zip file, hence - # the double encoding. 
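The values baked into the boot script come from string.Template in get_encoded_zip below, which is why the template escapes every shell variable as $$VPN_IP, $$BROADCAST and so on: ${name} placeholders such as ${dmz_net} and ${num_vpn} are filled in from flags, while a doubled $$ collapses back to a single literal $ for the shell. A minimal sketch of that substitution (values are made up):

```python
from string import Template

# Same escaping rules bootscript.template relies on: ${name} is substituted,
# while "$$" survives substitution as one literal "$" for the shell to expand.
snippet = Template('sed -e s/VPN_IP/$$VPN_IP/g server.conf.template > server.conf\n'
                   'push "route ${dmz_net} ${dmz_mask} $$GATEWAY"\n')

print(snippet.substitute(dmz_net='10.0.0.0', dmz_mask='255.255.255.0'))
# sed -e s/VPN_IP/$VPN_IP/g server.conf.template > server.conf
# push "route 10.0.0.0 255.255.255.0 $GATEWAY"
```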
- user_data=zippy.read().encode("base64").encode("base64"), + # NOTE(vish): run instances expects encoded userdata, it is decoded + # in the get_metadata_call. autorun.sh also decodes the zip file, + # hence the double encoding. + encoded = zippy.read().encode("base64").encode("base64") + zippy.close() + return encoded + + def launch_vpn_instance(self, project_id): + LOG.debug(_("Launching VPN for %s") % (project_id)) + project = self.manager.get_project(project_id) + ctxt = context.RequestContext(user=project.project_manager, + project=project) + key_name = self.setup_key_pair(ctxt) + group_name = self.setup_security_group(ctxt) + + reservation = self.controller.run_instances(ctxt, + user_data=self.get_encoded_zip(project_id), max_count=1, min_count=1, instance_type='m1.tiny', image_id=FLAGS.vpn_image_id, key_name=key_name, - security_groups=["vpn-secgroup"]) - zippy.close() + security_group=[group_name]) + + def setup_security_group(self, context): + group_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) + if db.security_group_exists(context, context.project.id, group_name): + return group_name + group = {'user_id': context.user.id, + 'project_id': context.project.id, + 'name': group_name, + 'description': 'Group for vpn'} + group_ref = db.security_group_create(context, group) + rule = {'parent_group_id': group_ref['id'], + 'cidr': '0.0.0.0/0', + 'protocol': 'udp', + 'from_port': 1194, + 'to_port': 1194} + db.security_group_rule_create(context, rule) + rule = {'parent_group_id': group_ref['id'], + 'cidr': '0.0.0.0/0', + 'protocol': 'icmp', + 'from_port': -1, + 'to_port': -1} + db.security_group_rule_create(context, rule) + # NOTE(vish): No need to trigger the group since the instance + # has not been run yet. + return group_name - def setup_key_pair(self, user_id, project_id): - key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix) + def setup_key_pair(self, context): + key_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) try: - private_key, fingerprint = self.manager.generate_key_pair(user_id, - key_name) + result = cloud._gen_key(context, context.user.id, key_name) + private_key = result['private_key'] try: - key_dir = os.path.join(FLAGS.keys_path, user_id) + key_dir = os.path.join(FLAGS.keys_path, context.user.id) if not os.path.exists(key_dir): os.makedirs(key_dir) - file_name = os.path.join(key_dir, '%s.pem' % key_name) - with open(file_name, 'w') as f: + key_path = os.path.join(key_dir, '%s.pem' % key_name) + with open(key_path, 'w') as f: f.write(private_key) except: pass except exception.Duplicate: pass return key_name - - # def setup_secgroups(self, username): - # conn = self.euca.connection_for(username) - # try: - # secgroup = conn.create_security_group("vpn-secgroup", - # "vpn-secgroup") - # secgroup.authorize(ip_protocol = "udp", from_port = "1194", - # to_port = "1194", cidr_ip = "0.0.0.0/0") - # secgroup.authorize(ip_protocol = "tcp", from_port = "80", - # to_port = "80", cidr_ip = "0.0.0.0/0") - # secgroup.authorize(ip_protocol = "tcp", from_port = "22", - # to_port = "22", cidr_ip = "0.0.0.0/0") - # except: - # pass diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..4953fe559 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -53,10 +53,28 @@ class ComputeAPI(base.Base): self.image_service = image_service super(ComputeAPI, self).__init__(**kwargs) + def get_network_topic(self, context, instance_id): + try: + instance = self.db.instance_get_by_internal_id(context, + instance_id) + except exception.NotFound as e: + 
logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + def create_instances(self, context, instance_type, image_id, min_count=1, max_count=1, kernel_id=None, ramdisk_id=None, display_name='', description='', key_name=None, key_data=None, security_group='default', + user_data=None, generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -73,14 +91,19 @@ class ComputeAPI(base.Base): is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: image = self.image_service.show(context, image_id) + + # If kernel_id/ramdisk_id isn't explicitly set in API call + # we take the defaults from the image's metadata if kernel_id is None: - kernel_id = image.get('kernelId', FLAGS.default_kernel) + kernel_id = image.get('kernelId', None) if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + ramdisk_id = image.get('ramdiskId', None) # Make sure we have access to kernel and ramdisk - self.image_service.show(context, kernel_id) - self.image_service.show(context, ramdisk_id) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -103,8 +126,8 @@ class ComputeAPI(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, - 'kernel_id': kernel_id, - 'ramdisk_id': ramdisk_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', 'state_description': 'scheduling', 'user_id': context.user_id, 'project_id': context.project_id, @@ -115,12 +138,13 @@ class ComputeAPI(base.Base): 'local_gb': type_data['local_gb'], 'display_name': display_name, 'display_description': description, + 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data} elevated = context.elevated() instances = [] - logging.debug("Going to run %s instances...", num_instances) + logging.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -145,19 +169,7 @@ class ComputeAPI(base.Base): instance = self.update_instance(context, instance_id, **updates) instances.append(instance) - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. 
- address = self.network_manager.allocate_fixed_ip(context, - instance_id, - is_vpn) - rpc.cast(elevated, - self._get_network_topic(context), - {"method": "setup_fixed_ip", - "args": {"address": address}}) - - logging.debug("Casting to scheduler for %s/%s's instance %s", + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -204,12 +216,12 @@ class ComputeAPI(base.Base): instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning(_("Instance %d was not found during terminate"), instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning(_("Instance %d is already being terminated"), instance_id) return @@ -219,28 +231,6 @@ class ComputeAPI(base.Base): state=0, terminated_at=datetime.datetime.utcnow()) - # FIXME(ja): where should network deallocate occur? - address = self.db.instance_get_floating_address(context, - instance['id']) - if address: - logging.debug("Disassociating address %s" % address) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(context, - self._get_network_topic(context), - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) - - address = self.db.instance_get_fixed_address(context, instance['id']) - if address: - logging.debug("Deallocating address %s" % address) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
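The _() wrapper being threaded through these log calls is gettext's translation hook; it is installed into the builtins elsewhere in the tree, which is why none of these hunks import it. A standalone illustration of the pattern: with no message catalog present the string passes through unchanged, and keeping the arguments separate defers interpolation until a handler actually emits the record.

```python
import gettext
import logging

gettext.install('nova')   # makes _() a builtin; NullTranslations echoes the msgid
logging.basicConfig(level=logging.DEBUG)
logging.warning(_("Instance %d was not found during terminate"), 42)
```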
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - host = instance['host'] if host: rpc.cast(context, @@ -275,6 +265,24 @@ class ComputeAPI(base.Base): {"method": "reboot_instance", "args": {"instance_id": instance['id']}}) + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance['id']}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance['id']}}) + def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -292,14 +300,3 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) - - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - network_ref = self.network_manager.get_network(context) - host = network_ref['host'] - if not host: - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return self.db.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 675cd0259..814a258cd 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -67,12 +67,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn("Input partition size not evenly divisible by" - " sector size: %d / %d", file_size, sector_size) + logging.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn("Bytes for local storage not evenly divisible" - " by sector size: %d / %d", local_bytes, sector_size) + logging.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a @@ -106,6 +106,13 @@ def partition(infile, outfile, local_bytes=0, resize=True, % (outfile, local_type, local_first, local_last)) +def extend(image, size, execute): + file_size = os.path.getsize(image) + if file_size >= size: + return + return execute('truncate -s size %s' % (image,)) + + def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. @@ -115,20 +122,30 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): If partition is not specified it mounts the image as a single partition. 
""" - out, err = execute('sudo losetup -f --show %s' % image) + out, err = execute('sudo losetup --find --show %s' % image) if err: - raise exception.Error('Could not attach image to loopback: %s' % err) + raise exception.Error(_('Could not attach image to loopback: %s') + % err) device = out.strip() try: if not partition is None: # create partition out, err = execute('sudo kpartx -a %s' % device) if err: - raise exception.Error('Failed to load partition: %s' % err) + raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], partition) else: mapped_device = device + + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) tmpdir = tempfile.mkdtemp() @@ -137,7 +154,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): out, err = execute( 'sudo mount %s %s' % (mapped_device, tmpdir)) if err: - raise exception.Error('Failed to mount filesystem: %s' % err) + raise exception.Error(_('Failed to mount filesystem: %s') + % err) try: if key: @@ -156,7 +174,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): execute('sudo kpartx -d %s' % device) finally: # remove loopback - execute('sudo losetup -d %s' % device) + execute('sudo losetup --detach %s' % device) def _inject_key_into_fs(key, fs, execute=None): @@ -165,7 +183,7 @@ def _inject_key_into_fs(key, fs, execute=None): key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. 
""" - sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') + sshdir = os.path.join(fs, 'root', '.ssh') execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter execute('sudo chown root %s' % sshdir) execute('sudo chmod 700 %s' % sshdir) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 6e47170bd..196d6a8df 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -38,7 +38,8 @@ def get_by_type(instance_type): if instance_type is None: return FLAGS.default_instance_type if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s" % instance_type) + raise exception.ApiError(_("Unknown instance type: %s"), + instance_type) return instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7eb60e262..de114bdeb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -40,6 +40,7 @@ import logging from nova import exception from nova import flags from nova import manager +from nova import rpc from nova import utils from nova.compute import power_state @@ -48,6 +49,8 @@ flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') +flags.DEFINE_string('stub_network', False, + 'Stub network related code') class ComputeManager(manager.Manager): @@ -65,6 +68,12 @@ class ComputeManager(manager.Manager): self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(*args, **kwargs) + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. + """ + self.driver.init_host() + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" # FIXME(ja): include other fields from state? @@ -76,6 +85,20 @@ class ComputeManager(manager.Manager): state = power_state.NOSTATE self.db.instance_set_state(context, instance_id, state) + def get_network_topic(self, context, **_kwargs): + """Retrieves the network host for a project on this host""" + # TODO(vish): This method should be memoized. This will make + # the call to get_network_host cheaper, so that + # it can pas messages instead of checking the db + # locally. 
+ if FLAGS.stub_network: + host = FLAGS.network_host + else: + host = self.network_manager.get_network_host(context) + return self.db.queue_get_for(context, + FLAGS.network_topic, + host) + @exception.wrap_exception def refresh_security_group(self, context, security_group_id, **_kwargs): """This call passes stright through to the virtualization driver.""" @@ -87,13 +110,32 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): - raise exception.Error("Instance has already been created") - logging.debug("instance %s: starting...", instance_id) - self.network_manager.setup_compute_network(context, instance_id) + raise exception.Error(_("Instance has already been created")) + logging.debug(_("instance %s: starting..."), instance_id) self.db.instance_update(context, instance_id, {'host': self.host}) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'networking') + + is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id + # NOTE(vish): This could be a cast because we don't do anything + # with the address currently, but I'm leaving it as + # a call to ensure that network setup completes. We + # will eventually also need to save the address here. + if not FLAGS.stub_network: + address = rpc.call(context, + self.get_network_topic(context), + {"method": "allocate_fixed_ip", + "args": {"instance_id": instance_id, + "vpn": is_vpn}}) + + self.network_manager.setup_compute_network(context, + instance_id) + # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, @@ -107,7 +149,7 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception("instance %s: Failed to spawn", + logging.exception(_("instance %s: Failed to spawn"), instance_ref['name']) self.db.instance_set_state(context, instance_id, @@ -119,16 +161,41 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - logging.debug("instance %s: terminating", instance_id) instance_ref = self.db.instance_get(context, instance_id) + + if not FLAGS.stub_network: + address = self.db.instance_get_floating_address(context, + instance_ref['id']) + if address: + logging.debug(_("Disassociating address %s") % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. + rpc.cast(context, + self.get_network_topic(context), + {"method": "disassociate_floating_ip", + "args": {"floating_address": address}}) + + address = self.db.instance_get_fixed_address(context, + instance_ref['id']) + if address: + logging.debug(_("Deallocating address %s") % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. 
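Both the run_instance and terminate_instance hunks above lean on the same two messaging primitives: rpc.call blocks until the remote method returns, which is why run_instance waits for the allocated address and so knows network setup finished before the driver spawns the guest, while rpc.cast is fire-and-forget, which is good enough for disassociating a floating ip. Roughly, with made-up values:

```python
# Illustrative only: the envelopes handed to nova.rpc in the hunks above.
topic = "network.host1"   # roughly what db.queue_get_for() resolves a host to
allocate = {"method": "allocate_fixed_ip",
            "args": {"instance_id": 42, "vpn": False}}           # sent with rpc.call
disassociate = {"method": "disassociate_floating_ip",
                "args": {"floating_address": "203.0.113.10"}}    # sent with rpc.cast
```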
+ self.network_manager.deallocate_fixed_ip(context.elevated(), + address) + + logging.debug(_("instance %s: terminating"), instance_id) + volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) if instance_ref['state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % instance_id) + raise exception.Error(_('trying to destroy already destroyed' + ' instance: %s') % instance_id) self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? @@ -142,13 +209,13 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)', + logging.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING) - logging.debug('instance %s: rebooting', instance_ref['name']) + logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -162,7 +229,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: rescuing', + logging.debug(_('instance %s: rescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -177,7 +244,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: unrescuing', + logging.debug(_('instance %s: unrescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -186,11 +253,52 @@ class ComputeManager(manager.Manager): self.driver.unrescue(instance_ref) self._update_state(context, instance_id) + @staticmethod + def _update_state_callback(self, context, instance_id, result): + """Update instance state when async task completes.""" + self._update_state(context, instance_id) + + @exception.wrap_exception + def pause_instance(self, context, instance_id): + """Pause an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: pausing', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'pausing') + self.driver.pause(instance_ref, + lambda result: self._update_state_callback(self, + context, + instance_id, + result)) + + @exception.wrap_exception + def unpause_instance(self, context, instance_id): + """Unpause a paused instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: unpausing', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'unpausing') + self.driver.unpause(instance_ref, + lambda result: self._update_state_callback(self, + context, + instance_id, + result)) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug("instance %s: getting console output", instance_id) + logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, 
instance_id) return self.driver.get_console_output(instance_ref) @@ -199,7 +307,7 @@ class ComputeManager(manager.Manager): def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug("instance %s: attaching volume %s to %s", instance_id, + logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, @@ -216,7 +324,7 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception("instance %s: attach failed %s, removing", + logging.exception(_("instance %s: attach failed %s, removing"), instance_id, mountpoint) self.volume_manager.remove_compute_volume(context, volume_id) @@ -228,13 +336,13 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug("instance %s: detaching volume %s", + logging.debug(_("instance %s: detaching volume %s"), instance_id, volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn("Detaching volume from unknown instance %s", + logging.warn(_("Detaching volume from unknown instance %s"), instance_ref['name']) else: self.driver.detach_volume(instance_ref['name'], diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 22653113a..60c347a5e 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -255,7 +255,7 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. 
""" - logging.debug('updating %s...', self.instance_id) + logging.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() @@ -285,7 +285,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception('unexpected error during update') + logging.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -351,7 +351,7 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error('Cannot get blockstats for "%s" on "%s"', + logging.error(_('Cannot get blockstats for "%s" on "%s"'), disk, self.instance_id) raise @@ -373,7 +373,7 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error('Cannot get ifstats for "%s" on "%s"', + logging.error(_('Cannot get ifstats for "%s" on "%s"'), interface, self.instance_id) raise @@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception('unexpected exception getting connection') + logging.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service): if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug('Found instance: %s', domain_id) + logging.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/crypto.py b/nova/crypto.py index aacc50b17..e4133ac85 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -19,7 +19,6 @@ Wrappers around standard crypto data elements. Includes root and intermediate CAs, SSH key_pairs and x509 certificates. 
- """ import base64 @@ -34,28 +33,57 @@ import utils import M2Crypto -from nova import exception +from nova import context +from nova import db from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') +flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) +flags.DEFINE_string('key_file', + os.path.join('private', 'cakey.pem'), + _('Filename of private key')) +flags.DEFINE_string('crl_file', 'crl.pem', + _('Filename of root Certificate Revokation List')) flags.DEFINE_string('keys_path', '$state_path/keys', - 'Where we keep our keys') + _('Where we keep our keys')) flags.DEFINE_string('ca_path', '$state_path/CA', - 'Where we keep our root CA') -flags.DEFINE_boolean('use_intermediate_ca', False, - 'Should we use intermediate CAs for each project?') + _('Where we keep our root CA')) +flags.DEFINE_boolean('use_project_ca', False, + _('Should we use a CA for each project?')) +flags.DEFINE_string('user_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=%s-%s-%s', + _('Subject for certificate for users, ' + '%s for project, user, timestamp')) +flags.DEFINE_string('project_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=project-ca-%s-%s', + _('Subject for certificate for projects, ' + '%s for project, timestamp')) +flags.DEFINE_string('vpn_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=project-vpn-%s-%s', + _('Subject for certificate for vpns, ' + '%s for project, timestamp')) -def ca_path(project_id): - if project_id: - return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id) - return "%s/cacert.pem" % (FLAGS.ca_path) +def ca_folder(project_id=None): + if FLAGS.use_project_ca and project_id: + return os.path.join(FLAGS.ca_path, 'projects', project_id) + return FLAGS.ca_path + + +def ca_path(project_id=None): + return os.path.join(ca_folder(project_id), FLAGS.ca_file) + + +def key_path(project_id=None): + return os.path.join(ca_folder(project_id), FLAGS.key_file) def fetch_ca(project_id=None, chain=True): - if not FLAGS.use_intermediate_ca: + if not FLAGS.use_project_ca: project_id = None buffer = "" if project_id: @@ -92,8 +120,8 @@ def generate_key_pair(bits=1024): def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): - pub_key_buffer = M2Crypto.BIO.MemoryBuffer(ssl_public_key) - rsa_key = M2Crypto.RSA.load_pub_key_bio(pub_key_buffer) + buf = M2Crypto.BIO.MemoryBuffer(ssl_public_key) + rsa_key = M2Crypto.RSA.load_pub_key_bio(buf) e, n = rsa_key.pub() key_type = 'ssh-rsa' @@ -106,53 +134,134 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): return '%s %s %s@%s\n' % (key_type, b64_blob, name, suffix) -def generate_x509_cert(subject, bits=1024): +def revoke_cert(project_id, file_name): + """Revoke a cert by file name""" + start = os.getcwd() + os.chdir(ca_folder(project_id)) + # NOTE(vish): potential race condition here + utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name) + utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" % + FLAGS.crl_file) + os.chdir(start) + + +def revoke_certs_by_user(user_id): + """Revoke all user certs""" + admin = context.get_admin_context() + for cert in db.certificate_get_all_by_user(admin, user_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def revoke_certs_by_project(project_id): + """Revoke all project certs""" + # NOTE(vish): This is somewhat useless because we can just shut down + # the vpn. 
+ admin = context.get_admin_context() + for cert in db.certificate_get_all_by_project(admin, project_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def revoke_certs_by_user_and_project(user_id, project_id): + """Revoke certs for user in project""" + admin = context.get_admin_context() + for cert in db.certificate_get_all_by_user(admin, user_id, project_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def _project_cert_subject(project_id): + """Helper to generate user cert subject""" + return FLAGS.project_cert_subject % (project_id, utils.isotime()) + + +def _vpn_cert_subject(project_id): + """Helper to generate user cert subject""" + return FLAGS.vpn_cert_subject % (project_id, utils.isotime()) + + +def _user_cert_subject(user_id, project_id): + """Helper to generate user cert subject""" + return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime()) + + +def generate_x509_cert(user_id, project_id, bits=1024): + """Generate and sign a cert for user in project""" + subject = _user_cert_subject(user_id, project_id) tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - logging.debug("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating private key: %s", - "openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating CSR: %s", - "openssl req -new -key %s -out %s -batch -subj %s" % + utils.execute("openssl genrsa -out %s %s" % (keyfile, bits)) + utils.execute("openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject)) private_key = open(keyfile).read() csr = open(csrfile).read() shutil.rmtree(tmpdir) - return (private_key, csr) + (serial, signed_csr) = sign_csr(csr, project_id) + fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % serial) + cert = {'user_id': user_id, + 'project_id': project_id, + 'file_name': fname} + db.certificate_create(context.get_admin_context(), cert) + return (private_key, signed_csr) -def sign_csr(csr_text, intermediate=None): - if not FLAGS.use_intermediate_ca: - intermediate = None - if not intermediate: - return _sign_csr(csr_text, FLAGS.ca_path) - user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate) - if not os.path.exists(user_ca): +def _ensure_project_folder(project_id): + if not os.path.exists(ca_path(project_id)): start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating intermediate CA: %s", - "sh geninter.sh %s" % (intermediate)) + os.chdir(ca_folder()) + utils.execute("sh geninter.sh %s %s" % + (project_id, _project_cert_subject(project_id))) os.chdir(start) - return _sign_csr(csr_text, user_ca) + + +def generate_vpn_files(project_id): + project_folder = ca_folder(project_id) + csr_fn = os.path.join(project_folder, "server.csr") + crt_fn = os.path.join(project_folder, "server.crt") + + if os.path.exists(crt_fn): + return + _ensure_project_folder(project_id) + start = os.getcwd() + os.chdir(ca_folder()) + # TODO(vish): the shell scripts could all be done in python + utils.execute("sh genvpn.sh %s %s" % + (project_id, _vpn_cert_subject(project_id))) + with open(csr_fn, "r") as csrfile: + csr_text = csrfile.read() + (serial, signed_csr) = sign_csr(csr_text, project_id) + with open(crt_fn, "w") as crtfile: + crtfile.write(signed_csr) + os.chdir(start) + + +def sign_csr(csr_text, project_id=None): + if not FLAGS.use_project_ca: + project_id = None + if not project_id: + return _sign_csr(csr_text, ca_folder()) + _ensure_project_folder(project_id) + 
project_folder = ca_folder(project_id) + return _sign_csr(csr_text, ca_folder(project_id)) def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() - csrfile = open("%s/inbound.csr" % (tmpfolder), "w") + inbound = os.path.join(tmpfolder, "inbound.csr") + outbound = os.path.join(tmpfolder, "outbound.csr") + csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug("Flags path: %s" % ca_folder) + logging.debug(_("Flags path: %s") % ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.runthis("Signing cert: %s", - "openssl ca -batch -out %s/outbound.crt " - "-config ./openssl.cnf -infiles %s/inbound.csr" % - (tmpfolder, tmpfolder)) + utils.execute("openssl ca -batch -out %s -config " + "./openssl.cnf -infiles %s" % (outbound, inbound)) + out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound) + serial = out.rpartition("=")[2] os.chdir(start) - with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile: - return crtfile.read() + with open(outbound, "r") as crtfile: + return (serial, crtfile.read()) def mkreq(bits, subject="foo", ca=0): @@ -160,8 +269,7 @@ def mkreq(bits, subject="foo", ca=0): req = M2Crypto.X509.Request() rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) pk.assign_rsa(rsa) - # Should not be freed here - rsa = None + rsa = None # should not be freed here req.set_pubkey(pk) req.set_subject(subject) req.sign(pk, 'sha512') @@ -225,7 +333,6 @@ def mkcacert(subject='nova', years=1): # IN THE SOFTWARE. # http://code.google.com/p/boto - def compute_md5(fp): """ :type fp: file diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc2443..fde3f0852 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -130,6 +130,45 @@ def service_update(context, service_id, values): ################### +def certificate_create(context, values): + """Create a certificate from the values dictionary.""" + return IMPL.certificate_create(context, values) + + +def certificate_destroy(context, certificate_id): + """Destroy the certificate or raise if it does not exist.""" + return IMPL.certificate_destroy(context, certificate_id) + + +def certificate_get_all_by_project(context, project_id): + """Get all certificates for a project.""" + return IMPL.certificate_get_all_by_project(context, project_id) + + +def certificate_get_all_by_user(context, user_id): + """Get all certificates for a user.""" + return IMPL.certificate_get_all_by_user(context, user_id) + + +def certificate_get_all_by_user_and_project(context, user_id, project_id): + """Get all certificates for a user and project.""" + return IMPL.certificate_get_all_by_user_and_project(context, + user_id, + project_id) + + +def certificate_update(context, certificate_id, values): + """Set the given properties on an certificate and update it. + + Raises NotFound if service does not exist. + + """ + return IMPL.service_update(context, certificate_id, values) + + +################### + + def floating_ip_allocate_address(context, host, project_id): """Allocate free floating ip and return the address. 
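sign_csr now reports the certificate serial as well as the signed cert so that generate_x509_cert can record the resulting file name in the new certificates table; the serial is parsed out of the `openssl x509 -serial -noout` output, roughly:

```python
# Made-up example of the command output _sign_csr parses; the real string
# comes back from utils.execute("openssl x509 -in ... -serial -noout").
out = "serial=10\n"
serial = out.rpartition("=")[2]   # keep whatever follows the last '='
print(serial.strip())             # 10  (strip() here is only for display)
```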
@@ -304,6 +343,11 @@ def instance_get_floating_address(context, instance_id): return IMPL.instance_get_floating_address(context, instance_id) +def instance_get_project_vpn(context, project_id): + """Get a vpn instance by project or return None.""" + return IMPL.instance_get_project_vpn(context, project_id) + + def instance_get_by_internal_id(context, internal_id): """Get an instance by internal id.""" return IMPL.instance_get_by_internal_id(context, internal_id) @@ -334,6 +378,11 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) +def instance_action_create(context, values): + """Create an instance action from the values dictionary.""" + return IMPL.instance_action_create(context, values) + + ################### @@ -468,12 +517,14 @@ def network_update(context, network_id, values): ################### -def project_get_network(context, project_id): +def project_get_network(context, project_id, associate=True): """Return the network associated with the project. - Raises NotFound if no such network can be found. + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns None. """ + return IMPL.project_get_network(context, project_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..52d0c389d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -41,7 +41,7 @@ FLAGS = flags.FLAGS def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: - warnings.warn('Use of empty request context is deprecated', + warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin @@ -130,7 +130,7 @@ def service_get(context, service_id, session=None): first() if not result: - raise exception.NotFound('No service for id %s' % service_id) + raise exception.NotFound(_('No service for id %s') % service_id) return result @@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No service for %s, %s' % (host, binary)) + raise exception.NotFound(_('No service for %s, %s') % (host, binary)) return result @@ -252,6 +252,84 @@ def service_update(context, service_id, values): ################### +@require_admin_context +def certificate_get(context, certificate_id, session=None): + if not session: + session = get_session() + + result = session.query(models.Certificate).\ + filter_by(id=certificate_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + + if not result: + raise exception.NotFound('No certificate for id %s' % certificate_id) + + return result + + +@require_admin_context +def certificate_create(context, values): + certificate_ref = models.Certificate() + for (key, value) in values.iteritems(): + certificate_ref[key] = value + certificate_ref.save() + return certificate_ref + + +@require_admin_context +def certificate_destroy(context, certificate_id): + session = get_session() + with session.begin(): + certificate_ref = certificate_get(context, + certificate_id, + session=session) + certificate_ref.delete(session=session) + + +@require_admin_context +def certificate_get_all_by_project(context, project_id): + session = get_session() + return session.query(models.Certificate).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_get_all_by_user(context, user_id): + 
session = get_session() + return session.query(models.Certificate).\ + filter_by(user_id=user_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_get_all_by_user_and_project(_context, user_id, project_id): + session = get_session() + return session.query(models.Certificate).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_update(context, certificate_id, values): + session = get_session() + with session.begin(): + certificate_ref = certificate_get(context, + certificate_id, + session=session) + for (key, value) in values.iteritems(): + certificate_ref[key] = value + certificate_ref.save(session=session) + + +################### + + @require_context def floating_ip_allocate_address(context, host, project_id): authorize_project_context(context, project_id) @@ -385,6 +463,7 @@ def floating_ip_get_by_address(context, address, session=None): session = get_session() result = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -491,7 +570,7 @@ def fixed_ip_get_by_address(context, address, session=None): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('No floating ip for address %s' % address) + raise exception.NotFound(_('No floating ip for address %s') % address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -528,6 +607,8 @@ def fixed_ip_update(context, address, values): #TODO(gundlach): instance_create and volume_create are nearly identical #and should be refactored. I expect there are other copy-and-paste #functions between the two of them as well. + + @require_context def instance_create(context, values): """Create a new Instance record in the database. 
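The certificate accessors above follow the session/query idiom used throughout this module. A trimmed-down, self-contained version for reference, using an in-memory SQLite database and a minimal stand-in model (nova's real Certificate model, declared further down in models.py, also carries the NovaBase bookkeeping columns):

```python
from sqlalchemy import Boolean, Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Certificate(Base):
    __tablename__ = 'certificates'
    id = Column(Integer, primary_key=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    file_name = Column(String(255))
    deleted = Column(Boolean, default=False)

engine = create_engine('sqlite://')          # throwaway in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Certificate(user_id='u1', project_id='p1', file_name='10.pem'))
session.commit()

certs = session.query(Certificate).\
        filter_by(user_id='u1').\
        filter_by(deleted=False).\
        all()
print([c.file_name for c in certs])          # ['10.pem']
```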
@@ -579,19 +660,23 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload('volumes')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No instance for id %s' % instance_id) + raise exception.NotFound(_('No instance for id %s') % instance_id) return result @@ -651,6 +736,18 @@ def instance_get_all_by_reservation(context, reservation_id): all() +@require_admin_context +def instance_get_project_vpn(context, project_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload('security_groups')).\ + filter_by(project_id=project_id).\ + filter_by(image_id=FLAGS.vpn_image_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + + @require_context def instance_get_by_internal_id(context, internal_id): session = get_session() @@ -669,7 +766,7 @@ def instance_get_by_internal_id(context, internal_id): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('Instance %s not found' % (internal_id)) + raise exception.NotFound(_('Instance %s not found') % (internal_id)) return result @@ -747,6 +844,18 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +@require_context +def instance_action_create(context, values): + """Create an instance action from the values dictionary.""" + action_ref = models.InstanceActions() + action_ref.update(values) + + session = get_session() + with session.begin(): + action_ref.save(session=session) + return action_ref + + ################### @@ -790,7 +899,7 @@ def key_pair_get(context, user_id, name, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('no keypair for user %s, name %s' % + raise exception.NotFound(_('no keypair for user %s, name %s') % (user_id, name)) return result @@ -905,7 +1014,7 @@ def network_get(context, network_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) return result @@ -913,6 +1022,8 @@ def network_get(context, network_id, session=None): # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 + + @require_admin_context def network_get_associated_fixed_ips(context, network_id): session = get_session() @@ -933,7 +1044,7 @@ def network_get_by_bridge(context, bridge): first() if not result: - raise exception.NotFound('No network for bridge %s' % bridge) + raise exception.NotFound(_('No network for bridge %s') % bridge) return result @@ -947,7 +1058,7 @@ def network_get_by_instance(_context, instance_id): filter_by(deleted=False).\ first() if not rv: - raise exception.NotFound('No network for instance %s' % instance_id) + raise exception.NotFound(_('No network for instance %s') % instance_id) return rv @@ -961,7 
+1072,7 @@ def network_set_host(context, network_id, host_id): with_lockmode('update').\ first() if not network_ref: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues @@ -985,24 +1096,26 @@ def network_update(context, network_id, values): @require_context -def project_get_network(context, project_id): +def project_get_network(context, project_id, associate=True): session = get_session() - rv = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - if not rv: + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + first() + if not result: + if not associate: + return None try: return network_associate(context, project_id) except IntegrityError: # NOTE(vish): We hit this if there is a race and two # processes are attempting to allocate the # network at the same time - rv = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - return rv + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + first() + return result ################### @@ -1073,7 +1186,7 @@ def auth_get_token(_context, token_hash): filter_by(token_hash=token_hash).\ first() if not tk: - raise exception.NotFound('Token %s does not exist' % token_hash) + raise exception.NotFound(_('Token %s does not exist') % token_hash) return tk @@ -1097,7 +1210,7 @@ def quota_get(context, project_id, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No quota for project_id %s' % project_id) + raise exception.NotFound(_('No quota for project_id %s') % project_id) return result @@ -1252,7 +1365,7 @@ def volume_get(context, volume_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No volume for id %s' % volume_id) + raise exception.NotFound(_('No volume for id %s') % volume_id) return result @@ -1308,7 +1421,7 @@ def volume_get_by_ec2_id(context, ec2_id): raise exception.NotAuthorized() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result @@ -1332,7 +1445,7 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result.instance @@ -1344,7 +1457,7 @@ def volume_get_shelf_and_blade(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound('No export device found for volume %s' % + raise exception.NotFound(_('No export device found for volume %s') % volume_id) return (result.shelf_id, result.blade_id) @@ -1357,7 +1470,7 @@ def volume_get_iscsi_target_num(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound('No target id found for volume %s' % + raise exception.NotFound(_('No target id found for volume %s') % volume_id) return result.target_num @@ -1402,7 +1515,7 @@ def security_group_get(context, security_group_id, session=None): options(joinedload_all('rules')).\ first() if not result: - raise exception.NotFound("No secuity group with id %s" % + raise exception.NotFound(_("No security 
group with id %s") % security_group_id) return result @@ -1419,7 +1532,7 @@ def security_group_get_by_name(context, project_id, group_name): first() if not result: raise exception.NotFound( - 'No security group named %s for project: %s' \ + _('No security group named %s for project: %s') % (group_name, project_id)) return result @@ -1507,7 +1620,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None): filter_by(id=security_group_rule_id).\ first() if not result: - raise exception.NotFound("No secuity group rule with id %s" % + raise exception.NotFound(_("No secuity group rule with id %s") % security_group_rule_id) return result @@ -1543,7 +1656,7 @@ def user_get(context, id, session=None): first() if not result: - raise exception.NotFound('No user for id %s' % id) + raise exception.NotFound(_('No user for id %s') % id) return result @@ -1559,7 +1672,7 @@ def user_get_by_access_key(context, access_key, session=None): first() if not result: - raise exception.NotFound('No user for access key %s' % access_key) + raise exception.NotFound(_('No user for access key %s') % access_key) return result @@ -1621,7 +1734,7 @@ def project_get(context, id, session=None): first() if not result: - raise exception.NotFound("No project with id %s" % id) + raise exception.NotFound(_("No project with id %s") % id) return result diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a921..693db8d23 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,7 +22,7 @@ SQLAlchemy models for nova data. import datetime from sqlalchemy.orm import relationship, backref, object_mapper -from sqlalchemy import Column, Integer, String, schema +from sqlalchemy import Column, Integer, Float, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base @@ -151,6 +151,16 @@ class Service(BASE, NovaBase): disabled = Column(Boolean, default=False) +class Certificate(BASE, NovaBase): + """Represents a an x509 certificate""" + __tablename__ = 'certificates' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) + project_id = Column(String(255)) + file_name = Column(String(255)) + + class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' @@ -226,6 +236,31 @@ class Instance(BASE, NovaBase): # 'shutdown', 'shutoff', 'crashed']) +class InstanceDiagnostics(BASE, NovaBase): + """Represents a guest VM's diagnostics""" + __tablename__ = "instance_diagnostics" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + memory_available = Column(Float) + memory_free = Column(Float) + cpu_load = Column(Float) + disk_read = Column(Float) + disk_write = Column(Float) + net_tx = Column(Float) + net_rx = Column(Float) + + +class InstanceActions(BASE, NovaBase): + """Represents a guest VM's actions and results""" + __tablename__ = "instance_actions" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + action = Column(String(255)) + error = Column(Text) + + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -526,10 +561,11 @@ def register_models(): it will never need to be called explicitly elsewhere. 
""" from sqlalchemy import create_engine - models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, - FloatingIp, Network, SecurityGroup, - SecurityGroupIngressRule, SecurityGroupInstanceAssociation, - AuthToken, User, Project) # , Image, Host + models = (Service, Instance, InstanceDiagnostics, InstanceActions, + Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, + Network, SecurityGroup, SecurityGroupIngressRule, + SecurityGroupInstanceAssociation, AuthToken, User, + Project, Certificate) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 6d6c37338..277033e0f 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -27,23 +27,26 @@ import traceback class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: - description = "Unexpected error while running command." + description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( - description, cmd, exit_code, stdout, stderr) + message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\ + % (description, cmd, exit_code, stdout, stderr) IOError.__init__(self, message) class Error(Exception): + def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code @@ -81,7 +84,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception('Uncaught exception') + logging.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakememcache.py b/nova/fakememcache.py new file mode 100644 index 000000000..67f46dbdc --- /dev/null +++ b/nova/fakememcache.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Super simple fake memcache client.""" + +import utils + + +class Client(object): + """Replicates a tiny subset of memcached client interface.""" + + def __init__(self, *args, **kwargs): + """Ignores the passed in args""" + self.cache = {} + + def get(self, key): + """Retrieves the value for a key or None.""" + (timeout, value) = self.cache.get(key, (0, None)) + if timeout == 0 or utils.utcnow_ts() < timeout: + return value + return None + + def set(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key.""" + timeout = 0 + if time != 0: + timeout = utils.utcnow_ts() + time + self.cache[key] = (timeout, value) + return True + + def add(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key if it doesn't exist.""" + if not self.get(key) is None: + return False + return self.set(key, value, time, min_compress_len) + + def incr(self, key, delta=1): + """Increments the value for a key.""" + value = self.get(key) + if value is None: + return None + new_value = int(value) + delta + self.cache[key] = (self.cache[key][0], str(new_value)) + return new_value diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c64617931..79d8b894d 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -25,6 +25,10 @@ from carrot.backends import base from eventlet import greenthread +EXCHANGES = {} +QUEUES = {} + + class Message(base.BaseMessage): pass @@ -37,12 +41,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug('(%s) publish (key: %s) %s', + logging.debug(_('(%s) publish (key: %s) %s'), self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug('Publishing to route %s', f) + logging.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -68,81 +72,63 @@ class Queue(object): return self._queue.get() -class Backend(object): - """ Singleton backend for testing """ - class __impl(base.BaseBackend): - def __init__(self, *args, **kwargs): - #super(__impl, self).__init__(*args, **kwargs) - self._exchanges = {} - self._queues = {} - - def _reset_all(self): - self._exchanges = {} - self._queues = {} - - def queue_declare(self, queue, **kwargs): - if queue not in self._queues: - logging.debug('Declaring queue %s', queue) - self._queues[queue] = Queue(queue) - - def exchange_declare(self, exchange, type, *args, **kwargs): - if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) - self._exchanges[exchange] = Exchange(exchange, type) - - def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', - queue, exchange, routing_key) - self._exchanges[exchange].bind(self._queues[queue].push, - routing_key) - - def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback - - def consume(self, *args, **kwargs): - while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) - - def get(self, queue, no_ack=False): - if not queue in self._queues or not self._queues[queue].size(): - return None - (message_data, content_type, content_encoding) = \ - self._queues[queue].pop() - message = Message(backend=self, body=message_data, - content_type=content_type, - content_encoding=content_encoding) - message.result = True - logging.debug('Getting from 
%s: %s', queue, message) - return message - - def prepare_message(self, message_data, delivery_mode, - content_type, content_encoding, **kwargs): - """Prepare message for sending.""" - return (message_data, content_type, content_encoding) - - def publish(self, message, exchange, routing_key, **kwargs): - if exchange in self._exchanges: - self._exchanges[exchange].publish( - message, routing_key=routing_key) - - __instance = None - - def __init__(self, *args, **kwargs): - if Backend.__instance is None: - Backend.__instance = Backend.__impl(*args, **kwargs) - self.__dict__['_Backend__instance'] = Backend.__instance - - def __getattr__(self, attr): - return getattr(self.__instance, attr) - - def __setattr__(self, attr, value): - return setattr(self.__instance, attr, value) +class Backend(base.BaseBackend): + def queue_declare(self, queue, **kwargs): + global QUEUES + if queue not in QUEUES: + logging.debug(_('Declaring queue %s'), queue) + QUEUES[queue] = Queue(queue) + + def exchange_declare(self, exchange, type, *args, **kwargs): + global EXCHANGES + if exchange not in EXCHANGES: + logging.debug(_('Declaring exchange %s'), exchange) + EXCHANGES[exchange] = Exchange(exchange, type) + + def queue_bind(self, queue, exchange, routing_key, **kwargs): + global EXCHANGES + global QUEUES + logging.debug(_('Binding %s to %s with key %s'), + queue, exchange, routing_key) + EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) + + def declare_consumer(self, queue, callback, *args, **kwargs): + self.current_queue = queue + self.current_callback = callback + + def consume(self, limit=None): + while True: + item = self.get(self.current_queue) + if item: + self.current_callback(item) + raise StopIteration() + greenthread.sleep(0) + + def get(self, queue, no_ack=False): + global QUEUES + if not queue in QUEUES or not QUEUES[queue].size(): + return None + (message_data, content_type, content_encoding) = QUEUES[queue].pop() + message = Message(backend=self, body=message_data, + content_type=content_type, + content_encoding=content_encoding) + message.result = True + logging.debug(_('Getting from %s: %s'), queue, message) + return message + + def prepare_message(self, message_data, delivery_mode, + content_type, content_encoding, **kwargs): + """Prepare message for sending.""" + return (message_data, content_type, content_encoding) + + def publish(self, message, exchange, routing_key, **kwargs): + global EXCHANGES + if exchange in EXCHANGES: + EXCHANGES[exchange].publish(message, routing_key=routing_key) def reset_all(): - Backend()._reset_all() + global EXCHANGES + global QUEUES + EXCHANGES = {} + QUEUES = {} diff --git a/nova/flags.py b/nova/flags.py index 87444565a..76a98d35a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,6 +29,8 @@ import sys import gflags +from nova import utils + class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. 
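The fakerabbit rewrite above drops the nested singleton Backend in favour of plain module-level dicts: every Backend instance sees the same EXCHANGES and QUEUES, and reset_all() rebinds them to empty dicts between tests. A self-contained sketch of that pattern, with illustrative names rather than nova code:

    # Module-level state shared by all instances; reset_all() wipes it.
    QUEUES = {}


    class FakeBackend(object):
        def queue_declare(self, queue):
            QUEUES.setdefault(queue, [])

        def publish(self, queue, message):
            QUEUES[queue].append(message)

        def get(self, queue):
            items = QUEUES.get(queue)
            return items.pop(0) if items else None


    def reset_all():
        global QUEUES
        QUEUES = {}


    one, two = FakeBackend(), FakeBackend()
    one.queue_declare('compute')
    one.publish('compute', 'ping')
    assert two.get('compute') == 'ping'   # both instances share the state
    reset_all()
    assert two.get('compute') is None     # and one call clears it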
@@ -211,7 +213,8 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', '127.0.0.1', 's3 host') +DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -230,22 +233,24 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') +DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') +DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') +DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') +DEFINE_integer('cc_port', 8773, 'cloud controller port') +DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -DEFINE_string('default_kernel', 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', 'ari-11111', - 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') +DEFINE_string('null_kernel', 'nokernel', + 'kernel image that indicates not to use a kernel,' + ' but to use a raw disk image instead') -DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') +DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', - '-key', - 'Suffix to add to project name for vpn key') + '-vpn', + 'Suffix to add to project name for vpn key and secgroups') DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') diff --git a/nova/image/glance.py b/nova/image/glance.py index 1ca6cf2eb..cb3936df1 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -77,8 +77,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images"), res.status_int) return [] finally: c.close() @@ -96,8 +96,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images/detail", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images/detail"), res.status_int) return [] finally: c.close() diff --git a/nova/image/s3.py b/nova/image/s3.py index 0a25161de..7b04aa072 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService): result = self.index(context) result = [i for i in result if i['imageId'] == image_id] if not result: - raise exception.NotFound('Image %s could not be found' % image_id) + raise exception.NotFound(_('Image %s could not be found') + % image_id) image = result[0] return image diff --git 
a/nova/network/linux_net.py b/nova/network/linux_net.py index 0fefd9415..931a89554 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -19,7 +19,6 @@ Implements vlans, bridges, and iptables rules using linux utilities. import logging import os -import signal # TODO(ja): does the definition of network_path belong here? @@ -46,41 +45,90 @@ flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') -flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') -flags.DEFINE_string('routing_source_ip', '127.0.0.1', +flags.DEFINE_string('routing_source_ip', utils.get_my_ip(), 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') - -DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] +flags.DEFINE_string('dns_server', None, + 'if set, uses specific dns server for dnsmasq') +flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', + 'dmz range that should be accepted') def metadata_forward(): """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) + "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port)) def init_host(): """Basic networking setup goes here""" + + if FLAGS.use_nova_chains: + _execute("sudo iptables -N nova_input", check_exit_code=False) + _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain, + check_exit_code=False) + _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain) + + _execute("sudo iptables -N nova_forward", check_exit_code=False) + _execute("sudo iptables -D FORWARD -j nova_forward", + check_exit_code=False) + _execute("sudo iptables -A FORWARD -j nova_forward") + + _execute("sudo iptables -N nova_output", check_exit_code=False) + _execute("sudo iptables -D OUTPUT -j nova_output", + check_exit_code=False) + _execute("sudo iptables -A OUTPUT -j nova_output") + + _execute("sudo iptables -t nat -N nova_prerouting", + check_exit_code=False) + _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting", + check_exit_code=False) + _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting") + + _execute("sudo iptables -t nat -N nova_postrouting", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting") + + _execute("sudo iptables -t nat -N nova_snatting", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting") + + _execute("sudo iptables -t nat -N nova_output", check_exit_code=False) + _execute("sudo iptables -t nat -D OUTPUT -j nova_output", + check_exit_code=False) + _execute("sudo iptables -t nat -A OUTPUT -j nova_output") + else: + # NOTE(vish): This makes it easy to ensure snatting rules always + # come after the accept rules in the postrouting chain + _execute("sudo iptables -t nat -N SNATTING", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING") + # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for 
outbound traffic. - _confirm_rule("POSTROUTING", "-t nat -s %s " + _confirm_rule("SNATTING", "-t nat -s %s " "-j SNAT --to-source %s" - % (FLAGS.fixed_range, FLAGS.routing_source_ip)) + % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True) - _confirm_rule("POSTROUTING", "-t nat -s %s -j MASQUERADE" % - FLAGS.fixed_range) + _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" % + (FLAGS.fixed_range, FLAGS.dmz_cidr)) _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % {'range': FLAGS.fixed_range}) -def bind_floating_ip(floating_ip): +def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface)) + FLAGS.public_interface), + check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): @@ -102,27 +150,16 @@ def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) - _confirm_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s" + _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) - # TODO(joshua): Get these from the secgroup datastore entries - _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT" - % (fixed_ip)) - for (protocol, port) in DEFAULT_PORTS: - _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" - % (fixed_ip, protocol, port)) def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) - _remove_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s" + _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) - _remove_rule("FORWARD", "-d %s -p icmp -j ACCEPT" - % (fixed_ip)) - for (protocol, port) in DEFAULT_PORTS: - _remove_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" - % (fixed_ip, protocol, port)) def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): @@ -135,7 +172,7 @@ def ensure_vlan(vlan_num): """Create a vlan unless it already exists""" interface = "vlan%s" % vlan_num if not _device_exists(interface): - logging.debug("Starting VLAN inteface %s", interface) + logging.debug(_("Starting VLAN inteface %s"), interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) @@ -145,7 +182,7 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug("Starting Bridge interface for %s", interface) + logging.debug(_("Starting Bridge interface for %s"), interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) @@ -160,6 +197,15 @@ def ensure_bridge(bridge, interface, net_attrs=None): net_attrs['netmask'])) else: _execute("sudo ifconfig %s up" % bridge) + if FLAGS.use_nova_chains: + (out, err) = _execute("sudo iptables -N nova_forward", + check_exit_code=False) + if err != 'iptables: Chain already exists.\n': + # NOTE(vish): chain didn't exist link chain + _execute("sudo iptables -D FORWARD -j nova_forward", + check_exit_code=False) + _execute("sudo iptables -A FORWARD -j nova_forward") + _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge) _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge) 
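The init_host() and ensure_bridge() changes above repeat the same create/unlink/relink sequence for every nova_ chain. A standalone sketch of that idempotency idea follows; the helper names are hypothetical, and subprocess stands in for nova's _execute wrapper:

    import subprocess


    def _run(cmd, check_exit_code=True):
        """Tiny stand-in for nova's _execute helper."""
        rc = subprocess.call(cmd, shell=True)
        if check_exit_code and rc != 0:
            raise RuntimeError('command failed: %s' % cmd)


    def ensure_chain_linked(chain, parent, table=None):
        """Create chain if missing, then leave exactly one jump to it."""
        prefix = 'sudo iptables' + (' -t %s' % table if table else '')
        _run('%s -N %s' % (prefix, chain), check_exit_code=False)
        _run('%s -D %s -j %s' % (prefix, parent, chain), check_exit_code=False)
        _run('%s -A %s -j %s' % (prefix, parent, chain))


    # e.g. the FORWARD -> nova_forward link from the hunk above:
    # ensure_chain_linked('nova_forward', 'FORWARD')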
@@ -202,9 +248,9 @@ def update_dhcp(context, network_id): _execute('sudo kill -HUP %d' % pid) return except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Hupping dnsmasq threw %s", exc) + logging.debug(_("Hupping dnsmasq threw %s"), exc) else: - logging.debug("Pid %d is stale, relaunching dnsmasq", pid) + logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -236,13 +282,17 @@ def _device_exists(device): return not err -def _confirm_rule(chain, cmd): +def _confirm_rule(chain, cmd, append=False): """Delete and re-add iptables rule""" if FLAGS.use_nova_chains: chain = "nova_%s" % chain.lower() + if append: + loc = "-A" + else: + loc = "-I" _execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False) - _execute("sudo iptables -I %s %s" % (chain, cmd)) + _execute("sudo iptables %s %s %s" % (loc, chain, cmd)) def _remove_rule(chain, cmd): @@ -265,6 +315,8 @@ def _dnsmasq_cmd(net): ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), ' --dhcp-script=%s' % FLAGS.dhcpbridge, ' --leasefile-ro'] + if FLAGS.dns_server: + cmd.append(' -h -R --server=%s' % FLAGS.dns_server) return ''.join(cmd) @@ -276,7 +328,7 @@ def _stop_dnsmasq(network): try: _execute('sudo kill -TERM %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Killing dnsmasq threw %s", exc) + logging.debug(_("Killing dnsmasq threw %s"), exc) def _dhcp_file(bridge, kind): diff --git a/nova/network/manager.py b/nova/network/manager.py index 6a30f30b7..16aa8f895 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -47,6 +47,7 @@ topologies. All of the network commands are issued to a subclass of import datetime import logging import math +import socket import IPy @@ -56,6 +57,7 @@ from nova import exception from nova import flags from nova import manager from nova import utils +from nova import rpc FLAGS = flags.FLAGS @@ -87,6 +89,10 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, 'Whether to update dhcp when fixed_ip is disassociated') flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') +flags.DEFINE_string('network_host', socket.gethostname(), + 'Network host to use for ip allocation in flat modes') +flags.DEFINE_bool('fake_call', False, + 'If True, skip using the queue and make local calls') class AddressAlreadyAllocated(exception.Error): @@ -112,10 +118,20 @@ class NetworkManager(manager.Manager): ctxt = context.get_admin_context() for network in self.db.host_get_networks(ctxt, self.host): self._on_set_network_host(ctxt, network['id']) + floating_ips = self.db.floating_ip_get_all_by_host(ctxt, + self.host) + for floating_ip in floating_ips: + if floating_ip.get('fixed_ip', None): + fixed_address = floating_ip['fixed_ip']['address'] + # NOTE(vish): The False here is because we ignore the case + # that the ip is already bound. 
+ self.driver.bind_floating_ip(floating_ip['address'], False) + self.driver.ensure_floating_forward(floating_ip['address'], + fixed_address) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug("setting network host") + logging.debug(_("setting network host")) host = self.db.network_set_host(context, network_id, self.host) @@ -174,10 +190,10 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error("IP %s leased that isn't associated" % + raise exception.Error(_("IP %s leased that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s leased to bad mac %s vs %s" % + raise exception.Error(_("IP %s leased to bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, @@ -185,7 +201,8 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn("IP %s leased that was already deallocated", address) + logging.warn(_("IP %s leased that was already deallocated"), + address) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" @@ -193,13 +210,13 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error("IP %s released that isn't associated" % + raise exception.Error(_("IP %s released that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s released from bad mac %s vs %s" % + raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn("IP %s released that was not leased", address) + logging.warn(_("IP %s released that was not leased"), address) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) @@ -212,8 +229,8 @@ class NetworkManager(manager.Manager): network_ref = self.db.fixed_ip_get_network(context, address) self.driver.update_dhcp(context, network_ref['id']) - def get_network(self, context): - """Get the network for the current context.""" + def get_network_host(self, context): + """Get the network host for the current context.""" raise NotImplementedError() def create_networks(self, context, num_networks, network_size, @@ -301,10 +318,6 @@ class FlatManager(NetworkManager): """Network is created manually.""" pass - def setup_fixed_ip(self, context, address): - """Currently no setup.""" - pass - def create_networks(self, context, cidr, num_networks, network_size, *args, **kwargs): """Create networks based on parameters.""" @@ -325,14 +338,25 @@ class FlatManager(NetworkManager): if network_ref: self._create_fixed_ips(context, network_ref['id']) - def get_network(self, context): - """Get the network for the current context.""" - # NOTE(vish): To support mutilple network hosts, This could randomly - # select from multiple networks instead of just - # returning the one. It could also potentially be done - # in the scheduler. 
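The FlatManager and VlanManager hunks that follow both implement the renamed get_network_host() with the same shape: return the network's host if one is already recorded, otherwise ask a network service to claim it over RPC, or claim it in-process when fake_call is set for tests. A condensed sketch of that shared logic; _get_network is a hypothetical stand-in for the manager-specific lookup (network_get_by_bridge vs. project_get_network), and error handling is elided:

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS


    class LazyHostMixin(object):
        def get_network_host(self, context):
            network_ref = self._get_network(context)   # manager-specific
            host = network_ref['host']
            if host:
                return host
            if FLAGS.fake_call:
                # tests skip the queue and claim the network in-process
                return self.set_network_host(context, network_ref['id'])
            return rpc.call(context,
                            FLAGS.network_topic,
                            {"method": "set_network_host",
                             "args": {"network_id": network_ref['id']}})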
- return self.db.network_get_by_bridge(context, - FLAGS.flat_network_bridge) + def get_network_host(self, context): + """Get the network host for the current context.""" + network_ref = self.db.network_get_by_bridge(context, + FLAGS.flat_network_bridge) + # NOTE(vish): If the network has no host, use the network_host flag. + # This could eventually be a a db lookup of some sort, but + # a flag is easy to handle for now. + host = network_ref['host'] + if not host: + topic = self.db.queue_get_for(context, + FLAGS.network_topic, + FLAGS.network_host) + if FLAGS.fake_call: + return self.set_network_host(context, network_ref['id']) + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return host def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" @@ -361,13 +385,18 @@ class FlatDHCPManager(FlatManager): """Sets up matching network for compute hosts.""" network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface, - network_ref) + FLAGS.flat_interface) - def setup_fixed_ip(self, context, address): + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Setup dhcp for this network.""" + address = super(FlatDHCPManager, self).allocate_fixed_ip(context, + instance_id, + *args, + **kwargs) network_ref = db.fixed_ip_get_network(context, address) - self.driver.update_dhcp(context, network_ref['id']) + if not FLAGS.fake_network: + self.driver.update_dhcp(context, network_ref['id']) + return address def deallocate_fixed_ip(self, context, address, *args, **kwargs): """Returns a fixed ip to the pool.""" @@ -408,7 +437,7 @@ class VlanManager(NetworkManager): self.host, time) if num: - logging.debug("Dissassociated %s stale fixed ip(s)", num) + logging.debug(_("Dissassociated %s stale fixed ip(s)"), num) def init_host(self): """Do any initialization that needs to be run if this is a @@ -436,33 +465,20 @@ class VlanManager(NetworkManager): network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) + if not FLAGS.fake_network: + self.driver.update_dhcp(context, network_ref['id']) return address def deallocate_fixed_ip(self, context, address, *args, **kwargs): """Returns a fixed ip to the pool.""" self.db.fixed_ip_update(context, address, {'allocated': False}) - def setup_fixed_ip(self, context, address): - """Sets forwarding rules and dhcp for fixed ip.""" - fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) - network_ref = self.db.fixed_ip_get_network(context, address) - if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']): - self.driver.ensure_vlan_forward(network_ref['vpn_public_address'], - network_ref['vpn_public_port'], - network_ref['vpn_private_address']) - self.driver.update_dhcp(context, network_ref['id']) - def setup_compute_network(self, context, instance_id): """Sets up matching network for compute hosts.""" network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge']) - def restart_nets(self): - """Ensure the network for each user is enabled.""" - # TODO(vish): Implement this - pass - def create_networks(self, context, cidr, num_networks, network_size, vlan_start, vpn_start): """Create networks based on parameters.""" @@ -489,21 +505,45 @@ class VlanManager(NetworkManager): if network_ref: self._create_fixed_ips(context, 
network_ref['id']) - def get_network(self, context): + def get_network_host(self, context): """Get the network for the current context.""" - return self.db.project_get_network(context.elevated(), - context.project_id) + network_ref = self.db.project_get_network(context.elevated(), + context.project_id) + # NOTE(vish): If the network has no host, do a call to get an + # available host. This should be changed to go through + # the scheduler at some point. + host = network_ref['host'] + if not host: + if FLAGS.fake_call: + return self.set_network_host(context, network_ref['id']) + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + + return host def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" network_ref = self.db.network_get(context, network_id) - net = {} - net['vpn_public_address'] = FLAGS.vpn_ip - db.network_update(context, network_id, net) + if not network_ref['vpn_public_address']: + net = {} + address = FLAGS.vpn_ip + net['vpn_public_address'] = address + db.network_update(context, network_id, net) + else: + address = network_ref['vpn_public_address'] self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref) - self.driver.update_dhcp(context, network_id) + # NOTE(vish): only ensure this forward if the address hasn't been set + # manually. + if address == FLAGS.vpn_ip: + self.driver.ensure_vlan_forward(FLAGS.vpn_ip, + network_ref['vpn_public_port'], + network_ref['vpn_private_address']) + if not FLAGS.fake_network: + self.driver.update_dhcp(context, network_id) @property def _bottom_reserved_ips(self): diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c8920b00c..52257f69f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -102,7 +102,7 @@ def _render_parts(value, write_cb): _render_parts(subsubvalue, write_cb) write_cb('</' + utils.utf8(name) + '>') else: - raise Exception("Unknown S3 value type %r", value) + raise Exception(_("Unknown S3 value type %r"), value) def get_argument(request, key, default_value): @@ -134,7 +134,7 @@ def get_context(request): check_type='s3') return context.RequestContext(user, project) except exception.Error as ex: - logging.debug("Authentication Failure: %s", ex) + logging.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug("Creating bucket %s", self.name) + logging.debug(_("Creating bucket %s"), self.name) logging.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) @@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug("Deleting bucket %s", self.name) + logging.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): @@ -261,7 +261,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" - logging.debug("Getting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Getting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -279,7 +281,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug("Putting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Putting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -298,7 +302,7 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. """ - logging.debug("Deleting object: %s / %s", + logging.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name) @@ -394,17 +398,17 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug("not authorized for render_POST in images") + logging.debug(_("not authorized for render_POST in images")) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug("handling publicity toggle") + logging.debug(_("handling publicity toggle")) image_object.set_public(operation == 'add') else: # other attributes imply update - logging.debug("update user fields") + logging.debug(_("update user fields")) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] diff --git a/nova/rpc.py b/nova/rpc.py index 6a3f552db..844088348 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -91,15 +91,15 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception("AMQP server on %s:%d is unreachable." \ - " Trying again in %d seconds." % ( + logging.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( FLAGS.rabbit_host, FLAGS.rabbit_port, FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception("Unable to connect to AMQP server" \ - " after %d tries. Shutting down." % FLAGS.rabbit_max_retries) + logging.exception(_("Unable to connect to AMQP server" + " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +116,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error("Reconnected to queue") + logging.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception("Failed to fetch message from queue") + logging.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -153,7 +153,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -168,7 +168,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug('received %s' % (message_data)) + LOG.debug(_('received %s') % (message_data)) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -181,8 +181,8 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn('no method for message: %s' % (message_data)) - msg_reply(msg_id, 'No method for message: %s' % message_data) + LOG.warn(_('no method for message: %s') % (message_data)) + msg_reply(msg_id, _('No method for message: %s') % message_data) return node_func = getattr(self.proxy, str(method)) @@ -242,10 +242,10 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error("Returning exception %s to caller", message) + logging.error(_("Returning exception %s to caller"), message) logging.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = Connection.instance(True) publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -283,7 +283,7 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value - LOG.debug('unpacked context: %s', context_dict) + LOG.debug(_('unpacked context: %s'), context_dict) return context.RequestContext.from_dict(context_dict) @@ -302,10 +302,10 @@ def _pack_context(msg, context): def call(context, topic, msg): """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") + LOG.debug(_("Making asynchronous call...")) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -353,7 +353,7 @@ def cast(context, topic, msg): def generic_response(message_data, message): """Logs a result and exits""" - LOG.debug('response %s', message_data) + LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) @@ -362,8 +362,8 @@ def send_message(topic, message, wait=True): """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - LOG.debug('topic is %s', topic) - LOG.debug('message %s', message) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 7fd09b053..9deaa2777 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler): hosts = 
self.hosts_up(context, topic) if not hosts: - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) return hosts[int(random.random() * len(hosts))] diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f271d573f..08d7033f5 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -58,4 +58,4 @@ class Scheduler(object): def schedule(self, context, topic, *_args, **_kwargs): """Must override at least this method for scheduler to work.""" - raise NotImplementedError("Must implement a fallback schedule") + raise NotImplementedError(_("Must implement a fallback schedule")) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 60a3d2b4b..44e21f2fd 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug("Casting to %s %s for %s", topic, host, method) + logging.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f5093656..f9171ab35 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_cores) = result if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores: - raise driver.NoValidHost("All hosts have too many cores") + raise driver.NoValidHost(_("All hosts have too many cores")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" @@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, volume_gigabytes) = result if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - raise driver.NoValidHost("All hosts have too many gigabytes") + raise driver.NoValidHost(_("All hosts have too many " + "gigabytes")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_set_network_host(self, context, *_args, **_kwargs): """Picks a host that is up and has the fewest networks.""" @@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_count) = result if instance_count >= FLAGS.max_networks: - raise driver.NoValidHost("All hosts have too many networks") + raise driver.NoValidHost(_("All hosts have too many networks")) if self.service_is_up(service): return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) diff --git a/nova/service.py b/nova/service.py index ac30aaceb..f1f90742f 100644 --- a/nova/service.py +++ b/nova/service.py @@ -151,7 +151,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - 
logging.warn("Starting %s node", topic) + logging.warn(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -163,7 +163,7 @@ class Service(object): try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: - logging.warn("Service killed that has no database entry") + logging.warn(_("Service killed that has no database entry")) def stop(self): for x in self.timers: @@ -184,8 +184,8 @@ class Service(object): try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: - logging.debug("The service database object disappeared, " - "Recreating it.") + logging.debug(_("The service database object disappeared, " + "Recreating it.")) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) @@ -196,13 +196,13 @@ class Service(object): # TODO(termie): make this pattern be more elegant. if getattr(self, "model_disconnected", False): self.model_disconnected = False - logging.error("Recovered model server connection!") + logging.error(_("Recovered model server connection!")) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable-msg=W0702 if not getattr(self, "model_disconnected", False): self.model_disconnected = True - logging.exception("model server went away") + logging.exception(_("model server went away")) def serve(*services): @@ -221,7 +221,7 @@ def serve(*services): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce..3820f5f27 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,11 +56,16 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, + return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id, display_name='server%s' % id, internal_id=id) +def fake_compute_api(cls, req, id): + return True + + class ServersTest(unittest.TestCase): + def setUp(self): self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} @@ -82,9 +87,15 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) + self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', + fake_compute_api) + self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', + fake_compute_api) + self.allow_admin = FLAGS.allow_admin_api def tearDown(self): self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -211,6 +222,30 @@ class ServersTest(unittest.TestCase): self.assertEqual(s['imageId'], 10) i += 1 + def test_server_pause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/pause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + + def test_server_unpause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, 
metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/unpause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + def test_server_reboot(self): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 4508d6721..15d40bc53 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -208,17 +208,13 @@ class AuthManagerTestCase(object): # so it probably belongs in crypto_unittest # but I'm leaving it where I found it. with user_and_project_generator(self.manager) as (user, project): - # NOTE(todd): Should mention why we must setup controller first - # (somebody please clue me in) - cloud_controller = cloud.CloudController() - cloud_controller.setup() - _key, cert_str = self.manager._generate_x509_cert('test1', - 'testproj') + # NOTE(vish): Setup runs genroot.sh if it hasn't been run + cloud.CloudController().setup() + _key, cert_str = crypto.generate_x509_cert(user.id, project.id) logging.debug(cert_str) - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + full_chain = crypto.fetch_ca(project_id=project.id, chain=True) + int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) signed_cert = X509.load_cert_string(cert_str) @@ -227,7 +223,8 @@ class AuthManagerTestCase(object): cloud_cert = X509.load_cert_string(cloud_cert) self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - if not FLAGS.use_intermediate_ca: + + if not FLAGS.use_project_ca: self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) else: self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) @@ -333,14 +330,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): AuthManagerTestCase.__init__(self) test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 if FLAGS.flush_db: - logging.info("Flushing redis datastore") - try: - r = fakeldap.Redis.instance() - r.flushdb() - except: - self.skip = True + logging.info("Flushing datastore") + r = fakeldap.Store.instance() + r.flushdb() class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 53a762310..70d2c44da 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -22,20 +22,18 @@ import logging from M2Crypto import BIO from M2Crypto import RSA import os -import StringIO import tempfile import time from eventlet import greenthread -from xml.etree import ElementTree from nova import context from nova import crypto from nova import db from nova import flags from nova import rpc +from nova import service from nova import test -from nova import utils from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -54,7 +52,8 @@ os.makedirs(IMAGES_PATH) class CloudTestCase(test.TestCase): def setUp(self): super(CloudTestCase, self).setUp() - self.flags(connection_type='fake', images_path=IMAGES_PATH) + self.flags(connection_type='fake', + images_path=IMAGES_PATH) self.conn = 
rpc.Connection.instance() logging.getLogger().setLevel(logging.DEBUG) @@ -62,27 +61,23 @@ class CloudTestCase(test.TestCase): # set up our cloud self.cloud = cloud.CloudController() - # set up a service - self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.compute_topic, - proxy=self.compute) - self.compute_consumer.attach_to_eventlet() - self.network = utils.import_object(FLAGS.network_manager) - self.network_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.network_topic, - proxy=self.network) - self.network_consumer.attach_to_eventlet() + # set up services + self.compute = service.Service.create(binary='nova-compute') + self.compute.start() + self.network = service.Service.create(binary='nova-network') + self.network.start() self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, - project=self.project) + project=self.project) def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.compute.kill() + self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -109,12 +104,13 @@ class CloudTestCase(test.TestCase): {'address': address, 'host': FLAGS.host}) self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {}) + inst = db.instance_create(self.context, {'host': FLAGS.host}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) + greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index c6353d357..348bb3351 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -41,6 +41,7 @@ class ComputeTestCase(test.TestCase): logging.getLogger().setLevel(logging.DEBUG) super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', + stub_network=True, network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) self.compute_api = compute_api.ComputeAPI() @@ -127,6 +128,14 @@ class ComputeTestCase(test.TestCase): self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) + def test_pause(self): + """Ensure instance can be paused""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.pause_instance(self.context, instance_id) + self.compute.unpause_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_reboot(self): """Ensure instance can be rebooted""" instance_id = self._create_instance() diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py new file mode 100644 index 000000000..0febf52d6 --- /dev/null +++ b/nova/tests/middleware_unittest.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import webob +import webob.dec +import webob.exc + +from nova.api import ec2 +from nova import flags +from nova import test +from nova import utils + + +FLAGS = flags.FLAGS + + +@webob.dec.wsgify +def conditional_forbid(req): + """Helper wsgi app returns 403 if param 'die' is 1.""" + if 'die' in req.params and req.params['die'] == '1': + raise webob.exc.HTTPForbidden() + return 'OK' + + +class LockoutTestCase(test.TrialTestCase): + """Test case for the Lockout middleware.""" + def setUp(self): # pylint: disable-msg=C0103 + super(LockoutTestCase, self).setUp() + utils.set_time_override() + self.lockout = ec2.Lockout(conditional_forbid) + + def tearDown(self): # pylint: disable-msg=C0103 + utils.clear_time_override() + super(LockoutTestCase, self).tearDown() + + def _send_bad_attempts(self, access_key, num_attempts=1): + """Fail x.""" + for i in xrange(num_attempts): + req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) + self.assertEqual(req.get_response(self.lockout).status_int, 403) + + def _is_locked_out(self, access_key): + """Sends a test request to see if key is locked out.""" + req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) + return (req.get_response(self.lockout).status_int == 403) + + def test_lockout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + + def test_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test')) + + def test_multiple_keys(self): + self._send_bad_attempts('test1', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + + def test_window_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_window * 60) + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index bcac20585..96473ac7c 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -26,6 +26,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import service from nova import test from nova import utils from nova.auth import manager @@ -40,6 +41,7 @@ class NetworkTestCase(test.TestCase): # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge self.flags(connection_type='fake', + fake_call=True, fake_network=True, network_size=16, num_networks=5) @@ -56,16 +58,13 @@ class 
NetworkTestCase(test.TestCase): # create the necessary network data for the project user_context = context.RequestContext(project=self.projects[i], user=self.user) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) + host = self.network.get_network_host(user_context.elevated()) instance_ref = self._create_instance(0) self.instance_id = instance_ref['id'] instance_ref = self._create_instance(1) self.instance2_id = instance_ref['id'] def tearDown(self): - super(NetworkTestCase, self).tearDown() # TODO(termie): this should really be instantiating clean datastores # in between runs, one failure kills all the tests db.instance_destroy(context.get_admin_context(), self.instance_id) @@ -73,6 +72,7 @@ class NetworkTestCase(test.TestCase): for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) + super(NetworkTestCase, self).tearDown() def _create_instance(self, project_num, mac=None): if not mac: diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index a2495e65a..6ea2edcab 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -33,7 +33,7 @@ class RpcTestCase(test.TestCase): """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() - self.conn = rpc.Connection.instance() + self.conn = rpc.Connection.instance(True) self.receiver = TestReceiver() self.consumer = rpc.AdapterConsumer(connection=self.conn, topic='test', @@ -79,6 +79,33 @@ class RpcTestCase(test.TestCase): except rpc.RemoteError as exc: self.assertEqual(int(exc.value), value) + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + logging.debug("Nested received %s, %s", queue, value) + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + logging.debug("Nested return %s", ret) + return value + + nested = Nested() + conn = rpc.Connection.instance(True) + consumer = rpc.AdapterConsumer(connection=conn, + topic='nested', + proxy=nested) + consumer.attach_to_eventlet() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + self.assertEqual(value, result) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index d1756b8fb..df5e7afa5 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -78,6 +78,7 @@ class SimpleDriverTestCase(test.TestCase): def setUp(self): super(SimpleDriverTestCase, self).setUp() self.flags(connection_type='fake', + stub_network=True, max_cores=4, max_gigabytes=4, network_manager='nova.network.manager.FlatManager', diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 85e569858..9ad009510 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager') class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() + self.flags(fake_call=True) self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True) @@ -40,33 +41,66 @@ class LibvirtConnTestCase(test.TestCase): self.network = utils.import_object(FLAGS.network_manager) FLAGS.instances_path = '' - 
def test_get_uri_and_template(self): - ip = '10.11.12.13' - - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} - + test_ip = '10.11.12.13' + test_instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} + + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True, + rescue=True) + + def do_test_xml_and_uri(self, instance, + expect_ramdisk, expect_kernel, + rescue=False): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) + host = self.network.get_network_host(user_context.elevated()) + network_ref = db.project_get_network(context.get_admin_context(), + self.project.id) - fixed_ip = {'address': ip, + fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']} ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + db.fixed_ip_update(ctxt, self.test_ip, + {'allocated': True, + 'instance_id': instance_ref['id']}) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -78,23 +112,73 @@ class LibvirtConnTestCase(test.TestCase): (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'rescue-kernel') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'rescue-ramdisk') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') + else: + check = (lambda t: 
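Note on the reworked libvirt test above: do_test_xml_and_uri validates the generated domain XML by pairing each libvirt_type with a list of (extractor, expected) tuples and running every extractor over the parsed tree. A standalone sketch of that checking technique against a hard-coded snippet; it assumes the test's xml_to_tree helper behaves like plain xml.etree.ElementTree parsing, which is not shown in this hunk.

```python
# Sketch: the (extractor, expected) check-list idea from do_test_xml_and_uri,
# applied to a literal XML snippet with ElementTree.
from xml.etree import ElementTree

xml = """<domain type='kvm'>
  <os><type>hvm</type></os>
  <memory>2097152</memory>
</domain>"""

tree = ElementTree.fromstring(xml)

checks = [
    (lambda t: t.get('type'), 'kvm'),           # the test reaches this via find('.')
    (lambda t: t.find('./os/type').text, 'hvm'),
    (lambda t: t.find('./memory').text, '2097152'),
]

for i, (extract, expected) in enumerate(checks):
    actual = extract(tree)
    assert actual == expected, 'check %d failed: %r != %r' % (i, actual, expected)
```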
t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') + else: + check = (lambda t: t.find('./os/initrd'), None) + check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + + if rescue: + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] + else: + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref) + xml = conn.to_xml(instance_ref, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), @@ -106,6 +190,9 @@ class LibvirtConnTestCase(test.TestCase): expected_result, '%s failed common check %d' % (xml, i)) + # This test is supposed to make sure we don't override a specifically + # set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. @@ -114,7 +201,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, testuri) def tearDown(self): @@ -252,7 +339,7 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - d = self.fw.setup_nwfilters_for_instance(instance) + self.fw.setup_base_nwfilters() + self.fw.setup_nwfilters_for_instance(instance) _ensure_all_called() self.teardown_security_group() - return d diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce6..29be9c4e1 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,7 +43,7 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') @@ -208,7 +208,7 @@ def stop(pidfile): pid = None if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. 
Daemon not running?\n") sys.stderr.write(message % pidfile) # Not an error in a restart return @@ -229,7 +229,7 @@ def stop(pidfile): def serve(filename): - logging.debug("Serving %s" % filename) + logging.debug(_("Serving %s") % filename) name = os.path.basename(filename) OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() @@ -281,7 +281,7 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/utils.py b/nova/utils.py index ea1f04ca7..b9045a50c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -21,25 +21,24 @@ System-level utilities and helper functions. """ import datetime -import functools import inspect import logging import os import random import subprocess import socket +import struct import sys +import time from xml.sax import saxutils from eventlet import event from eventlet import greenthread from nova import exception -from nova import flags from nova.exception import ProcessExecutionError -FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -50,7 +49,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError): - raise exception.NotFound('Class %s cannot be found' % class_str) + raise exception.NotFound(_('Class %s cannot be found') % class_str) def import_object(import_str): @@ -63,8 +62,53 @@ def import_object(import_str): return cls() +def vpn_ping(address, port, timeout=0.05, session_id=None): + """Sends a vpn negotiation packet and returns the server session. + + Returns False on a failure. Basic packet structure is below. + + Client packet (14 bytes):: + 0 1 8 9 13 + +-+--------+-----+ + |x| cli_id |?????| + +-+--------+-----+ + x = packet identifier 0x38 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + + Server packet (26 bytes):: + 0 1 8 9 13 14 21 2225 + +-+--------+-----+--------+----+ + |x| srv_id |?????| cli_id |????| + +-+--------+-----+--------+----+ + x = packet identifier 0x40 + cli_id = 64 bit identifier + ? 
= unknown, probably flags/padding + bit 9 was 1 and the rest were 0 in testing + """ + if session_id is None: + session_id = random.randint(0, 0xffffffffffffffff) + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + data = struct.pack("!BQxxxxxx", 0x38, session_id) + sock.sendto(data, (address, port)) + sock.settimeout(timeout) + try: + received = sock.recv(2048) + except socket.timeout: + return False + finally: + sock.close() + fmt = "!BQxxxxxQxxxx" + if len(received) != struct.calcsize(fmt): + print struct.calcsize(fmt) + return False + (identifier, server_sess, client_sess) = struct.unpack(fmt, received) + if identifier == 0x40 and client_sess == session_id: + return server_sess + + def fetchfile(url, target): - logging.debug("Fetching %s" % url) + logging.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -76,7 +120,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug("Running cmd (subprocess): %s", cmd) + logging.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -89,7 +133,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug("Result was %s" % (obj.returncode)) + logging.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -127,7 +171,7 @@ def debug(arg): def runthis(prompt, cmd, check_exit_code=True): - logging.debug("Running %s" % (cmd)) + logging.debug(_("Running %s") % (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) @@ -151,8 +195,6 @@ def last_octet(address): def get_my_ip(): """Returns the actual ip of the local machine.""" - if getattr(FLAGS, 'fake_tests', None): - return '127.0.0.1' try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) @@ -160,17 +202,55 @@ def get_my_ip(): csock.close() return addr except socket.gaierror as ex: - logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) return "127.0.0.1" +def utcnow(): + """Overridable version of datetime.datetime.utcnow.""" + if utcnow.override_time: + return utcnow.override_time + return datetime.datetime.utcnow() + + +utcnow.override_time = None + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return time.mktime(utcnow().timetuple()) + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """Override utils.utcnow to return a constant time.""" + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overriden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overriden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + def isotime(at=None): + """Returns iso formatted utcnow.""" if not at: - at = datetime.datetime.utcnow() + at = utcnow() return at.strftime(TIME_FORMAT) def parse_isotime(timestr): + """Turn an iso formatted time back into a datetime""" return datetime.datetime.strptime(timestr, TIME_FORMAT) @@ -204,7 +284,7 @@ class LazyPluggable(object): if not self.__backend: backend_name = 
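Note on utils.vpn_ping() above: the docstring documents both packet layouts, but only the client side ships in this change. As a rough illustration of the other end of the exchange, here is a hypothetical UDP responder that answers probes in the server format described by the docstring and the "!BQxxxxxQxxxx" unpack string; the port, session id, and zeroed padding bytes are all made-up values for the sketch, not anything cloudpipe actually does.

```python
# Hypothetical responder for the probe sent by utils.vpn_ping() above.
# Assumption: padding bytes may simply be zero; vpn_ping only checks the
# 0x40 identifier, the server session id and the echoed client session id.
import random
import socket
import struct

SERVER_SESSION = random.randint(0, 0xffffffffffffffff)


def run_responder(port=1194):          # made-up port for the sketch
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('0.0.0.0', port))
    while True:
        data, addr = sock.recvfrom(2048)
        identifier, client_session = struct.unpack('!BQ', data[:9])
        if identifier != 0x38:         # not a client probe
            continue
        # 26-byte reply: 0x40, server session, 5 pad bytes,
        # echoed client session, 4 pad bytes.
        reply = struct.pack('!BQxxxxxQxxxx', 0x40, SERVER_SESSION,
                            client_session)
        sock.sendto(reply, addr)
```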
self.__pivot.value if backend_name not in self.__backends: - raise exception.Error('Invalid backend: %s' % backend_name) + raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if type(backend) == type(tuple()): diff --git a/nova/virt/connection.py b/nova/virt/connection.py index c40bb4bb4..61e99944e 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -66,6 +66,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error('Failed to open connection to the hypervisor') + logging.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 77bc926c2..238acf798 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -76,6 +76,12 @@ class FakeConnection(object): cls._instance = cls() return cls._instance + def init_host(self): + """ + Initialize anything that is necessary for the driver to function + """ + return + def list_instances(self): """ Return the names of all the instances known to the virtualization @@ -130,6 +136,18 @@ class FakeConnection(object): """ pass + def pause(self, instance, callback): + """ + Pause the specified instance. + """ + pass + + def unpause(self, instance, callback): + """ + Unpause the specified instance. + """ + pass + def destroy(self, instance): """ Destroy (shutdown and delete) the specified instance. @@ -163,7 +181,8 @@ class FakeConnection(object): knowledge of the instance """ if instance_name not in self.instances: - raise exception.NotFound("Instance %s Not Found" % instance_name) + raise exception.NotFound(_("Instance %s Not Found") + % instance_name) i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, @@ -243,5 +262,6 @@ class FakeConnection(object): class FakeInstance(object): + def __init__(self): self._state = power_state.NOSTATE diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template deleted file mode 100644 index 2538b1ade..000000000 --- a/nova/virt/libvirt.qemu.xml.template +++ /dev/null @@ -1,33 +0,0 @@ -<domain type='%(type)s'> - <name>%(name)s</name> - <os> - <type>hvm</type> - <kernel>%(basepath)s/kernel</kernel> - <initrd>%(basepath)s/ramdisk</initrd> - <cmdline>root=/dev/vda1 console=ttyS0</cmdline> - </os> - <features> - <acpi/> - </features> - <memory>%(memory_kb)s</memory> - <vcpu>%(vcpus)s</vcpu> - <devices> - <disk type='file'> - <source file='%(basepath)s/disk'/> - <target dev='vda' bus='virtio'/> - </disk> - <interface type='bridge'> - <source bridge='%(bridge_name)s'/> - <mac address='%(mac_address)s'/> - <!-- <model type='virtio'/> CANT RUN virtio network right now --> - <filterref filter="nova-instance-%(name)s"> - <parameter name="IP" value="%(ip_address)s" /> - <parameter name="DHCPSERVER" value="%(dhcp_server)s" /> - </filterref> - </interface> - <serial type="file"> - <source path='%(basepath)s/console.log'/> - <target port='1'/> - </serial> - </devices> -</domain> diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template.THIS index c0ffbdcee..a3b88106c 100644 --- a/nova/virt/libvirt.rescue.qemu.xml.template +++ b/nova/virt/libvirt.rescue.qemu.xml.template.THIS @@ -27,6 +27,7 @@ <filterref filter="nova-instance-%(name)s"> <parameter name="IP" value="%(ip_address)s" /> <parameter name="DHCPSERVER" value="%(dhcp_server)s" /> + %(extra_params)s </filterref> </interface> <serial type="file"> diff --git 
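Note on the utcnow()/set_time_override()/advance_time_*() helpers added to nova/utils.py above: they let tests freeze and step wall-clock time without monkey-patching datetime. A small usage sketch, assuming only the helper signatures shown in the diff; the start timestamp is arbitrary.

```python
# Sketch: freezing and advancing time in a test via the new utils helpers.
import datetime

from nova import utils

start = datetime.datetime(2010, 12, 22, 23, 44, 5)
utils.set_time_override(start)
try:
    assert utils.utcnow() == start
    utils.advance_time_seconds(90)
    assert utils.utcnow() == start + datetime.timedelta(seconds=90)
    utils.advance_time_delta(datetime.timedelta(hours=1))
    assert utils.utcnow() == start + datetime.timedelta(hours=1, seconds=90)
finally:
    utils.clear_time_override()   # always restore real time for other tests
```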
a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template deleted file mode 100644 index 836f47532..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template +++ /dev/null @@ -1,26 +0,0 @@ -<domain type='%(type)s'> - <name>%(name)s</name> - <memory>%(memory_kb)s</memory> - <os> - <type>%(type)s</type> - <kernel>/usr/bin/linux</kernel> - <root>/dev/ubda1</root> - </os> - <devices> - <disk type='file'> - <source file='%(basepath)s/rescue-disk'/> - <target dev='ubd0' bus='uml'/> - </disk> - <disk type='file'> - <source file='%(basepath)s/disk'/> - <target dev='ubd1' bus='uml'/> - </disk> - <interface type='bridge'> - <source bridge='%(bridge_name)s'/> - <mac address='%(mac_address)s'/> - </interface> - <console type="file"> - <source path='%(basepath)s/console.log'/> - </console> - </devices> -</domain> diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template.THIS index bb8b47911..a254692d4 100644 --- a/nova/virt/libvirt.uml.xml.template +++ b/nova/virt/libvirt.rescue.uml.xml.template.THIS @@ -8,15 +8,20 @@ </os> <devices> <disk type='file'> - <source file='%(basepath)s/disk'/> + <source file='%(basepath)s/rescue-disk'/> <target dev='ubd0' bus='uml'/> </disk> + <disk type='file'> + <source file='%(basepath)s/disk'/> + <target dev='ubd1' bus='uml'/> + </disk> <interface type='bridge'> <source bridge='%(bridge_name)s'/> <mac address='%(mac_address)s'/> <filterref filter="nova-instance-%(name)s"> <parameter name="IP" value="%(ip_address)s" /> <parameter name="DHCPSERVER" value="%(dhcp_server)s" /> + %(extra_params)s </filterref> </interface> <console type="file"> diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template deleted file mode 100644 index 3b8d27237..000000000 --- a/nova/virt/libvirt.rescue.xen.xml.template +++ /dev/null @@ -1,34 +0,0 @@ -<domain type='%(type)s'> - <name>%(name)s</name> - <os> - <type>linux</type> - <kernel>%(basepath)s/kernel</kernel> - <initrd>%(basepath)s/ramdisk</initrd> - <root>/dev/xvda1</root> - <cmdline>ro</cmdline> - </os> - <features> - <acpi/> - </features> - <memory>%(memory_kb)s</memory> - <vcpu>%(vcpus)s</vcpu> - <devices> - <disk type='file'> - <source file='%(basepath)s/rescue-disk'/> - <target dev='sda' /> - </disk> - <disk type='file'> - <source file='%(basepath)s/disk'/> - <target dev='sdb' /> - </disk> - <interface type='bridge'> - <source bridge='%(bridge_name)s'/> - <mac address='%(mac_address)s'/> - </interface> - <console type="file"> - <source path='%(basepath)s/console.log'/> - <target port='1'/> - </console> - </devices> -</domain> - diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template deleted file mode 100644 index 9677902c6..000000000 --- a/nova/virt/libvirt.xen.xml.template +++ /dev/null @@ -1,30 +0,0 @@ -<domain type='%(type)s'> - <name>%(name)s</name> - <os> - <type>linux</type> - <kernel>%(basepath)s/kernel</kernel> - <initrd>%(basepath)s/ramdisk</initrd> - <root>/dev/xvda1</root> - <cmdline>ro</cmdline> - </os> - <features> - <acpi/> - </features> - <memory>%(memory_kb)s</memory> - <vcpu>%(vcpus)s</vcpu> - <devices> - <disk type='file'> - <source file='%(basepath)s/disk'/> - <target dev='sda' /> - </disk> - <interface type='bridge'> - <source bridge='%(bridge_name)s'/> - <mac address='%(mac_address)s'/> - </interface> - <console type="file"> - <source path='%(basepath)s/console.log'/> - <target port='1'/> - </console> - </devices> -</domain> - diff --git a/nova/virt/libvirt.xml.template 
b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..3fb2243da --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,79 @@ +<domain type='${type}'> + <name>${name}</name> + <memory>${memory_kb}</memory> + <os> +#if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + <type>uml</type> + <kernel>/usr/bin/linux</kernel> + <root>/dev/ubda1</root> +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + <type>linux</type> + <root>/dev/xvda1</root> + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + <type>hvm</type> + #end if + #if $getVar('rescue', False) + <kernel>${basepath}/rescue-kernel</kernel> + <initrd>${basepath}/rescue-ramdisk</initrd> + #else + #if $getVar('kernel', None) + <kernel>${kernel}</kernel> + #if $type == 'xen' + <cmdline>ro</cmdline> + #else + <cmdline>root=/dev/vda1 console=ttyS0</cmdline> + #end if + #if $getVar('ramdisk', None) + <initrd>${ramdisk}</initrd> + #end if + #else + <boot dev="hd" /> + #end if + #end if +#end if + </os> + <features> + <acpi/> + </features> + <vcpu>${vcpus}</vcpu> + <devices> +#if $getVar('rescue', False) + <disk type='file'> + <source file='${basepath}/rescue-disk'/> + <target dev='${disk_prefix}a' bus='${disk_bus}'/> + </disk> + <disk type='file'> + <source file='${basepath}/disk'/> + <target dev='${disk_prefix}b' bus='${disk_bus}'/> + </disk> +#else + <disk type='file'> + <source file='${basepath}/disk'/> + <target dev='${disk_prefix}a' bus='${disk_bus}'/> + </disk> +#end if + <interface type='bridge'> + <source bridge='${bridge_name}'/> + <mac address='${mac_address}'/> + <!-- <model type='virtio'/> CANT RUN virtio network right now --> + <filterref filter="nova-instance-${name}"> + <parameter name="IP" value="${ip_address}" /> + <parameter name="DHCPSERVER" value="${dhcp_server}" /> +#if $getVar('extra_params', False) + ${extra_params} +#end if + </filterref> + </interface> + <serial type="file"> + <source path='${basepath}/console.log'/> + <target port='1'/> + </serial> + </devices> +</domain> diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5a8c71850..8d3a6a261 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN. :libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template (QEmu/KVM). -:libvirt_xen_xml_template: Libvirt XML Template (Xen). -:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux). -:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU). -:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN). -:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML). +:libvirt_xml_template: Libvirt XML Template. :rescue_image_id: Rescue ami image (default: ami-rescue). :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). 
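Note on the new Cheetah template above: it replaces the six per-hypervisor %-style templates deleted in this change, and to_xml() (further down in libvirt_conn.py) renders it by passing a single dictionary through Template's searchList. A minimal rendering sketch with made-up instance values; it assumes the Cheetah==2.4.2.1 dependency added to tools/pip-requires and reads the template from a relative path instead of FLAGS.libvirt_xml_template.

```python
# Sketch: rendering nova/virt/libvirt.xml.template the way to_xml() does,
# with illustrative values only.
from Cheetah.Template import Template

template_source = open('nova/virt/libvirt.xml.template').read()

xml_info = {'type': 'kvm',
            'name': 'instance-00000001',
            'basepath': '/var/lib/nova/instances/instance-00000001',
            'memory_kb': '1024000',
            'vcpus': 2,
            'bridge_name': 'br100',
            'mac_address': '02:12:34:46:56:67',
            'ip_address': '10.11.12.13',
            'dhcp_server': '10.0.0.1',
            'extra_params': '\n',
            'rescue': False}
# kernel/ramdisk are optional; without them the template emits
# <boot dev="hd" /> instead of <kernel>/<initrd> elements.
xml_info['kernel'] = xml_info['basepath'] + '/kernel'
xml_info['ramdisk'] = xml_info['basepath'] + '/ramdisk'

libvirt_xml = str(Template(template_source, searchList=[xml_info]))
print(libvirt_xml)
```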
@@ -45,6 +40,7 @@ import logging import os import shutil +from eventlet import greenthread from eventlet import event from eventlet import tpool @@ -62,36 +58,20 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from Cheetah.Template import Template + libvirt = None libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_rescue_xml_template', - utils.abspath('virt/libvirt.rescue.qemu.xml.template'), - 'Libvirt RESCUE XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_rescue_xen_xml_template', - utils.abspath('virt/libvirt.rescue.xen.xml.template'), - 'Libvirt RESCUE XML Template for xen') -flags.DEFINE_string('libvirt_rescue_uml_xml_template', - utils.abspath('virt/libvirt.rescue.uml.xml.template'), - 'Libvirt RESCUE XML Template for user-mode-linux') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.qemu.xml.template'), - 'Libvirt XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_xen_xml_template', - utils.abspath('virt/libvirt.xen.xml.template'), - 'Libvirt XML Template for Xen') -flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('virt/libvirt.uml.xml.template'), - 'Libvirt XML Template for user-mode-linux') -flags.DEFINE_string('injected_network_template', - utils.abspath('virt/interfaces.template'), - 'Template file for injected network') + utils.abspath('virt/libvirt.xml.template'), + 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -117,21 +97,27 @@ def get_connection(read_only): return LibvirtConnection(read_only) +def _get_net_and_mask(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.netmask()) + + class LibvirtConnection(object): + def __init__(self, read_only): - (self.libvirt_uri, - template_file, - rescue_file) = self.get_uri_and_templates() + self.libvirt_uri = self.get_uri() - self.libvirt_xml = open(template_file).read() - self.rescue_xml = open(rescue_file).read() + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() self._wrapped_conn = None self.read_only = read_only + def init_host(self): + NWFilterFirewall(self._conn).setup_base_nwfilters() + @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) + logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn @@ -143,24 +129,18 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug('Connection to libvirt broke') + logging.debug(_('Connection to libvirt broke')) return False raise - def get_uri_and_templates(self): + def get_uri(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' - template_file = FLAGS.libvirt_uml_xml_template - rescue_file = FLAGS.libvirt_rescue_uml_xml_template elif FLAGS.libvirt_type == 'xen': uri = FLAGS.libvirt_uri or 'xen:///' - template_file = FLAGS.libvirt_xen_xml_template - rescue_file = FLAGS.libvirt_rescue_xen_xml_template else: uri = FLAGS.libvirt_uri 
or 'qemu:///system' - template_file = FLAGS.libvirt_xml_template - rescue_file = FLAGS.libvirt_rescue_xml_template - return uri, template_file, rescue_file + return uri def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], @@ -220,7 +200,7 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info('instance %s: deleting instance files %s', + logging.info(_('instance %s: deleting instance files %s'), instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -262,7 +242,7 @@ class LibvirtConnection(object): mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise exception.NotFound("No disk at %s" % mount_device) + raise exception.NotFound(_("No disk at %s") % mount_device) virt_dom.detachDevice(xml) @exception.wrap_exception @@ -278,10 +258,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rebooted', instance['name']) + logging.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_reboot failed: %s', exn) + logging.error(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -291,6 +271,14 @@ class LibvirtConnection(object): return timer.start(interval=0.5, now=True) @exception.wrap_exception + def pause(self, instance, callback): + raise exception.APIError("pause not supported for libvirt.") + + @exception.wrap_exception + def unpause(self, instance, callback): + raise exception.APIError("unpause not supported for libvirt.") + + @exception.wrap_exception def rescue(self, instance): self.destroy(instance, False) @@ -308,10 +296,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rescued', instance['name']) + logging.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_rescue failed: %s', exn) + logging.error(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -336,7 +324,7 @@ class LibvirtConnection(object): NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug("instance %s: is running", instance['name']) + logging.debug(_("instance %s: is running"), instance['name']) timer = utils.LoopingCall(f=None) @@ -346,10 +334,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: booted', instance['name']) + logging.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception('instance %s: failed to boot', + logging.exception(_('instance %s: failed to boot'), instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], @@ -364,7 +352,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info('cool, it\'s a device') + logging.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, 
check_exit_code=False) return out @@ -372,7 +360,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info('data: %r, fpath: %r' % (data, fpath)) + logging.info(_('data: %r, fpath: %r') % (data, fpath)) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -414,7 +402,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info('instance %s: Creating image', inst['name']) + logging.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -433,18 +421,28 @@ class LibvirtConnection(object): if not os.path.exists(basepath('disk')): images.fetch(inst.image_id, basepath('disk-raw'), user, project) - if not os.path.exists(basepath('kernel')): - images.fetch(inst.kernel_id, basepath('kernel'), user, - project) - if not os.path.exists(basepath('ramdisk')): - images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) + + if inst['kernel_id']: + if not os.path.exists(basepath('kernel')): + images.fetch(inst['kernel_id'], basepath('kernel'), + user, project) + if inst['ramdisk_id']: + if not os.path.exists(basepath('ramdisk')): + images.fetch(inst['ramdisk_id'], basepath('ramdisk'), + user, project) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, process_input=process_input, check_exit_code=check_exit_code) + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first + # partition + target_partition = None + if not inst['kernel_id']: + target_partition = "1" + key = str(inst['key_data']) net = None network_ref = db.network_get_by_instance(context.get_admin_context(), @@ -460,16 +458,24 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info('instance %s: injecting key into image %s', + logging.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info('instance %s: injecting net into image %s', - inst['name'], inst.image_id) - disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) - - if os.path.exists(basepath('disk')): - utils.execute('rm -f %s' % basepath('disk')) + logging.info(_('instance %s: injecting net into image %s'), + inst['name'], inst.image_id) + try: + disk.inject_data(basepath('disk-raw'), key, net, + partition=target_partition, + execute=execute) + except Exception as e: + # This could be a windows image, or a vmdk format disk + logging.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) + + if inst['kernel_id']: + if os.path.exists(basepath('disk')): + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -478,17 +484,23 @@ class LibvirtConnection(object): resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False - disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + + if inst['kernel_id']: + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) + else: + os.rename(basepath('disk-raw'), basepath('disk')) + disk.extend(basepath('disk'), local_bytes, execute=execute) if FLAGS.libvirt_type == 'uml': utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
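Note on the reworked _create_image logic above: it branches on whether the instance has a kernel. With a kernel, the raw image is carved into a partitioned 'disk' via disk.partition(); without one, the raw image is treated as an already-partitioned whole-disk image, so it is simply renamed and grown with disk.extend(), and key/net injection targets partition 1. A condensed sketch of that decision, using the disk helper arguments exactly as they appear in the hunk and assuming the helpers live in nova.virt.disk as libvirt_conn imports them; paths and sizes are illustrative.

```python
# Sketch: the kernel / whole-disk-image branch from _create_image().
import os

from nova import utils
from nova.virt import disk


def prepare_root_disk(inst, raw_path, disk_path, local_bytes, resize,
                      key=None, net=None):
    def execute(cmd, process_input=None, check_exit_code=True):
        return utils.execute(cmd=cmd, process_input=process_input,
                             check_exit_code=check_exit_code)

    # No kernel means a partitioned whole-disk image; inject into partition 1.
    target_partition = None if inst['kernel_id'] else "1"

    try:
        disk.inject_data(raw_path, key, net,
                         partition=target_partition, execute=execute)
    except Exception as e:
        # Could be a Windows image or a vmdk-format disk: don't fail the boot.
        print('ignoring injection error: %s' % e)

    if inst['kernel_id']:
        # Separate kernel/ramdisk: build a partitioned 'disk' from the raw image.
        disk.partition(raw_path, disk_path, local_bytes, resize, execute=execute)
    else:
        # Whole-disk image: keep it as-is, just grow it to the flavor's size.
        os.rename(raw_path, disk_path)
        disk.extend(disk_path, local_bytes, execute=execute)
```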
- logging.debug('instance %s: starting toXML method', instance['name']) - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) + logging.debug(_('instance %s: starting toXML method'), + instance['name']) + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) # FIXME(vish): stick this in db instance_type = instance['instance_type'] instance_type = instance_types.INSTANCE_TYPES[instance_type] @@ -496,6 +508,15 @@ class LibvirtConnection(object): instance['id']) # Assume that the gateway also acts as the dhcp server. dhcp_server = network['gateway'] + + if FLAGS.allow_project_net_traffic: + net, mask = _get_net_and_mask(network['cidr']) + extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n" + "<parameter name=\"PROJMASK\" value=\"%s\" />\n" + ) % (net, mask) + else: + extra_params = "\n" + xml_info = {'type': FLAGS.libvirt_type, 'name': instance['name'], 'basepath': os.path.join(FLAGS.instances_path, @@ -505,20 +526,30 @@ class LibvirtConnection(object): 'bridge_name': network['bridge'], 'mac_address': instance['mac_address'], 'ip_address': ip_address, - 'dhcp_server': dhcp_server} - if rescue: - libvirt_xml = self.rescue_xml % xml_info - else: - libvirt_xml = self.libvirt_xml % xml_info - logging.debug('instance %s: finished toXML method', instance['name']) + 'dhcp_server': dhcp_server, + 'extra_params': extra_params, + 'rescue': rescue} + if not rescue: + if instance['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if instance['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + xml_info['disk'] = xml_info['basepath'] + "/disk" - return libvirt_xml + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) + logging.debug(_('instance %s: finished toXML method'), + instance['name']) + + return xml def get_info(self, instance_name): try: virt_dom = self._conn.lookupByName(instance_name) except: - raise exception.NotFound("Instance %s not found" % instance_name) + raise exception.NotFound(_("Instance %s not found") + % instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, @@ -704,6 +735,14 @@ class NWFilterFirewall(object): </rule> </filter>''' + nova_vpn_filter = '''<filter name='nova-vpn' chain='root'> + <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid> + <filterref filter='allow-dhcp-server'/> + <filterref filter='nova-allow-dhcp-server'/> + <filterref filter='nova-base-ipv4'/> + <filterref filter='nova-base-ipv6'/> + </filter>''' + def nova_base_ipv4_filter(self): retval = "<filter name='nova-base-ipv4' chain='ipv4'>" for protocol in ['tcp', 'udp', 'icmp']: @@ -728,12 +767,12 @@ class NWFilterFirewall(object): retval += '</filter>' return retval - def nova_project_filter(self, project, net, mask): - retval = "<filter name='nova-project-%s' chain='ipv4'>" % project + def nova_project_filter(self): + retval = "<filter name='nova-project' chain='ipv4'>" for protocol in ['tcp', 'udp', 'icmp']: retval += """<rule action='accept' direction='in' priority='200'> - <%s srcipaddr='%s' srcipmask='%s' /> - </rule>""" % (protocol, net, mask) + <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> + </rule>""" % protocol retval += '</filter>' return retval @@ -744,10 +783,14 @@ class NWFilterFirewall(object): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - @staticmethod - def _get_net_and_mask(cidr): - net = IPy.IP(cidr) - return str(net.net()), 
str(net.netmask()) + def setup_base_nwfilters(self): + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_base_filter) + self._define_filter(self.nova_vpn_filter) + if FLAGS.allow_project_net_traffic: + self._define_filter(self.nova_project_filter) def setup_nwfilters_for_instance(self, instance): """ @@ -756,31 +799,22 @@ class NWFilterFirewall(object): the base filter are all in place. """ - self._define_filter(self.nova_base_ipv4_filter) - self._define_filter(self.nova_base_ipv6_filter) - self._define_filter(self.nova_dhcp_filter) - self._define_filter(self.nova_base_filter) + nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" + ) % instance['name'] - nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \ - " <filterref filter='nova-base' />\n" % \ - instance['name'] + if instance['image_id'] == FLAGS.vpn_image_id: + nwfilter_xml += " <filterref filter='nova-vpn' />\n" + else: + nwfilter_xml += " <filterref filter='nova-base' />\n" if FLAGS.allow_project_net_traffic: - network_ref = db.project_get_network(context.get_admin_context(), - instance['project_id']) - net, mask = self._get_net_and_mask(network_ref['cidr']) - project_filter = self.nova_project_filter(instance['project_id'], - net, mask) - self._define_filter(project_filter) - - nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \ - instance['project_id'] + nwfilter_xml += " <filterref filter='nova-project' />\n" for security_group in instance.security_groups: self.ensure_security_group_filter(security_group['id']) - nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \ - security_group['id'] + nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n" + ) % security_group['id'] nwfilter_xml += "</filter>" self._define_filter(nwfilter_xml) @@ -796,7 +830,7 @@ class NWFilterFirewall(object): for rule in security_group.rules: rule_xml += "<rule action='accept' direction='in' priority='300'>" if rule.cidr: - net, mask = self._get_net_and_mask(rule.cidr) + net, mask = _get_net_and_mask(rule.cidr) rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ (rule.protocol, net, mask) if rule.protocol in ['tcp', 'udp']: diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 012954394..ce2c68ce0 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -25,6 +25,7 @@ class NetworkHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..badaaedc1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -47,6 +47,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" + def __init__(self): return @@ -228,11 +229,7 @@ class VMHelper(): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] - metrics = session.get_xenapi().VM_guest_metrics.get_record( - record["guest_metrics"]) - diags = { - "Kernel": metrics["os_version"]["uname"], - "Distro": metrics["os_version"]["name"]} + diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..4d897af35 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -34,6 +34,7 @@ class VMOps(object): """ Management class for VM-related tasks """ + def __init__(self, session): global XenAPI if XenAPI is None: @@ -43,19 +44,23 @@ class VMOps(object): VMHelper.late_import() def list_instances(self): - """ List VM instances """ - return [self._session.get_xenapi().VM.get_name_label(vm) \ - for vm in self._session.get_xenapi().VM.get_all()] + """List VM instances""" + vms = [] + for vm in self._session.get_xenapi().VM.get_all(): + rec = self._session.get_xenapi().VM.get_record(vm) + if not rec["is_a_template"] and not rec["is_control_domain"]: + vms.append(rec["name_label"]) + return vms def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) - bridge = db.project_get_network(context.get_admin_context(), - instance.project_id).bridge + bridge = db.network_get_by_instance(context.get_admin_context(), + instance['id'])['bridge'] network_ref = \ NetworkHelper.find_network_with_bridge(self._session, bridge) @@ -80,16 +85,16 @@ class VMOps(object): vm_ref) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. 
This lets us clean up instances @@ -100,7 +105,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -108,17 +113,43 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) + except XenAPI.Failure, exc: + logging.warn(exc) + + def _wait_with_callback(self, instance_id, task, callback): + ret = None + try: + ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: logging.warn(exc) + callback(ret) + + def pause(self, instance, callback): + """Pause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.pause', vm) + self._wait_with_callback(instance.id, task, callback) + + def unpause(self, instance, callback): + """Unpause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.unpause', vm) + self._wait_with_callback(instance.id, task, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -134,6 +165,6 @@ class VMOps(object): return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index a4c7a3861..1943ccab0 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc). 
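Note on the new pause/unpause entry points above: the fake and XenAPI drivers (and libvirt, which simply rejects the operation) now take a callback that receives the result of the asynchronous task via _wait_with_callback. The compute-manager side of that contract is not part of this excerpt, so the caller below is purely hypothetical and only illustrates the callback shape.

```python
# Hypothetical caller: illustrates the callback contract implied by
# _wait_with_callback(instance_id, task, callback) above.  The driver
# invokes callback(result) once the Async.VM.pause task finishes
# (result is None when the task failed and only a warning was logged).
import logging

from nova.virt import connection


def pause_instance(instance):
    conn = connection.get_connection(read_only=False)

    def _on_done(result):
        logging.info('pause of %s finished, task result: %r',
                     instance['name'], result)

    conn.pause(instance, _on_done)
```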
class VolumeOps(object): + def __init__(self, session): self._session = session diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6beb08f5e..146e2f153 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -54,6 +54,8 @@ import xmlrpclib from eventlet import event from eventlet import tpool +from nova import context +from nova import db from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps @@ -93,38 +95,47 @@ def get_connection(_): username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') + raise Exception(_('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi')) return XenAPIConnection(url, username, password) class XenAPIConnection(object): - """ A connection to XenServer or Xen Cloud Platform """ + """A connection to XenServer or Xen Cloud Platform""" + def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) self._volumeops = VolumeOps(session) def list_instances(self): - """ List VM instances """ + """List VM instances""" return self._vmops.list_instances() def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" self._vmops.spawn(instance) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" self._vmops.reboot(instance) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" self._vmops.destroy(instance) + def pause(self, instance, callback): + """Pause VM instance""" + self._vmops.pause(instance, callback) + + def unpause(self, instance, callback): + """Unpause paused VM instance""" + self._vmops.unpause(instance, callback) + def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" return self._vmops.get_info(instance_id) def get_diagnostics(self, instance_id): @@ -132,32 +143,33 @@ class XenAPIConnection(object): return self._vmops.get_diagnostics(instance_id) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): - """ Attach volume storage to VM instance """ + """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): - """ Detach volume storage to VM instance """ + """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): - """ The session to invoke XenAPI SDK calls """ + """The session to invoke XenAPI SDK calls""" + def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): - """ Return the xenapi object """ + """Return the xenapi object""" return self._session.xenapi def get_xenapi_host(self): - """ Return the xenapi host """ + """Return the xenapi host""" return self._session.xenapi.session.get_this_host(self._session.handle) def call_xenapi(self, method, *args): @@ -173,46 +185,57 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, 
self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, task): + def wait_for_task(self, instance_id, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, instance_id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() return rv - def _poll_task(self, task, done): + def _poll_task(self, instance_id, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) + name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - if status == 'pending': + action = dict( + instance_id=int(instance_id), + action=name, + error=None) + if status == "pending": return - elif status == 'success': + elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info(_("Task [%s] %s status: success %s") % ( + name, + task, + result)) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) + action["error"] = str(error_info) + logging.warn(_("Task [%s] %s status: %s %s") % ( + name, + task, + status, + error_info)) done.send_exception(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + db.instance_action_create(context.get_admin_context(), action) except XenAPI.Failure, exc: logging.warn(exc) done.send_exception(*sys.exc_info()) def _unwrap_plugin_exceptions(func, *args, **kwargs): - """ Parse exception details """ + """Parse exception details""" try: return func(*args, **kwargs) except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -225,7 +248,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 1cd4c1fd4..8353b9712 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -73,14 +73,14 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception("Recovering from a failed execute." - "Try number %s", tries) + logging.exception(_("Recovering from a failed execute." 
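Note on the wait_for_task()/_poll_task() changes above: the polling machinery is unchanged in shape, it just gains the instance id so each completed task can be recorded with db.instance_action_create. The shape itself is an eventlet Event paired with a utils.LoopingCall that fires the event once the task leaves 'pending'. A reduced sketch of that plumbing; check_status is a stand-in for the XenAPI task status queries, and the names are illustrative.

```python
# Sketch: the poll-until-done pattern wait_for_task() uses.
from eventlet import event

from nova import utils


def wait_for(check_status, interval=0.5):
    done = event.Event()

    def _poll():
        status, result = check_status()
        if status == 'pending':
            return                          # keep polling
        elif status == 'success':
            done.send(result)
        else:
            done.send_exception(RuntimeError(result))

    loop = utils.LoopingCall(_poll)
    loop.start(interval, now=True)
    try:
        return done.wait()
    finally:
        loop.stop()
```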
+ "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" if not os.path.isdir("/dev/%s" % FLAGS.volume_group): - raise exception.Error("volume group %s doesn't exist" + raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) def create_volume(self, volume): @@ -205,7 +205,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE AOE: %s", cmd) + logging.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -310,5 +310,5 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE ISCSI: %s", cmd) + logging.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7da125cac..966334c50 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -81,7 +81,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug("Re-exporting %s volumes", len(volumes)) + logging.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +89,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info("volume %s: creating", volume_ref['name']) + logging.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +98,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
volume_ref['host'] = self.host - logging.debug("volume %s: creating lv of size %sG", + logging.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], volume_ref['size']) self.driver.create_volume(volume_ref) - logging.debug("volume %s: creating export", volume_ref['name']) + logging.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug("volume %s: created successfully", volume_ref['name']) + logging.debug(_("volume %s: created successfully"), volume_ref['name']) return volume_id def delete_volume(self, context, volume_id): @@ -117,15 +117,15 @@ class VolumeManager(manager.Manager): context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": - raise exception.Error("Volume is still attached") + raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: - raise exception.Error("Volume is not local to this node") - logging.debug("volume %s: removing export", volume_ref['name']) + raise exception.Error(_("Volume is not local to this node")) + logging.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) - logging.debug("volume %s: deleting", volume_ref['name']) + logging.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug("volume %s: deleted successfully", volume_ref['name']) + logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True def setup_compute_volume(self, context, volume_id): diff --git a/run_tests.py b/run_tests.py index 6a4b7f1ab..312ed7ef3 100644 --- a/run_tests.py +++ b/run_tests.py @@ -60,6 +60,7 @@ from nova.tests.auth_unittest import * from nova.tests.cloud_unittest import * from nova.tests.compute_unittest import * from nova.tests.flags_unittest import * +from nova.tests.middleware_unittest import * from nova.tests.misc_unittest import * from nova.tests.network_unittest import * #from nova.tests.objectstore_unittest import * diff --git a/tools/pip-requires b/tools/pip-requires index 52451b8cb..e9559521b 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -2,6 +2,7 @@ SQLAlchemy==0.6.3 pep8==0.5.0 pylint==0.19 IPy==0.70 +Cheetah==2.4.2.1 M2Crypto==0.20.2 amqplib==0.6.1 anyjson==0.2.4 |
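A recurring change throughout this merge is wrapping user-visible and log strings in _() so they can be translated. The diff does not show where _ gets installed into builtins, so the snippet below is only a guess at a typical gettext bootstrap for a 'nova' domain, not the project's actual mechanism.

```python
# Hypothetical gettext bootstrap illustrating what the _() calls above rely on.
# The domain name and use of the builtin installer are assumptions; the real
# nova bootstrap is not part of this diff.
import gettext
import logging

gettext.install('nova', unicode=1)            # makes _() available as a builtin

logging.debug(_("Serving %s") % "nova-api")   # translated if a catalog exists
```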
