| author | Johannes Erdfelt <johannes.erdfelt@rackspace.com> | 2011-05-24 22:51:29 +0000 |
|---|---|---|
| committer | Johannes Erdfelt <johannes.erdfelt@rackspace.com> | 2011-05-24 22:51:29 +0000 |
| commit | 1756c41b96b83c8de21e313a2f2435f1d8b8421d | |
| tree | e97cc0c495fd30d79834053414f4d76a7d015a63 | |
| parent | d4ac3a309b23875312014abaf3fb8f84d373825a | |
| parent | 4108c32a5e30d9ee4367e433471fbf2a5bf15ed2 | |
Merge with trunk
64 files changed, 1788 insertions, 542 deletions
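One piece worth calling out before the per-file hunks: this merge adds a pair of symmetric-key helpers to `nova/crypto.py`, `encryptor()` and `decryptor()` (AES-128-CBC via M2Crypto), which the new `/v1.0/zones/select` call uses to encrypt scheduler build plans when the `--build_plan_encryption_key` flag is set. The sketch below is illustrative only and not part of the commit: it round-trips a build-plan-like dict through those helpers, assuming M2Crypto is installed and that the key is passed base64-encoded (both helpers base64-decode the key they receive); the `hostname` field is a made-up example, while `weight` matches the key the zones controller keeps in clear text.

```python
import base64
import json
import os

from nova import crypto

# A fresh 128-bit key, base64-encoded the way the helpers expect it.
key = base64.b64encode(os.urandom(16))

encrypt = crypto.encryptor(key)
decrypt = crypto.decryptor(key)

# The zones controller JSON-encodes each build-plan entry before encrypting,
# so do the same here.
entry = {'weight': 3.5, 'hostname': 'compute-01'}
cipher_text = encrypt(json.dumps(entry))   # base64-encoded AES-128-CBC blob
plain_text = decrypt(cipher_text)          # original JSON string back

print json.loads(plain_text)
```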
@@ -1,4 +1,5 @@ Alex Meade <alex.meade@rackspace.com> +Andrey Brindeyev <abrindeyev@griddynamics.com> Andy Smith <code@term.ie> Andy Southgate <andy.southgate@citrix.com> Anne Gentle <anne@openstack.org> @@ -16,6 +17,7 @@ Christian Berendt <berendt@b1-systems.de> Chuck Short <zulcss@ubuntu.com> Cory Wright <corywright@gmail.com> Dan Prince <dan.prince@rackspace.com> +Dave Walker <DaveWalker@ubuntu.com> David Pravec <David.Pravec@danix.org> Dean Troyer <dtroyer@gmail.com> Devin Carlen <devin.carlen@gmail.com> @@ -64,6 +66,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp> Naveed Massjouni <naveedm9@gmail.com> Nirmal Ranganathan <nirmal.ranganathan@rackspace.com> Paul Voccio <paul@openstack.org> +Renuka Apte <renuka.apte@citrix.com> Ricardo Carrillo Cruz <emaildericky@gmail.com> Rick Clark <rick@openstack.org> Rick Harris <rconradharris@gmail.com> diff --git a/MANIFEST.in b/MANIFEST.in index e7a6e7da4..4e145de75 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 +include nova/tests/public_key/* include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f42dfd6b5..5926b97de 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -108,6 +108,13 @@ def main(): interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags + + #if FLAGS.fake_rabbit: + # LOG.debug(_("leasing ip")) + # network_manager = utils.import_object(FLAGS.network_manager) + ## reload(fake_flags) + # from nova.tests import fake_flags + action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/bin/nova-manage b/bin/nova-manage index db964064d..e3ed7b9d0 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -362,27 +362,47 @@ class ProjectCommands(object): def add(self, project_id, user_id): """Adds user to project arguments: project_id user_id""" - self.manager.add_to_project(user_id, project_id) + try: + self.manager.add_to_project(user_id, project_id) + except exception.UserNotFound as ex: + print ex + raise def create(self, name, project_manager, description=None): """Creates a new project arguments: name project_manager [description]""" - self.manager.create_project(name, project_manager, description) + try: + self.manager.create_project(name, project_manager, description) + except exception.UserNotFound as ex: + print ex + raise def modify(self, name, project_manager, description=None): """Modifies a project arguments: name project_manager [description]""" - self.manager.modify_project(name, project_manager, description) + try: + self.manager.modify_project(name, project_manager, description) + except exception.UserNotFound as ex: + print ex + raise def delete(self, name): """Deletes an existing project arguments: name""" - self.manager.delete_project(name) + try: + self.manager.delete_project(name) + except exception.ProjectNotFound as ex: + print ex + raise def environment(self, project_id, user_id, filename='novarc'): """Exports environment variables to an sourcable file arguments: project_id user_id [filename='novarc]""" - rc = self.manager.get_environment_rc(user_id, project_id) + try: + rc = self.manager.get_environment_rc(user_id, project_id) + except (exception.UserNotFound, 
exception.ProjectNotFound) as ex: + print ex + raise with open(filename, 'w') as f: f.write(rc) @@ -399,7 +419,7 @@ class ProjectCommands(object): if key: try: db.quota_update(ctxt, project_id, key, value) - except exception.NotFound: + except exception.ProjectQuotaNotFound: db.quota_create(ctxt, project_id, key, value) project_quota = quota.get_quota(ctxt, project_id) for key, value in project_quota.iteritems(): @@ -408,7 +428,11 @@ class ProjectCommands(object): def remove(self, project_id, user_id): """Removes user from project arguments: project_id user_id""" - self.manager.remove_from_project(user_id, project_id) + try: + self.manager.remove_from_project(user_id, project_id) + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex + raise def scrub(self, project_id): """Deletes data associated with project @@ -427,6 +451,9 @@ class ProjectCommands(object): zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex + raise except db.api.NoMoreNetworks: print _('No more networks available. If this is a new ' 'installation, you need\nto call something like this:\n\n' @@ -65,7 +65,7 @@ def format_help(d): indent = MAX_INDENT - 6 out = [] - for k, v in d.iteritems(): + for k, v in sorted(d.iteritems()): if (len(k) + 6) > MAX_INDENT: out.extend([' %s' % k]) initial_indent = ' ' * (indent + 6) diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 9613ba990..0a5a7a4d6 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -35,6 +35,7 @@ Programming Concepts .. toctree:: :maxdepth: 3 + zone rabbit API Reference diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst index 3dd9d37d3..263560ee2 100644 --- a/doc/source/devref/zone.rst +++ b/doc/source/devref/zone.rst @@ -17,7 +17,7 @@ Zones ===== -A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. +A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion. @@ -34,7 +34,7 @@ Routing between Zones is based on the Capabilities of that Zone. Capabilities ar key=value;value;value, key=value;value;value -Zones have Capabilities which are general to the Zone and are set via `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. 
The rate at which these updates are sent is controlled by the `--periodic_interval` flag. +Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. Flow within a Zone ------------------ @@ -47,7 +47,7 @@ Inter-service communication within a Zone is done with RabbitMQ. Each class of S These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. -The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova"). +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). Zone administrative functions ----------------------------- diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst index 9c54f3608..397cc8e80 100644 --- a/doc/source/man/novamanage.rst +++ b/doc/source/man/novamanage.rst @@ -6,7 +6,7 @@ nova-manage control and manage cloud computer instances and images ------------------------------------------------------ -:Author: nova@lists.launchpad.net +:Author: openstack@lists.launchpad.net :Date: 2010-11-16 :Copyright: OpenStack LLC :Version: 0.1 @@ -121,7 +121,7 @@ Nova Role nova-manage role <action> [<argument>] ``nova-manage role add <username> <rolename> <(optional) projectname>`` - Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. + Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``nova-manage role has <username> <projectname>`` Checks the user or project and responds with True if the user has a global role with a particular project. 
diff --git a/doc/source/runnova/managing.users.rst b/doc/source/runnova/managing.users.rst index 392142e86..d3442bed9 100644 --- a/doc/source/runnova/managing.users.rst +++ b/doc/source/runnova/managing.users.rst @@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to Nova’s rights management system employs the RBAC model and currently supports the following five roles: -* **Cloud Administrator.** (admin) Users of this class enjoy complete system access. +* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access. * **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances. -* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. +* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. * **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules. -* **Developer.** This is a general purpose role that is assigned to users by default. +* **Developer.** (developer) This is a general purpose role that is assigned to users by default. RBAC management is exposed through the dashboard for simplified user management. diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cd59340bd..c13993dd3 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -338,6 +338,10 @@ class Executor(wsgi.Application): else: return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.KeyPairExists as ex: + LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1fa07d042..c35b6024e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,6 +27,8 @@ import datetime import IPy import os import urllib +import tempfile +import shutil from nova import compute from nova import context @@ -316,6 +318,27 @@ class CloudController(object): 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here + def import_public_key(self, context, key_name, public_key, + fingerprint=None): + LOG.audit(_("Import key %s"), key_name, context=context) + key = {} + key['user_id'] = context.user_id + key['name'] = key_name + key['public_key'] = public_key + if fingerprint is None: + tmpdir = tempfile.mkdtemp() + pubfile = os.path.join(tmpdir, 'temp.pub') + fh = open(pubfile, 'w') + fh.write(public_key) + fh.close() + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s' % (pubfile)) + fingerprint = out.split(' ')[1] + shutil.rmtree(tmpdir) + key['fingerprint'] = fingerprint + db.key_pair_create(context, key) + return True + def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 348b70d5b..5b7f080ad 100644 --- a/nova/api/openstack/__init__.py +++ 
b/nova/api/openstack/__init__.py @@ -98,7 +98,8 @@ class APIRouter(wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", controller=zones.Controller(), - collection={'detail': 'GET', 'info': 'GET'}), + collection={'detail': 'GET', 'info': 'GET', + 'select': 'GET'}) mapper.resource("user", "users", controller=users.Controller(), collection={'detail': 'GET'}) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 311e6bde9..6c6ee22a2 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -17,7 +17,6 @@ import datetime import hashlib -import json import time import webob.exc @@ -25,11 +24,9 @@ import webob.dec from nova import auth from nova import context -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import manager from nova import utils from nova import wsgi from nova.api.openstack import faults @@ -102,11 +99,11 @@ class AuthMiddleware(wsgi.Middleware): token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() - res.headers['X-Auth-Token'] = token.token_hash + res.headers['X-Auth-Token'] = token['token_hash'] res.headers['X-Server-Management-Url'] = \ - token.server_management_url - res.headers['X-Storage-Url'] = token.storage_url - res.headers['X-CDN-Management-Url'] = token.cdn_management_url + token['server_management_url'] + res.headers['X-Storage-Url'] = token['storage_url'] + res.headers['X-CDN-Management-Url'] = token['cdn_management_url'] res.content_type = 'text/plain' res.status = '204' LOG.debug(_("Successfully authenticated '%s'") % username) @@ -130,11 +127,11 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.now() - token.created_at + delta = datetime.datetime.utcnow() - token['created_at'] if delta.days >= 2: - self.db.auth_token_destroy(ctxt, token.token_hash) + self.db.auth_token_destroy(ctxt, token['token_hash']) else: - return self.auth.get_user(token.user_id) + return self.auth.get_user(token['user_id']) return None def _authorize_user(self, username, key, req): diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 7ea7afef6..8e77b25fb 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -105,15 +105,14 @@ class ExtensionDescriptor(object): actions = [] return actions - def get_response_extensions(self): - """List of extensions.ResponseExtension extension objects. + def get_request_extensions(self): + """List of extensions.RequestException extension objects. - Response extensions are used to insert information into existing - response data. + Request extensions are used to handle custom request data. 
""" - response_exts = [] - return response_exts + request_exts = [] + return request_exts class ActionExtensionController(common.OpenstackController): @@ -137,7 +136,7 @@ class ActionExtensionController(common.OpenstackController): return res -class ResponseExtensionController(common.OpenstackController): +class RequestExtensionController(common.OpenstackController): def __init__(self, application): self.application = application @@ -148,20 +147,9 @@ class ResponseExtensionController(common.OpenstackController): def process(self, req, *args, **kwargs): res = req.get_response(self.application) - content_type = req.best_match_content_type() - # currently response handlers are un-ordered + # currently request handlers are un-ordered for handler in self.handlers: - res = handler(res) - try: - body = res.body - headers = res.headers - except AttributeError: - default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) - headers = {"Content-Type": content_type} - res = webob.Response() - res.body = body - res.headers = headers + res = handler(req, res) return res @@ -226,24 +214,24 @@ class ExtensionMiddleware(wsgi.Middleware): return action_controllers - def _response_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionController-s by collection.""" - response_ext_controllers = {} - for resp_ext in ext_mgr.get_response_extensions(): - if not resp_ext.key in response_ext_controllers.keys(): - controller = ResponseExtensionController(application) - mapper.connect(resp_ext.url_route + '.:(format)', + def _request_ext_controllers(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionController-s by collection.""" + request_ext_controllers = {} + for req_ext in ext_mgr.get_request_extensions(): + if not req_ext.key in request_ext_controllers.keys(): + controller = RequestExtensionController(application) + mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=controller, - conditions=resp_ext.conditions) + conditions=req_ext.conditions) - mapper.connect(resp_ext.url_route, + mapper.connect(req_ext.url_route, action='process', controller=controller, - conditions=resp_ext.conditions) - response_ext_controllers[resp_ext.key] = controller + conditions=req_ext.conditions) + request_ext_controllers[req_ext.key] = controller - return response_ext_controllers + return request_ext_controllers def __init__(self, application, ext_mgr=None): @@ -271,13 +259,13 @@ class ExtensionMiddleware(wsgi.Middleware): controller = action_controllers[action.collection] controller.add_action(action.action_name, action.handler) - # extended responses - resp_controllers = self._response_ext_controllers(application, ext_mgr, + # extended requests + req_controllers = self._request_ext_controllers(application, ext_mgr, mapper) - for response_ext in ext_mgr.get_response_extensions(): - LOG.debug(_('Extended response: %s'), response_ext.key) - controller = resp_controllers[response_ext.key] - controller.add_handler(response_ext.handler) + for request_ext in ext_mgr.get_request_extensions(): + LOG.debug(_('Extended request: %s'), request_ext.key) + controller = req_controllers[request_ext.key] + controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) @@ -347,17 +335,17 @@ class ExtensionManager(object): pass return actions - def get_response_extensions(self): - """Returns a list of ResponseExtension objects.""" - response_exts = [] + def get_request_extensions(self): 
+ """Returns a list of RequestExtension objects.""" + request_exts = [] for alias, ext in self.extensions.iteritems(): try: - response_exts.extend(ext.get_response_extensions()) + request_exts.extend(ext.get_request_extensions()) except AttributeError: - # NOTE(dprince): Extension aren't required to have response + # NOTE(dprince): Extension aren't required to have request # extensions pass - return response_exts + return request_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" @@ -421,9 +409,13 @@ class ExtensionManager(object): self.extensions[alias] = ext -class ResponseExtension(object): - """Add data to responses from core nova OpenStack API controllers.""" +class RequestExtension(object): + """Extend requests and responses of core nova OpenStack API controllers. + Provide a way to add data to responses and handle custom request data + that is sent to core nova OpenStack API controllers. + + """ def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 227ffecdc..af73d8f6d 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -13,7 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. +import json +import urlparse + +from nova import crypto from nova import db +from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common @@ -21,6 +26,12 @@ from nova.scheduler import api FLAGS = flags.FLAGS +flags.DEFINE_string('build_plan_encryption_key', + None, + '128bit (hex) encryption key for scheduler build plans.') + + +LOG = logging.getLogger('nova.api.openstack.zones') def _filter_keys(item, keys): @@ -97,3 +108,35 @@ class Controller(common.OpenstackController): zone_id = int(id) zone = api.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) + + def select(self, req): + """Returns a weighted list of costs to create instances + of desired capabilities.""" + ctx = req.environ['nova.context'] + qs = req.environ['QUERY_STRING'] + param_dict = urlparse.parse_qs(qs) + param_dict.pop("fresh", None) + # parse_qs returns a dict where the values are lists, + # since query strings can have multiple values for the + # same key. We need to convert that to single values. + for key in param_dict: + param_dict[key] = param_dict[key][0] + build_plan = api.select(ctx, specs=param_dict) + cooked = self._scrub_build_plan(build_plan) + return {"weights": cooked} + + def _scrub_build_plan(self, build_plan): + """Remove all the confidential data and return a sanitized + version of the build plan. 
Include an encrypted full version + of the weighting entry so we can get back to it later.""" + if not FLAGS.build_plan_encryption_key: + raise exception.FlagNotSet(flag='build_plan_encryption_key') + + encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key) + cooked = [] + for entry in build_plan: + json_entry = json.dumps(entry) + cipher_text = encryptor(json_entry) + cooked.append(dict(weight=entry['weight'], + blob=cipher_text)) + return cooked diff --git a/nova/compute/api.py b/nova/compute/api.py index a05112afb..a12b7dee5 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -251,11 +251,18 @@ class API(base.Base): uid = context.user_id LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals()) + + # NOTE(sandy): For now we're just going to pass in the + # instance_type record to the scheduler. In a later phase + # we'll be ripping this whole for-loop out and deferring the + # creation of the Instance record. At that point all this will + # change. rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, + "instance_type": instance_type, "availability_zone": availability_zone, "injected_files": injected_files}}) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 923feaa59..11565c25e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -628,7 +628,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['new_flavor_id']) self.db.instance_update(context, instance_id, - dict(instance_type=instance_type['name'], + dict(instance_type_id=instance_type['id'], memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], local_gb=instance_type['local_gb'])) diff --git a/nova/crypto.py b/nova/crypto.py index 14b9cbef6..bdc32482a 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -332,6 +332,51 @@ def mkcacert(subject='nova', years=1): return cert, pk, pkey +def _build_cipher(key, iv, encode=True): + """Make a 128bit AES CBC encode/decode Cipher object. 
+ Padding is handled internally.""" + operation = 1 if encode else 0 + return M2Crypto.EVP.Cipher(alg='aes_128_cbc', key=key, iv=iv, op=operation) + + +def encryptor(key, iv=None): + """Simple symmetric key encryption.""" + key = base64.b64decode(key) + if iv is None: + iv = '\0' * 16 + else: + iv = base64.b64decode(iv) + + def encrypt(data): + cipher = _build_cipher(key, iv, encode=True) + v = cipher.update(data) + v = v + cipher.final() + del cipher + v = base64.b64encode(v) + return v + + return encrypt + + +def decryptor(key, iv=None): + """Simple symmetric key decryption.""" + key = base64.b64decode(key) + if iv is None: + iv = '\0' * 16 + else: + iv = base64.b64decode(iv) + + def decrypt(data): + data = base64.b64decode(data) + cipher = _build_cipher(key, iv, encode=False) + v = cipher.update(data) + v = v + cipher.final() + del cipher + return v + + return decrypt + + # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 3681f30db..b53e81053 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -873,6 +873,7 @@ def instance_get_all(context): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -885,6 +886,7 @@ def instance_get_all_by_user(context, user_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(user_id=user_id).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py new file mode 100644 index 000000000..cda890c94 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -0,0 +1,68 @@ +from sqlalchemy import Column, Integer, MetaData, String, Table +from nova import log as logging + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + continue + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + logging.warn("Instance %s did not have instance_type_id " + "converted to an integer because its value is %s" % + (instance.id, instance.instance_type_id)) + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + 
integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + integer_column.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py new file mode 100644 index 000000000..a169afb40 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +c_manageent = Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +c_management = Column('server_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_management) + migrate_engine.execute(tokens.update() + .values(server_management_url=tokens.c.server_manageent_url)) + + tokens.c.server_manageent_url.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_manageent) + migrate_engine.execute(tokens.update() + .values(server_manageent_url=tokens.c.server_management_url)) + + tokens.c.server_management_url.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 0b46d5a05..1215448f8 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -209,7 +209,7 @@ class Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) - instance_type_id = Column(String(255)) + instance_type_id = Column(Integer) user_data = Column(Text) @@ -495,7 +495,7 @@ class AuthToken(BASE, NovaBase): __tablename__ = 'auth_tokens' token_hash = Column(String(255), primary_key=True) user_id = Column(String(255)) - server_manageent_url = Column(String(255)) + server_management_url = Column(String(255)) storage_url = Column(String(255)) cdn_management_url = Column(String(255)) diff --git a/nova/exception.py b/nova/exception.py index cf6069454..56c20d111 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -255,6 +255,10 @@ class NotFound(NovaException): super(NotFound, self).__init__(**kwargs) +class FlagNotSet(NotFound): + message = _("Required flag %(flag)s not set.") + + class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") diff --git a/nova/flags.py b/nova/flags.py index 519793643..9eaac5596 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] @@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues): if '__stored_argv' not in self.__dict__: return new_flags = FlagValues(self) - for k in self.__dict__['__dirty']: + for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) + new_flags.Reset() new_flags(self.__dict__['__stored_argv']) - for k in self.__dict__['__dirty']: + for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) self.ClearDirty() @@ -369,6 +370,9 @@ DEFINE_string('host', socket.gethostname(), DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') +DEFINE_string('notification_driver', + 'nova.notifier.no_op_notifier', + 'Default driver for sending notifications') DEFINE_list('memcached_servers', None, 'Memcached servers or None for in process cache.') diff --git a/nova/network/api.py b/nova/network/api.py index 1d8193b28..e2eacdf42 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). 
-""" +"""Handles all requests relating to instances (guest vms).""" from nova import db from nova import exception @@ -28,6 +26,7 @@ from nova import quota from nova import rpc from nova.db import base + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.network') @@ -37,19 +36,19 @@ class API(base.Base): def allocate_floating_ip(self, context): if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) + LOG.warn(_('Quota exceeeded for %s, tried to allocate ' + 'address'), + context.project_id) + raise quota.QuotaError(_('Address quota exceeded. You cannot ' + 'allocate any more addresses')) # NOTE(vish): We don't know which network host should get the ip # when we allocate, so just send it to any one. This # will probably need to move into a network supervisor # at some point. return rpc.call(context, FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + {'method': 'allocate_floating_ip', + 'args': {'project_id': context.project_id}}) def release_floating_ip(self, context, address, affect_auto_assigned=False): @@ -62,8 +61,8 @@ class API(base.Base): # at some point. rpc.cast(context, FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'deallocate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) def associate_floating_ip(self, context, floating_ip, fixed_ip, affect_auto_assigned=False): @@ -74,17 +73,17 @@ class API(base.Base): return # Check if the floating ip address is allocated if floating_ip['project_id'] is None: - raise exception.ApiError(_("Address (%s) is not allocated") % + raise exception.ApiError(_('Address (%s) is not allocated') % floating_ip['address']) # Check if the floating ip address is allocated to the same project if floating_ip['project_id'] != context.project_id: - LOG.warn(_("Address (%(address)s) is not allocated to your " - "project (%(project)s)"), + LOG.warn(_('Address (%(address)s) is not allocated to your ' + 'project (%(project)s)'), {'address': floating_ip['address'], 'project': context.project_id}) - raise exception.ApiError(_("Address (%(address)s) is not " - "allocated to your project" - "(%(project)s)") % + raise exception.ApiError(_('Address (%(address)s) is not ' + 'allocated to your project' + '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) # NOTE(vish): Perhaps we should just pass this on to compute and @@ -92,9 +91,9 @@ class API(base.Base): host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip['address'], - "fixed_address": fixed_ip['address']}}) + {'method': 'associate_floating_ip', + 'args': {'floating_address': floating_ip['address'], + 'fixed_address': fixed_ip['address']}}) def disassociate_floating_ip(self, context, address, affect_auto_assigned=False): @@ -108,5 +107,5 @@ class API(base.Base): host = floating_ip['fixed_ip']['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'disassociate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) diff --git 
a/nova/network/linux_net.py b/nova/network/linux_net.py index b50a4b4ea..815cd29c3 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -15,26 +15,27 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans, bridges, and iptables rules using linux utilities. -""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" + +import calendar import inspect import os -import calendar from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils +from IPy import IP + LOG = logging.getLogger("nova.linux_net") def _bin_file(script): - """Return the absolute path to scipt in the bin directory""" - return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + """Return the absolute path to scipt in the bin directory.""" + return os.path.abspath(os.path.join(__file__, '../../../bin', script)) FLAGS = flags.FLAGS @@ -66,11 +67,13 @@ binary_name = os.path.basename(inspect.stack()[-1][1]) class IptablesRule(object): - """An iptables rule + """An iptables rule. You shouldn't need to use this class directly, it's only used by - IptablesManager + IptablesManager. + """ + def __init__(self, chain, rule, wrap=True, top=False): self.chain = chain self.rule = rule @@ -95,7 +98,7 @@ class IptablesRule(object): class IptablesTable(object): - """An iptables table""" + """An iptables table.""" def __init__(self): self.rules = [] @@ -103,15 +106,16 @@ class IptablesTable(object): self.unwrapped_chains = set() def add_chain(self, name, wrap=True): - """Adds a named chain to the table + """Adds a named chain to the table. The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is <binary name>-<chain name>, - so if nova-compute creates a chain named "OUTPUT", it'll actually - end up named "nova-compute-OUTPUT". + so if nova-compute creates a chain named 'OUTPUT', it'll actually + end up named 'nova-compute-OUTPUT'. + """ if wrap: self.chains.add(name) @@ -119,12 +123,13 @@ class IptablesTable(object): self.unwrapped_chains.add(name) def remove_chain(self, name, wrap=True): - """Remove named chain + """Remove named chain. This removal "cascades". All rule in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged. + """ if wrap: chain_set = self.chains @@ -132,7 +137,7 @@ class IptablesTable(object): chain_set = self.unwrapped_chains if name not in chain_set: - LOG.debug(_("Attempted to remove chain %s which doesn't exist"), + LOG.debug(_('Attempted to remove chain %s which does not exist'), name) return @@ -147,17 +152,18 @@ class IptablesTable(object): self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules) def add_rule(self, chain, rule, wrap=True, top=False): - """Add a rule to the table + """Add a rule to the table. This is just like what you'd feed to iptables, just without - the "-A <chain name>" bit at the start. + the '-A <chain name>' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$' which will ensure the wrapping is applied correctly. 
+ """ if wrap and chain not in self.chains: - raise ValueError(_("Unknown chain: %r") % chain) + raise ValueError(_('Unknown chain: %r') % chain) if '$' in rule: rule = ' '.join(map(self._wrap_target_chain, rule.split(' '))) @@ -170,23 +176,24 @@ class IptablesTable(object): return s def remove_rule(self, chain, rule, wrap=True, top=False): - """Remove a rule from a chain + """Remove a rule from a chain. Note: The rule must be exactly identical to the one that was added. You cannot switch arguments around like you can with the iptables CLI tool. + """ try: self.rules.remove(IptablesRule(chain, rule, wrap, top)) except ValueError: - LOG.debug(_("Tried to remove rule that wasn't there:" - " %(chain)r %(rule)r %(wrap)r %(top)r"), + LOG.debug(_('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) class IptablesManager(object): - """Wrapper for iptables + """Wrapper for iptables. See IptablesTable for some usage docs @@ -205,7 +212,9 @@ class IptablesManager(object): For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are wrapped in the same was as the builtin filter chains. Additionally, there's a snat chain that is applied after the POSTROUTING chain. + """ + def __init__(self, execute=None): if not execute: self.execute = _execute @@ -267,11 +276,12 @@ class IptablesManager(object): @utils.synchronized('iptables', external=True) def apply(self): - """Apply the current in-memory set of iptables rules + """Apply the current in-memory set of iptables rules. This will blow away any rules left over from previous runs of the same component of Nova, and replace them with our current set of rules. This happens atomically, thanks to iptables-restore. + """ s = [('iptables', self.ipv4)] if FLAGS.use_ipv6: @@ -348,63 +358,63 @@ class IptablesManager(object): def metadata_forward(): - """Create forwarding rule for metadata""" - iptables_manager.ipv4['nat'].add_rule("PREROUTING", - "-s 0.0.0.0/0 -d 169.254.169.254/32 " - "-p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % \ + """Create forwarding rule for metadata.""" + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j DNAT ' + '--to-destination %s:%s' % \ (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) iptables_manager.apply() def init_host(): - """Basic networking setup goes here""" + """Basic networking setup goes here.""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. 
- iptables_manager.ipv4['nat'].add_rule("snat", - "-s %s -j SNAT --to-source %s" % \ + iptables_manager.ipv4['nat'].add_rule('snat', + '-s %s -j SNAT --to-source %s' % \ (FLAGS.fixed_range, FLAGS.routing_source_ip)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %s -d %s -j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %s -d %s -j ACCEPT' % \ (FLAGS.fixed_range, FLAGS.dmz_cidr)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %(range)s -d %(range)s " - "-j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %(range)s -d %(range)s ' + '-j ACCEPT' % \ {'range': FLAGS.fixed_range}) iptables_manager.apply() def bind_floating_ip(floating_ip, check_exit_code=True): - """Bind ip to public interface""" + """Bind ip to public interface.""" _execute('sudo', 'ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): - """Unbind a public ip from public interface""" + """Unbind a public ip from public interface.""" _execute('sudo', 'ip', 'addr', 'del', floating_ip, 'dev', FLAGS.public_interface) def ensure_metadata_ip(): - """Sets up local metadata ip""" + """Sets up local metadata ip.""" _execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32', 'scope', 'link', 'dev', 'lo', check_exit_code=False) def ensure_vlan_forward(public_ip, port, private_ip): - """Sets up forwarding rules for vlan""" - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "-d %s -p udp " - "--dport 1194 " - "-j ACCEPT" % private_ip) - iptables_manager.ipv4['nat'].add_rule("PREROUTING", - "-d %s -p udp " - "--dport %s -j DNAT --to %s:1194" % + """Sets up forwarding rules for vlan.""" + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '-d %s -p udp ' + '--dport 1194 ' + '-j ACCEPT' % private_ip) + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-d %s -p udp ' + '--dport %s -j DNAT --to %s:1194' % (public_ip, port, private_ip)) iptables_manager.ipv4['nat'].add_rule("OUTPUT", "-d %s -p udp " @@ -414,37 +424,38 @@ def ensure_vlan_forward(public_ip, port, private_ip): def ensure_floating_forward(floating_ip, fixed_ip): - """Ensure floating ip forwarding rule""" + """Ensure floating ip forwarding rule.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].add_rule(chain, rule) iptables_manager.apply() def remove_floating_forward(floating_ip, fixed_ip): - """Remove forwarding for floating ip""" + """Remove forwarding for floating ip.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].remove_rule(chain, rule) iptables_manager.apply() def floating_forward_rules(floating_ip, fixed_ip): - return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("floating-snat", - "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))] + return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('floating-snat', + '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): - """Create a vlan and bridge unless they already exist""" + """Create a vlan and bridge unless they already exist.""" interface = ensure_vlan(vlan_num) ensure_bridge(bridge, interface, net_attrs) +@utils.synchronized('ensure_vlan', external=True) def ensure_vlan(vlan_num): - """Create a vlan unless it already exists""" - 
interface = "vlan%s" % vlan_num + """Create a vlan unless it already exists.""" + interface = 'vlan%s' % vlan_num if not _device_exists(interface): - LOG.debug(_("Starting VLAN inteface %s"), interface) + LOG.debug(_('Starting VLAN inteface %s'), interface) _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) _execute('sudo', 'ip', 'link', 'set', interface, 'up') @@ -464,12 +475,13 @@ def ensure_bridge(bridge, interface, net_attrs=None): The code will attempt to move any ips that already exist on the interface onto the bridge and reset the default gateway if necessary. + """ if not _device_exists(bridge): - LOG.debug(_("Starting Bridge interface for %s"), interface) + LOG.debug(_('Starting Bridge interface for %s'), interface) _execute('sudo', 'brctl', 'addbr', bridge) _execute('sudo', 'brctl', 'setfd', bridge, 0) - # _execute("sudo brctl setageing %s 10" % bridge) + # _execute('sudo brctl setageing %s 10' % bridge) _execute('sudo', 'brctl', 'stp', bridge, 'off') _execute('sudo', 'ip', 'link', 'set', bridge, 'up') if net_attrs: @@ -477,15 +489,15 @@ def ensure_bridge(bridge, interface, net_attrs=None): # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', - "%s/%s" % + '%s/%s' % (net_attrs['gateway'], suffix), 'brd', net_attrs['broadcast'], 'dev', bridge, check_exit_code=False) - if err and err != "RTNETLINK answers: File exists\n": - raise exception.Error("Failed to add ip: %s" % err) + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): _execute('sudo', 'ip', '-f', 'inet6', 'addr', 'change', net_attrs['cidr_v6'], @@ -501,17 +513,17 @@ def ensure_bridge(bridge, interface, net_attrs=None): # interface, so we move any ips to the bridge gateway = None out, err = _execute('sudo', 'route', '-n') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: + if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] _execute('sudo', 'route', 'del', 'default', 'gw', gateway, 'dev', interface, check_exit_code=False) out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, 'scope', 'global') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "inet": + if fields and fields[0] == 'inet': params = fields[1:-1] _execute(*_ip_bridge_cmd('del', params, fields[-1])) _execute(*_ip_bridge_cmd('add', params, bridge)) @@ -522,18 +534,18 @@ def ensure_bridge(bridge, interface, net_attrs=None): if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): - raise exception.Error("Failed to add interface: %s" % err) + raise exception.Error('Failed to add interface: %s' % err) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--in-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--in-interface %s -j ACCEPT' % \ bridge) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--out-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--out-interface %s -j ACCEPT' % \ bridge) def get_dhcp_leases(context, network_id): - """Return a network's hosts config in dnsmasq leasefile format""" + """Return a network's hosts config in dnsmasq leasefile 
format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -542,7 +554,7 @@ def get_dhcp_leases(context, network_id): def get_dhcp_hosts(context, network_id): - """Get a string containing a network's hosts config in dhcp-host format""" + """Get network's hosts config in dhcp-host format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -555,10 +567,11 @@ def get_dhcp_hosts(context, network_id): # aren't reloaded. @utils.synchronized('dnsmasq_start') def update_dhcp(context, network_id): - """(Re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network. + + If a dnsmasq instance is already running then send a HUP + signal causing it to reload, otherwise spawn a new instance. - if a dnsmasq instance is already running then send a HUP - signal causing it to reload, otherwise spawn a new instance """ network_ref = db.network_get(context, network_id) @@ -573,16 +586,16 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat', "/proc/%d/cmdline" % pid, + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Hupping dnsmasq threw %s"), exc) + LOG.debug(_('Hupping dnsmasq threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -625,18 +638,18 @@ interface %s try: _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("killing radvd threw %s"), exc) + LOG.debug(_('killing radvd threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) + LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) db.network_update(context, network_id, - {"gateway_v6": + {'gateway_v6': utils.get_my_linklocal(network_ref['bridge'])}) def _host_lease(fixed_ip_ref): - """Return a host string for an address in leasefile format""" + """Return a host string for an address in leasefile format.""" instance_ref = fixed_ip_ref['instance'] if instance_ref['updated_at']: timestamp = instance_ref['updated_at'] @@ -645,39 +658,39 @@ def _host_lease(fixed_ip_ref): seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) - return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time, + return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time, instance_ref['mac_address'], fixed_ip_ref['address'], instance_ref['hostname'] or '*') def _host_dhcp(fixed_ip_ref): - """Return a host string for an address in dhcp-host format""" + """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return "%s,%s.%s,%s" % (instance_ref['mac_address'], + return '%s,%s.%s,%s' % (instance_ref['mac_address'], instance_ref['hostname'], FLAGS.dhcp_domain, fixed_ip_ref['address']) def _execute(*cmd, **kwargs): - """Wrapper around utils._execute for fake_network""" + """Wrapper around utils._execute for fake_network.""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) - return "fake", 0 + LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd))) + return 'fake', 0 else: return utils.execute(*cmd, **kwargs) def 
_device_exists(device): - """Check if ethernet device exists""" + """Check if ethernet device exists.""" (_out, err) = _execute('ip', 'link', 'show', 'dev', device, check_exit_code=False) return not err def _dnsmasq_cmd(net): - """Builds dnsmasq command""" + """Builds dnsmasq command.""" cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', @@ -687,6 +700,7 @@ def _dnsmasq_cmd(net): '--listen-address=%s' % net['gateway'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], + '--dhcp-lease-max=%s' % IP(net['cidr']).len(), '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] @@ -696,7 +710,7 @@ def _dnsmasq_cmd(net): def _ra_cmd(net): - """Builds radvd command""" + """Builds radvd command.""" cmd = ['sudo', '-E', 'radvd', # '-u', 'nobody', '-C', '%s' % _ra_file(net['bridge'], 'conf'), @@ -705,44 +719,43 @@ def _ra_cmd(net): def _stop_dnsmasq(network): - """Stops the dnsmasq instance for a given network""" + """Stops the dnsmasq instance for a given network.""" pid = _dnsmasq_pid_for(network) if pid: try: _execute('sudo', 'kill', '-TERM', pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Killing dnsmasq threw %s"), exc) + LOG.debug(_('Killing dnsmasq threw %s'), exc) def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge""" - + """Return path to a pid, leases or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge""" + """Return path to a pid or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge + """Returns the pid for prior dnsmasq instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). + """ pid_file = _dhcp_file(bridge, 'pid') if os.path.exists(pid_file): @@ -751,13 +764,13 @@ def _dnsmasq_pid_for(bridge): def _ra_pid_for(bridge): - """Returns the pid for prior radvd instance for a bridge + """Returns the pid for prior radvd instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). + """ pid_file = _ra_file(bridge, 'pid') if os.path.exists(pid_file): @@ -766,8 +779,7 @@ def _ra_pid_for(bridge): def _ip_bridge_cmd(action, params, device): - """Build commands to add/del ips to bridges/devices""" - + """Build commands to add/del ips to bridges/devices.""" cmd = ['sudo', 'ip', 'addr', action] cmd.extend(params) cmd.extend(['dev', device]) diff --git a/nova/network/manager.py b/nova/network/manager.py index 0dd7f2360..5a6fdde5a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -16,8 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" -Network Hosts are responsible for allocating ips and setting up network. +"""Network Hosts are responsible for allocating ips and setting up network. There are multiple backend drivers that handle specific types of networking topologies. All of the network commands are issued to a subclass of @@ -61,6 +60,8 @@ from nova import rpc LOG = logging.getLogger("nova.network.manager") + + FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -111,7 +112,9 @@ class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. + """ + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): @@ -122,9 +125,7 @@ class NetworkManager(manager.SchedulerDependentManager): *args, **kwargs) def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" self.driver.init_host() self.driver.ensure_metadata_ip() # Set up networking for the projects for which we're already @@ -154,11 +155,11 @@ class NetworkManager(manager.SchedulerDependentManager): self.host, time) if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - LOG.debug(_("setting network host"), context=context) + LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -224,39 +225,39 @@ class NetworkManager(manager.SchedulerDependentManager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_("Leasing IP %s"), address, context=context) + LOG.debug(_('Leasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s leased that isn't associated") % + raise exception.Error(_('IP %s leased that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s leased to bad" - " mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s leased to bad mac' + ' %(inst_addr)s vs %(mac)s') % locals()) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - LOG.warn(_("IP %s leased that was already deallocated"), address, + LOG.warn(_('IP %s leased that was already deallocated'), address, context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug(_("Releasing IP %s"), address, context=context) + LOG.debug(_('Releasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s released that isn't associated") % + raise exception.Error(_('IP %s released that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s released from" - " bad mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s released from bad 
mac' + ' %(inst_addr)s vs %(mac)s') % locals()) if not fixed_ip_ref['leased']: - LOG.warn(_("IP %s released that was not leased"), address, + LOG.warn(_('IP %s released that was not leased'), address, context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], @@ -286,8 +287,8 @@ class NetworkManager(manager.SchedulerDependentManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host def create_networks(self, context, cidr, num_networks, network_size, @@ -302,7 +303,7 @@ class NetworkManager(manager.SchedulerDependentManager): start = index * network_size start_v6 = index * network_size_v6 significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) + cidr = '%s/%s' % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) net = {} net['bridge'] = FLAGS.flat_network_bridge @@ -313,13 +314,13 @@ class NetworkManager(manager.SchedulerDependentManager): net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) if num_networks > 1: - net['label'] = "%s_%d" % (label, count) + net['label'] = '%s_%d' % (label, count) else: net['label'] = label count += 1 if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 project_net_v6 = IPy.IP(cidr_v6) @@ -386,13 +387,13 @@ class FlatManager(NetworkManager): Metadata forwarding must be handled by the gateway, and since nova does not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. + """ + timeout_fixed_ips = False def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" #Fix for bug 723298 - do not call init_host on superclass #Following code has been copied for NetworkManager.init_host ctxt = context.get_admin_context() @@ -433,12 +434,11 @@ class FlatDHCPManager(NetworkManager): FlatDHCPManager will start up one dhcp server to give out addresses. It never injects network settings into the guest. Otherwise it behaves like FlatDHCPManager. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" super(FlatDHCPManager, self).init_host() self.driver.metadata_forward() @@ -490,12 +490,11 @@ class VlanManager(NetworkManager): A dhcp server is run for each subnet, so each project will have its own. For this mode to be useful, each project will need a vpn to access the instances in its subnet. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. 
- """ + """Do any initialization for a standalone service.""" super(VlanManager, self).init_host() self.driver.metadata_forward() @@ -566,7 +565,7 @@ class VlanManager(NetworkManager): net['vlan'] = vlan net['bridge'] = 'br%s' % vlan if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 @@ -600,8 +599,8 @@ class VlanManager(NetworkManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index 9b2db7b8f..373060add 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans for vmwareapi. -""" +"""Implements vlans for vmwareapi.""" from nova import db from nova import exception @@ -27,8 +25,10 @@ from nova import utils from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils + LOG = logging.getLogger("nova.network.vmwareapi_net") + FLAGS = flags.FLAGS flags.DEFINE_string('vlan_interface', 'vmnic0', 'Physical network adapter name in VMware ESX host for ' @@ -42,10 +42,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): host_username = FLAGS.vmwareapi_host_username host_password = FLAGS.vmwareapi_host_password if not host_ip or host_username is None or host_password is None: - raise Exception(_("Must specify vmwareapi_host_ip," - "vmwareapi_host_username " - "and vmwareapi_host_password to use" - "connection_type=vmwareapi")) + raise Exception(_('Must specify vmwareapi_host_ip, ' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use ' + 'connection_type=vmwareapi')) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) vlan_interface = FLAGS.vlan_interface diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py index 8c22a7d4b..709ef7f34 100644 --- a/nova/network/xenapi_net.py +++ b/nova/network/xenapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans, bridges, and iptables rules using linux utilities. 
-""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" import os @@ -26,22 +24,24 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils -from nova.virt.xenapi_conn import XenAPISession +from nova.virt import xenapi_conn from nova.virt.xenapi import network_utils + LOG = logging.getLogger("nova.xenapi_net") + FLAGS = flags.FLAGS def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open xenapi session - LOG.debug("ENTERING ensure_vlan_bridge in xenapi net") + LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') url = FLAGS.xenapi_connection_url username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password - session = XenAPISession(url, username, password) + session = xenapi_conn.XenAPISession(url, username, password) # Check whether bridge already exists # Retrieve network whose name_label is "bridge" network_ref = network_utils.NetworkHelper.find_network_with_name_label( @@ -50,14 +50,14 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): if network_ref is None: # If bridge does not exists # 1 - create network - description = "network for nova bridge %s" % bridge + description = 'network for nova bridge %s' % bridge network_rec = {'name_label': bridge, 'name_description': description, 'other_config': {}} network_ref = session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % FLAGS.vlan_interface + expr = "field 'device' = '%s' and \ + field 'VLAN' = '-1'" % FLAGS.vlan_interface pifs = session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py new file mode 100644 index 000000000..482d54e4f --- /dev/null +++ b/nova/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/notifier/api.py b/nova/notifier/api.py new file mode 100644 index 000000000..a3e7a039e --- /dev/null +++ b/nova/notifier/api.py @@ -0,0 +1,83 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License.import datetime + +import datetime +import uuid + +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('default_notification_level', 'INFO', + 'Default notification level for outgoing notifications') + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify(publisher_id, event_type, priority, payload): + """ + Sends a notification using the specified driver + + Notify parameters: + + publisher_id - the source worker_type.host of the message + event_type - the literal type of event (ex. Instance Creation) + priority - patterned after the enumeration of Python logging levels in + the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + payload - A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id - a UUID representing the id for this notification + timestamp - the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': datetime.datetime.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities' % priority)) + driver = utils.import_object(FLAGS.notification_driver) + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(datetime.datetime.utcnow())) + driver.notify(msg) diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py new file mode 100644 index 000000000..25dfc693b --- /dev/null +++ b/nova/notifier/log_notifier.py @@ -0,0 +1,34 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from nova import flags +from nova import log as logging + + +FLAGS = flags.FLAGS + + +def notify(message): + """Notifies the recipient of the desired event given the model. 
+ Log notifications using nova's default logging system""" + + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'nova.notification.%s' % message['event_type']) + getattr(logger, priority)(json.dumps(message)) diff --git a/nova/tests/real_flags.py b/nova/notifier/no_op_notifier.py index 71da04992..029710505 100644 --- a/nova/tests/real_flags.py +++ b/nova/notifier/no_op_notifier.py @@ -1,7 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,11 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import flags - -FLAGS = flags.FLAGS -FLAGS.connection_type = 'libvirt' -FLAGS.fake_rabbit = False -FLAGS.fake_network = False -FLAGS.verbose = False +def notify(message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py new file mode 100644 index 000000000..d46670b58 --- /dev/null +++ b/nova/notifier/rabbit_notifier.py @@ -0,0 +1,36 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
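The notifier API introduced above dispatches every message to whichever module the notification driver flag names (referenced as FLAGS.notification_driver in nova/notifier/api.py; its definition is not shown in this hunk), so any module exposing a module-level notify(message) callable can serve as a backend. A minimal, hypothetical sketch of such a driver and of a caller, assuming only the notify() contract documented in nova/notifier/api.py; the module name and file path below are illustrative, not part of this commit:

# hypothetical_file_notifier.py -- illustrative only, not part of this change
import json

def notify(message):
    """Append each notification dict as one JSON line to a local file."""
    out = open('/tmp/nova-notifications.log', 'a')   # path is an assumption
    try:
        out.write(json.dumps(message) + '\n')
    finally:
        out.close()

# Emitting a notification from anywhere in nova:
from nova.notifier import api as notifier_api

notifier_api.notify('compute.host1',             # publisher_id: worker_type.host
                    'compute.create_instance',   # event_type
                    notifier_api.INFO,           # one of DEBUG/WARN/INFO/ERROR/CRITICAL
                    {'instance_id': 12})         # payload dictionary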
+ + +import nova.context + +from nova import flags +from nova import rpc + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('notification_topic', 'notifications', + 'RabbitMQ topic used for Nova notifications') + + +def notify(message): + """Sends a notification to the RabbitMQ""" + context = nova.context.get_admin_context() + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + topic = '%s.%s' % (FLAGS.notification_topic, priority) + rpc.cast(context, topic, message) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 816ae5513..55f8e0a6d 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -81,6 +81,12 @@ def get_zone_capabilities(context): return _call_scheduler('get_zone_capabilities', context=context) +def select(context, specs=None): + """Returns a list of hosts.""" + return _call_scheduler('select', context=context, + params={"specs": specs}) + + def update_service_capabilities(context, service_name, host, capabilities): """Send an update to all the scheduler services informing them of the capabilities of this service.""" @@ -105,6 +111,45 @@ def _process(func, zone): return func(nova, zone) +def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): + """Returns a list of (zone, call_result) objects.""" + if not isinstance(errors_to_ignore, (list, tuple)): + # This will also handle the default None + errors_to_ignore = [errors_to_ignore] + + pool = greenpool.GreenPool() + results = [] + for zone in db.zone_get_all(context): + try: + nova = novaclient.OpenStack(zone.username, zone.password, + zone.api_url) + nova.authenticate() + except novaclient.exceptions.BadRequest, e: + url = zone.api_url + LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") + % locals()) + #TODO (dabo) - add logic for failure counts per zone, + # with escalation after a given number of failures. + continue + zone_method = getattr(nova.zones, method) + + def _error_trap(*args, **kwargs): + try: + return zone_method(*args, **kwargs) + except Exception as e: + if type(e) in errors_to_ignore: + return None + # TODO (dabo) - want to be able to re-raise here. + # Returning a string now; raising was causing issues. + # raise e + return "ERROR", "%s" % e + + res = pool.spawn(_error_trap, *args, **kwargs) + results.append((zone, res)) + pool.waitall() + return [(zone.id, res.wait()) for zone, res in results] + + def child_zone_helper(zone_list, func): """Fire off a command to each zone in the list. The return is [novaclient return objects] from each child zone. diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py new file mode 100644 index 000000000..b3d230bd2 --- /dev/null +++ b/nova/scheduler/zone_aware_scheduler.py @@ -0,0 +1,119 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Zone Aware Scheduler is a base class Scheduler for creating instances +across zones. 
There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. Filtering Hosts based on required instance capabilities +""" + +import operator + +from nova import log as logging +from nova.scheduler import api +from nova.scheduler import driver + +LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') + + +class ZoneAwareScheduler(driver.Scheduler): + """Base class for creating Zone Aware Schedulers.""" + + def _call_zone_method(self, context, method, specs): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs) + + def schedule_run_instance(self, context, topic='compute', specs={}, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone).""" + + if 'blob' in specs: + return self.provision_instance(context, topic, specs) + + # Create build plan and provision ... + build_plan = self.select(context, specs) + for item in build_plan: + self.provision_instance(context, topic, item) + + def provision_instance(context, topic, item): + """Create the requested instance in this Zone or a child zone.""" + pass + + def select(self, context, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children.""" + return self._schedule(context, "compute", *args, **kwargs) + + def schedule(self, context, topic, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + res = self._schedule(context, topic, *args, **kwargs) + # TODO(sirp): should this be a host object rather than a weight-dict? + if not res: + raise driver.NoValidHost(_('No hosts were available')) + return res[0] + + def _schedule(self, context, topic, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + #TODO(sandy): extract these from args. + num_instances = 1 + specs = {} + + # Filter local hosts based on requirements ... + host_list = self.filter_hosts(num_instances, specs) + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] + weighted = self.weigh_hosts(num_instances, specs, host_list) + + # Next, tack on the best weights from the child zones ... + child_results = self._call_zone_method(context, "select", + specs=specs) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. 
+ host_dict = { + "weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def filter_hosts(self, num, specs): + """Derived classes must override this method and return + a list of hosts in [(hostname, capability_dict)] format.""" + raise NotImplemented() + + def weigh_hosts(self, num, specs, hosts): + """Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format.""" + raise NotImplemented() diff --git a/nova/service.py b/nova/service.py index 2532b9df2..ab1238c3b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -240,6 +240,10 @@ class WsgiService(object): def wait(self): self.wsgi_app.wait() + def get_socket_info(self, api_name): + """Returns the (host, port) that an API was started on.""" + return self.wsgi_app.socket_info[api_name] + class ApiService(WsgiService): """Class for our nova-api service.""" @@ -318,8 +322,10 @@ def _run_wsgi(paste_config_file, apis): logging.debug(_('App Config: %(api)s\n%(config)r') % locals()) logging.info(_('Running %s API'), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, '%s_listen_port' % api), - getattr(FLAGS, '%s_listen' % api))) + apps.append((app, + getattr(FLAGS, '%s_listen_port' % api), + getattr(FLAGS, '%s_listen' % api), + api)) if len(apps) == 0: logging.error(_('No known API applications configured in %s.'), paste_config_file) diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 0860b51ac..dbdd0928a 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -63,31 +63,33 @@ class Foxinsocks(object): self._delete_tweedle)) return actions - def get_response_extensions(self): - response_exts = [] + def get_request_extensions(self): + request_exts = [] - def _goose_handler(res): + def _goose_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) - data['flavor']['googoose'] = "Gooey goo for chewy chewing!" - return data + data['flavor']['googoose'] = req.GET.get('chewing') + res.body = json.dumps(data) + return res - resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + req_ext1 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _goose_handler) - response_exts.append(resp_ext) + request_exts.append(req_ext1) - def _bands_handler(res): + def _bands_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) data['big_bands'] = 'Pig Bands!' 
- return data + res.body = json.dumps(data) + return res - resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + req_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _bands_handler) - response_exts.append(resp_ext2) - return response_exts + request_exts.append(req_ext2) + return request_exts def _add_tweedle(self, input_dict, req, id): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8b0729c35..bf51239e6 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -228,6 +228,9 @@ class FakeToken(object): # FIXME(sirp): let's not use id here id = 0 + def __getitem__(self, key): + return getattr(self, key) + def __init__(self, **kwargs): FakeToken.id += 1 self.id = FakeToken.id diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed1..544298602 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -45,10 +45,10 @@ class StubController(nova.wsgi.Controller): class StubExtensionManager(object): - def __init__(self, resource_ext=None, action_ext=None, response_ext=None): + def __init__(self, resource_ext=None, action_ext=None, request_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext - self.response_ext = response_ext + self.request_ext = request_ext def get_name(self): return "Tweedle Beetle Extension" @@ -71,11 +71,11 @@ class StubExtensionManager(object): action_exts.append(self.action_ext) return action_exts - def get_response_extensions(self): - response_exts = [] - if self.response_ext: - response_exts.append(self.response_ext) - return response_exts + def get_request_extensions(self): + request_extensions = [] + if self.request_ext: + request_extensions.append(self.request_ext) + return request_extensions class ExtensionControllerTest(unittest.TestCase): @@ -183,10 +183,10 @@ class ActionExtensionTest(unittest.TestCase): self.assertEqual(404, response.status_int) -class ResponseExtensionTest(unittest.TestCase): +class RequestExtensionTest(unittest.TestCase): def setUp(self): - super(ResponseExtensionTest, self).setUp() + super(RequestExtensionTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} @@ -195,42 +195,39 @@ class ResponseExtensionTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() - super(ResponseExtensionTest, self).tearDown() + super(RequestExtensionTest, self).tearDown() def test_get_resources_with_stub_mgr(self): - test_resp = "Gooey goo for chewy chewing!" 
- - def _resp_handler(res): + def _req_handler(req, res): # only handle JSON responses data = json.loads(res.body) - data['flavor']['googoose'] = test_resp - return data + data['flavor']['googoose'] = req.GET.get('chewing') + res.body = json.dumps(data) + return res - resp_ext = extensions.ResponseExtension('GET', + req_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', - _resp_handler) + _req_handler) - manager = StubExtensionManager(None, None, resp_ext) + manager = StubExtensionManager(None, None, req_ext) app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app, manager) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=bluegoo") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('bluegoo', response_data['flavor']['googoose']) def test_get_resources_with_mgr(self): - test_resp = "Gooey goo for chewy chewing!" - app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=newblue") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 5d5799b59..fa2e05033 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -20,6 +20,8 @@ import json import nova.db from nova import context +from nova import crypto +from nova import exception from nova import flags from nova import test from nova.api.openstack import zones @@ -79,6 +81,18 @@ def zone_capabilities(method, context): return dict() +GLOBAL_BUILD_PLAN = [ + dict(name='host1', weight=10, ip='10.0.0.1', zone='zone1'), + dict(name='host2', weight=9, ip='10.0.0.2', zone='zone2'), + dict(name='host3', weight=8, ip='10.0.0.3', zone='zone3'), + dict(name='host4', weight=7, ip='10.0.0.4', zone='zone4'), + ] + + +def zone_select(context, specs): + return GLOBAL_BUILD_PLAN + + class ZonesTest(test.TestCase): def setUp(self): super(ZonesTest, self).setUp() @@ -190,3 +204,31 @@ class ZonesTest(test.TestCase): self.assertEqual(res_dict['zone']['name'], 'darksecret') self.assertEqual(res_dict['zone']['cap1'], 'a;b') self.assertEqual(res_dict['zone']['cap2'], 'c;d') + + def test_zone_select(self): + FLAGS.build_plan_encryption_key = 'c286696d887c9aa0611bbb3e2025a45a' + self.stubs.Set(api, 'select', zone_select) + + req = webob.Request.blank('/v1.0/zones/select') + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + + self.assertTrue('weights' in res_dict) + + for item in res_dict['weights']: + blob = item['blob'] + decrypt = crypto.decryptor(FLAGS.build_plan_encryption_key) + secret_item = json.loads(decrypt(blob)) + found = False + for original_item in GLOBAL_BUILD_PLAN: + if original_item['name'] != secret_item['name']: + continue + found = True + for key in ('weight', 'ip', 'zone'): + self.assertEqual(secret_item[key], original_item[key]) + 
+ self.assertTrue(found) + self.assertEqual(len(item), 2) + self.assertTrue('weight' in item) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 5d7ca98b5..ecefc464a 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -21,24 +21,24 @@ from nova import flags FLAGS = flags.FLAGS flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver' -FLAGS.connection_type = 'fake' -FLAGS.fake_rabbit = True +FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver') +FLAGS['connection_type'].SetDefault('fake') +FLAGS['fake_rabbit'].SetDefault(True) flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' +FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 8 -FLAGS.num_networks = 2 -FLAGS.fake_network = True -FLAGS.image_service = 'nova.image.local.LocalImageService' +FLAGS['network_size'].SetDefault(8) +FLAGS['num_networks'].SetDefault(2) +FLAGS['fake_network'].SetDefault(True) +FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.num_shelves = 2 -FLAGS.blades_per_shelf = 4 -FLAGS.iscsi_num_targets = 8 -FLAGS.verbose = True -FLAGS.sqlite_db = "tests.sqlite" -FLAGS.use_ipv6 = True +FLAGS['num_shelves'].SetDefault(2) +FLAGS['blades_per_shelf'].SetDefault(4) +FLAGS['iscsi_num_targets'].SetDefault(8) +FLAGS['verbose'].SetDefault(True) +FLAGS['sqlite_db'].SetDefault("tests.sqlite") +FLAGS['use_ipv6'].SetDefault(True) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 2e5d67017..bc98921f0 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -160,7 +160,7 @@ class _IntegratedTestBase(test.TestCase): #self.start_service('network') self.start_service('scheduler') - self.auth_url = self._start_api_service() + self._start_api_service() self.context = IntegratedUnitTestContext(self.auth_url) @@ -174,8 +174,10 @@ class _IntegratedTestBase(test.TestCase): if not api_service: raise Exception("API Service was None") - auth_url = 'http://localhost:8774/v1.1' - return auth_url + self.api_service = api_service + + host, port = api_service.get_socket_info('osapi') + self.auth_url = 'http://%s:%s/v1.1' % (host, port) def tearDown(self): self.context.cleanup() @@ -184,6 +186,11 @@ class _IntegratedTestBase(test.TestCase): def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} + + # Auto-assign ports to allow concurrent tests + f['ec2_listen_port'] = 0 + f['osapi_listen_port'] = 0 + f['image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint new file mode 100644 index 000000000..715bca27a --- /dev/null +++ b/nova/tests/public_key/dummy.fingerprint @@ -0,0 +1 @@ +1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub new file mode 100644 index 000000000..d4cf2bc0d --- /dev/null +++ b/nova/tests/public_key/dummy.pub @@ -0,0 +1 @@ +ssh-dss 
AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 97f401b87..7c0331eff 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase): self.manager.delete_project(project) self.manager.delete_user(user) + def test_create_duplicate_key_pair(self): + """Test that, after successfully generating a keypair, + requesting a second keypair with the same name fails sanely""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + self.ec2.create_key_pair('test') + + try: + self.ec2.create_key_pair('test') + except EC2ResponseError, e: + if e.code == 'KeyPairExists': + pass + else: + self.fail("Unexpected EC2ResponseError: %s " + "(expected KeyPairExists)" % e.code) + else: + self.fail('Exception not raised.') + def test_get_all_security_groups(self): """Test that we can retrieve security groups""" self.expect_http() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index c8559615a..54c0454de 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -354,6 +354,36 @@ class CloudTestCase(test.TestCase): self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + def test_import_public_key(self): + # test when user provides all values + result1 = self.cloud.import_public_key(self.context, + 'testimportkey1', + 'mytestpubkey', + 'mytestfprint') + self.assertTrue(result1) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey1') + self.assertEqual('mytestpubkey', keydata['public_key']) + self.assertEqual('mytestfprint', keydata['fingerprint']) + # test when user omits fingerprint + pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') + f = open(pubkey_path + '/dummy.pub', 'r') + dummypub = f.readline().rstrip() + f.close + f = open(pubkey_path + '/dummy.fingerprint', 'r') + dummyfprint = f.readline().rstrip() + f.close + result2 = self.cloud.import_public_key(self.context, + 'testimportkey2', + dummypub) + self.assertTrue(result2) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey2') + self.assertEqual(dummypub, keydata['public_key']) + self.assertEqual(dummyfprint, keydata['fingerprint']) + def test_delete_key_pair(self): self._create_key('test') self.cloud.delete_key_pair(self.context, 'test') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 55e7ae0c4..9170837b6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -334,6 +334,28 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def 
test_finish_resize(self): + """Contrived test to ensure finish_resize doesn't raise anything""" + + def fake(*args, **kwargs): + pass + + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + context = self.context.elevated() + instance_id = self._create_instance() + self.compute.prep_resize(context, instance_id, 1) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + try: + self.compute.finish_resize(context, instance_id, + int(migration_ref['id']), {}) + except KeyError, e: + # Only catch key errors. We want other reasons for the test to + # fail to actually error out so we don't obscure anything + self.fail() + + self.compute.terminate_instance(self.context, instance_id) + def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py new file mode 100644 index 000000000..945d78794 --- /dev/null +++ b/nova/tests/test_crypto.py @@ -0,0 +1,48 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Crypto module. +""" + +from nova import crypto +from nova import test + + +class SymmetricKeyTestCase(test.TestCase): + """Test case for Encrypt/Decrypt""" + def test_encrypt_decrypt(self): + key = 'c286696d887c9aa0611bbb3e2025a45a' + plain_text = "The quick brown fox jumped over the lazy dog." + + # No IV supplied (all 0's) + encrypt = crypto.encryptor(key) + cipher_text = encrypt(plain_text) + self.assertNotEquals(plain_text, cipher_text) + + decrypt = crypto.decryptor(key) + plain = decrypt(cipher_text) + + self.assertEquals(plain_text, plain) + + # IV supplied ... 
+ iv = '562e17996d093d28ddb3ba695a2e6f58' + encrypt = crypto.encryptor(key, iv) + cipher_text = encrypt(plain_text) + self.assertNotEquals(plain_text, cipher_text) + + decrypt = crypto.decryptor(key, iv) + plain = decrypt(cipher_text) + + self.assertEquals(plain_text, plain) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 707300fcf..05319d91f 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase): self.assert_('runtime_answer' in self.global_FLAGS) self.assertEqual(self.global_FLAGS.runtime_answer, 60) + def test_long_vs_short_flags(self): + flags.DEFINE_string('duplicate_answer_long', 'val', 'desc', + flag_values=self.global_FLAGS) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + flags.DEFINE_integer('duplicate_answer', 60, 'desc', + flag_values=self.global_FLAGS) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') FLAGS.flags_unittest = 'bar' diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py new file mode 100644 index 000000000..b6b0fcc68 --- /dev/null +++ b/nova/tests/test_notifier.py @@ -0,0 +1,117 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
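The build-plan blobs exercised in test_zones.py and test_crypto.py above round-trip through crypto.encryptor()/crypto.decryptor(). A short illustrative sketch, assuming only the behaviour those tests assert (a 32-character hex key, default all-zero IV, JSON-encoded build-plan items); the key and item values are taken from the tests, not prescribed by the API:

import json

from nova import crypto

key = 'c286696d887c9aa0611bbb3e2025a45a'    # same key used in the tests
item = {'name': 'host1', 'weight': 10, 'ip': '10.0.0.1', 'zone': 'zone1'}

encrypt = crypto.encryptor(key)             # returns a callable; IV defaults to all zeroes
blob = encrypt(json.dumps(item))            # opaque blob carrying child-zone details

decrypt = crypto.decryptor(key)
assert json.loads(decrypt(blob)) == item    # the caller recovers the item intact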
+ +import nova + +from nova import context +from nova import flags +from nova import rpc +import nova.notifier.api +from nova.notifier.api import notify +from nova.notifier import no_op_notifier +from nova.notifier import rabbit_notifier +from nova import test + +import stubout + + +class NotifierTestCase(test.TestCase): + """Test case for notifications""" + def setUp(self): + super(NotifierTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + super(NotifierTestCase, self).tearDown() + + def test_send_notification(self): + self.notify_called = False + + def mock_notify(cls, *args): + self.notify_called = True + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + mock_notify) + + class Mock(object): + pass + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_called, True) + + def test_verify_message_format(self): + """A test to ensure changing the message format is prohibitively + annoying""" + + def message_assert(message): + fields = [('publisher_id', 'publisher_id'), + ('event_type', 'event_type'), + ('priority', 'WARN'), + ('payload', dict(a=3))] + for k, v in fields: + self.assertEqual(message[k], v) + self.assertTrue(len(message['message_id']) > 0) + self.assertTrue(len(message['timestamp']) > 0) + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + message_assert) + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + + def test_send_rabbit_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier') + self.mock_cast = False + + def mock_cast(cls, *args): + self.mock_cast = True + + class Mock(object): + pass + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + + self.assertEqual(self.mock_cast, True) + + def test_invalid_priority(self): + def mock_cast(cls, *args): + pass + + class Mock(object): + pass + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + self.assertRaises(nova.notifier.api.BadPriorityException, + notify, 'publisher_id', + 'event_type', 'not a priority', dict(a=3)) + + def test_rabbit_priority_queue(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier') + self.stubs.Set(nova.flags.FLAGS, 'notification_topic', + 'testnotify') + + self.test_topic = None + + def mock_cast(context, topic, msg): + self.test_topic = topic + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + notify('publisher_id', + 'event_type', 'DEBUG', dict(a=3)) + self.assertEqual(self.test_topic, 'testnotify.debug') diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 968ef9d6c..54b3f80fb 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -912,7 +912,8 @@ class SimpleDriverTestCase(test.TestCase): class FakeZone(object): - def __init__(self, api_url, username, password): + def __init__(self, id, api_url, username, password): + self.id = id self.api_url = api_url self.username = username self.password = password @@ -920,7 +921,7 @@ class FakeZone(object): def zone_get_all(context): return [ - FakeZone('http://example.com', 'bob', 'xxx'), + FakeZone(1, 'http://example.com', 'bob', 'xxx'), ] @@ -1037,7 +1038,7 @@ class FakeNovaClient(object): class DynamicNovaClientTest(test.TestCase): def test_issue_novaclient_command_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 
'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), zone, "servers", "get", 100).a, 10) @@ -1051,7 +1052,7 @@ class DynamicNovaClientTest(test.TestCase): zone, "servers", "pause", 100), None) def test_issue_novaclient_command_not_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "get", 100), None) @@ -1063,3 +1064,55 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. + expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py new file mode 100644 index 000000000..fdcde34c9 --- /dev/null +++ b/nova/tests/test_zone_aware_scheduler.py @@ -0,0 +1,119 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. 
+""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index ddea1a1f7..f8aea1f34 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): else: mapped_device = device - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) - - tmpdir = tempfile.mkdtemp() try: - # mount loopback to dir - out, err = utils.execute( - 'sudo', 'mount', mapped_device, tmpdir) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - + # We can only loopback mount raw images. 
If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) + + tmpdir = tempfile.mkdtemp() try: - inject_data_into_fs(tmpdir, key, net, utils.execute) + # mount loopback to dir + out, err = utils.execute( + 'sudo', 'mount', mapped_device, tmpdir) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + inject_data_into_fs(tmpdir, key, net, utils.execute) + finally: + # unmount device + utils.execute('sudo', 'umount', mapped_device) finally: - # unmount device - utils.execute('sudo', 'umount', mapped_device) + # remove temporary directory + utils.execute('rmdir', tmpdir) finally: - # remove temporary directory - utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions utils.execute('sudo', 'kpartx', '-d', device) diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..02c898fda 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,19 +21,10 @@ Handling of VM disk images. """ -import os.path -import shutil -import sys -import time -import urllib2 -import urlparse - from nova import context from nova import flags from nova import log as logging from nova import utils -from nova.auth import manager -from nova.auth import signer FLAGS = flags.FLAGS @@ -52,66 +43,6 @@ def fetch(image_id, path, _user, _project): return metadata -# NOTE(vish): The methods below should be unnecessary, but I'm leaving -# them in case the glance client does not work on windows. -def _fetch_image_no_curl(url, path, headers): - request = urllib2.Request(url) - for (k, v) in headers.iteritems(): - request.add_header(k, v) - - def urlretrieve(urlfile, fpath): - chunk = 1 * 1024 * 1024 - f = open(fpath, "wb") - while 1: - data = urlfile.read(chunk) - if not data: - break - f.write(data) - - urlopened = urllib2.urlopen(request) - urlretrieve(urlopened, path) - LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals()) - - -def _fetch_s3_image(image, path, user, project): - url = image_url(image) - - # This should probably move somewhere else, like e.g. a download_as - # method on User objects and at the same time get rewritten to use - # a web client. 
- headers = {} - headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - - (_, _, url_path, _, _, _) = urlparse.urlparse(url) - access = manager.AuthManager().get_access_key(user, project) - signature = signer.Signer(user.secret.encode()).s3_authorization(headers, - 'GET', - url_path) - headers['Authorization'] = 'AWS %s:%s' % (access, signature) - - if sys.platform.startswith('win'): - return _fetch_image_no_curl(url, path, headers) - else: - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '\'%s: %s\'' % (k, v)] - - cmd += ['-o', path] - return utils.execute(*cmd) - - -def _fetch_local_image(image, path, user, project): - source = _image_path(os.path.join(image, 'image')) - if sys.platform.startswith('win'): - return shutil.copy(source, path) - else: - return utils.execute('cp', source, path) - - -def _image_path(path): - return os.path.join(FLAGS.images_path, path) - - # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. def image_url(image): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c8f342aa8..9f6cd608c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -48,6 +48,8 @@ FLAGS = flags.FLAGS flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') flags.DEFINE_integer('block_device_creation_timeout', 10, 'time to wait for a block device to be created') +flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024, + 'maximum size in bytes of kernel or ramdisk images') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -444,6 +446,12 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES + elif image_type == ImageType.KERNEL_RAMDISK and \ + vdi_size > FLAGS.max_kernel_ramdisk_size: + max_size = FLAGS.max_kernel_ramdisk_size + raise exception.Error( + _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " + "max %(max_size)d bytes") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13d7d215b..45b04351d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,7 +25,6 @@ import M2Crypto import os import pickle import subprocess -import tempfile import uuid from nova import context @@ -1163,23 +1162,22 @@ class SimpleDH(object): return mpi def _run_ssl(self, text, which): - base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc ' - '-a -pass pass:%(shared)s -nosalt %(dec_flag)s') + base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' + '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' else: dec_flag = '' - fd, tmpfile = tempfile.mkstemp() - os.close(fd) - file(tmpfile, 'w').write(text) shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) + proc.stdin.write(text + '\n') + proc.stdin.close() proc.wait() err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read() + return proc.stdout.read().strip('\n') def encrypt(self, text): return self._run_ssl(text, 'enc') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 72284ac02..7821a4f7e 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -204,14 +204,17 @@ def _get_volume_id(path_or_id): if isinstance(path_or_id, int): return 
path_or_id # n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes + # :volume- is for remote volumes + # -volume- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + volume_id = path_or_id[path_or_id.find(':volume-') + 1:] if volume_id == path_or_id: - volume_id = path_or_id[path_or_id.find('-vol-') + 1:] - volume_id = volume_id.replace('--', '-') - return volume_id + volume_id = path_or_id[path_or_id.find('-volume--') + 1:] + volume_id = volume_id.replace('volume--', '') + else: + volume_id = volume_id.replace('volume-', '') + volume_id = volume_id[0:volume_id.find('-')] + return int(volume_id) def _get_target_host(iscsi_string): @@ -244,25 +247,23 @@ def _get_target(volume_id): Gets iscsi name and portal from volume name and host. For this method to work the following are needed: 1) volume_ref['host'] must resolve to something rather than loopback - 2) ietd must bind only to the address as resolved above - If any of the two conditions are not met, fall back on Flags. """ - volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), - volume_id) + volume_ref = db.volume_get(context.get_admin_context(), + volume_id) result = (None, None) try: - (r, _e) = utils.execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % - volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', + '-m', 'discovery', + '-t', 'sendtargets', + '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: - targets = r.splitlines() - if len(_e) == 0 and len(targets) == 1: - for target in targets: - if volume_id in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - iscsi_portal = location.split(",")[0] - result = (iscsi_name, iscsi_portal) + volume_name = "volume-%08x" % volume_id + for target in r.splitlines(): + if FLAGS.iscsi_ip_prefix in target and volume_name in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + result = (iscsi_name, iscsi_portal) return result diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index eb572f295..6d828e109 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -169,15 +169,15 @@ class XenAPIConnection(driver.ComputeDriver): def __init__(self, url, user, pw): super(XenAPIConnection, self).__init__() - session = XenAPISession(url, user, pw) - self._vmops = VMOps(session) - self._volumeops = VolumeOps(session) + self._session = XenAPISession(url, user, pw) + self._vmops = VMOps(self._session) + self._volumeops = VolumeOps(self._session) self._host_state = None @property def HostState(self): if not self._host_state: - self._host_state = HostState(self.session) + self._host_state = HostState(self._session) return self._host_state def init_host(self, host): diff --git a/nova/wsgi.py b/nova/wsgi.py index e60a8820d..ea9bb963d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -59,13 +59,16 @@ class Server(object): def __init__(self, threads=1000): self.pool = eventlet.GreenPool(threads) + self.socket_info = {} - def start(self, application, port, host='0.0.0.0', backlog=128): + def start(self, application, port, host='0.0.0.0', key=None, backlog=128): """Run a WSGI server with the given application.""" arg0 = sys.argv[0] logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) + if key: + 
self.socket_info[key] = socket.getsockname() def wait(self): """Wait until all servers have completed running.""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0a45f3873..4b45671ae 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -68,12 +68,12 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, area. """ conn = httplib.HTTPConnection(glance_host, glance_port) - conn.request('GET', '/images/%s' % image_id) + conn.request('GET', '/v1/images/%s' % image_id) resp = conn.getresponse() if resp.status == httplib.NOT_FOUND: raise Exception("Image '%s' not found in Glance" % image_id) elif resp.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) + raise Exception("Unexpected response from Glance %i" % resp.status) tar_cmd = "tar -zx --directory=%(staging_path)s" % locals() tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost new file mode 100644 index 000000000..a8428e841 --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -0,0 +1,183 @@ +#!/usr/bin/env python + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for reading/writing information to xenstore +# + +try: + import json +except ImportError: + import simplejson as json +import os +import random +import re +import subprocess +import tempfile +import time + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging("xenhost") + +host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") + + +def jsonify(fnc): + def wrapper(*args, **kwargs): + return json.dumps(fnc(*args, **kwargs)) + return wrapper + + +class TimeoutError(StandardError): + pass + + +def _run_command(cmd): + """Abstracts out the basics of issuing system commands. If the command + returns anything in stderr, a PluginError is raised with that information. + Otherwise, the output from stdout is returned. + """ + pipe = subprocess.PIPE + proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + proc.wait() + err = proc.stderr.read() + if err: + raise pluginlib.PluginError(err) + return proc.stdout.read() + + +@jsonify +def host_data(self, arg_dict): + """Runs the commands on the xenstore host to return the current status + information. + """ + cmd = "xe host-list | grep uuid" + resp = _run_command(cmd) + host_uuid = resp.split(":")[-1].strip() + cmd = "xe host-param-list uuid=%s" % host_uuid + resp = _run_command(cmd) + parsed_data = parse_response(resp) + # We have the raw dict of values. Extract those that we need, + # and convert the data types as needed. 
+ ret_dict = cleanup(parsed_data) + return ret_dict + + +def parse_response(resp): + data = {} + for ln in resp.splitlines(): + if not ln: + continue + mtch = host_data_pattern.match(ln.strip()) + try: + k, v = mtch.groups() + data[k] = v + except AttributeError: + # Not a valid line; skip it + continue + return data + + +def cleanup(dct): + """Take the raw KV pairs returned and translate them into the + appropriate types, discarding any we don't need. + """ + def safe_int(val): + """Integer values will either be string versions of numbers, + or empty strings. Convert the latter to nulls. + """ + try: + return int(val) + except ValueError: + return None + + def strip_kv(ln): + return [val.strip() for val in ln.split(":", 1)] + + out = {} + +# sbs = dct.get("supported-bootloaders", "") +# out["host_supported-bootloaders"] = sbs.split("; ") +# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "") +# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "") +# out["host_local-cache-sr"] = dct.get("local-cache-sr", "") + out["host_memory"] = omm = {} + omm["total"] = safe_int(dct.get("memory-total", "")) + omm["overhead"] = safe_int(dct.get("memory-overhead", "")) + omm["free"] = safe_int(dct.get("memory-free", "")) + omm["free-computed"] = safe_int( + dct.get("memory-free-computed", "")) + +# out["host_API-version"] = avv = {} +# avv["vendor"] = dct.get("API-version-vendor", "") +# avv["major"] = safe_int(dct.get("API-version-major", "")) +# avv["minor"] = safe_int(dct.get("API-version-minor", "")) + + out["host_uuid"] = dct.get("uuid", None) + out["host_name-label"] = dct.get("name-label", "") + out["host_name-description"] = dct.get("name-description", "") +# out["host_host-metrics-live"] = dct.get( +# "host-metrics-live", "false") == "true" + out["host_hostname"] = dct.get("hostname", "") + out["host_ip_address"] = dct.get("address", "") + oc = dct.get("other-config", "") + out["host_other-config"] = ocd = {} + if oc: + for oc_fld in oc.split("; "): + ock, ocv = strip_kv(oc_fld) + ocd[ock] = ocv +# out["host_capabilities"] = dct.get("capabilities", "").split("; ") +# out["host_allowed-operations"] = dct.get( +# "allowed-operations", "").split("; ") +# lsrv = dct.get("license-server", "") +# out["host_license-server"] = ols = {} +# if lsrv: +# for lspart in lsrv.split("; "): +# lsk, lsv = lspart.split(": ") +# if lsk == "port": +# ols[lsk] = safe_int(lsv) +# else: +# ols[lsk] = lsv +# sv = dct.get("software-version", "") +# out["host_software-version"] = osv = {} +# if sv: +# for svln in sv.split("; "): +# svk, svv = strip_kv(svln) +# osv[svk] = svv + cpuinf = dct.get("cpu_info", "") + out["host_cpu_info"] = ocp = {} + if cpuinf: + for cpln in cpuinf.split("; "): + cpk, cpv = strip_kv(cpln) + if cpk in ("cpu_count", "family", "model", "stepping"): + ocp[cpk] = safe_int(cpv) + else: + ocp[cpk] = cpv +# out["host_edition"] = dct.get("edition", "") +# out["host_external-auth-service-name"] = dct.get( +# "external-auth-service-name", "") + return out + + +if __name__ == "__main__": + XenAPIPlugin.dispatch( + {"host_data": host_data}) diff --git a/run_tests.sh b/run_tests.sh index e3a0bd243..9aa555484 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -59,7 +59,13 @@ function run_tests { function run_pep8 { echo "Running pep8 ..." + # Opt-out files from pep8 + ignore_scripts="*.sh:*nova-debug:*clean-vlans" + ignore_files="*eventlet-patch:*pip-requires" + ignore_dirs="*ajaxterm*" + GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs" srcfiles=`find bin -type f ! 
-name "nova.conf*"` + srcfiles+=" `find tools/*`" srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} } diff --git a/tools/install_venv.py b/tools/install_venv.py index 8149a3afa..812b1dd0f 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -31,119 +31,125 @@ import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') -TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' -PY_VERSION = "python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) +TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' +PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + def die(message, *args): - print >>sys.stderr, message % args - sys.exit(1) + print >>sys.stderr, message % args + sys.exit(1) + def check_python_version(): - if sys.version_info < (2, 6): - die("Need Python Version >= 2.6") + if sys.version_info < (2, 6): + die("Need Python Version >= 2.6") + def run_command(cmd, redirect_output=True, check_exit_code=True): - """ - Runs a command in an out-of-process shell, returning the - output of that command. Working directory is ROOT. - """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None + """ + Runs a command in an out-of-process shell, returning the + output of that command. Working directory is ROOT. + """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None - proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return output + proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return output -HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip()) -HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip()) +HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], + check_exit_code=False).strip()) +HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], + check_exit_code=False).strip()) def check_dependencies(): - """Make sure virtualenv is in the path.""" - - if not HAS_VIRTUALENV: - print 'not found.' - # Try installing it via easy_install... - if HAS_EASY_INSTALL: - print 'Installing virtualenv via easy_install...', - if not (run_command(['which', 'easy_install']) and - run_command(['easy_install', 'virtualenv'])): - die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') - print 'done.' - print 'done.' + """Make sure virtualenv is in the path.""" + + if not HAS_VIRTUALENV: + print 'not found.' + # Try installing it via easy_install... + if HAS_EASY_INSTALL: + print 'Installing virtualenv via easy_install...', + if not (run_command(['which', 'easy_install']) and + run_command(['easy_install', 'virtualenv'])): + die('ERROR: virtualenv not found.\n\nNova development' + ' requires virtualenv, please install it using your' + ' favorite package management tool') + print 'done.' + print 'done.' 
def create_virtualenv(venv=VENV): - """Creates the virtual environment and installs PIP only into the - virtual environment - """ - print 'Creating venv...', - run_command(['virtualenv', '-q', '--no-site-packages', VENV]) - print 'done.' - print 'Installing pip in virtualenv...', - if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): - die("Failed to install pip.") - print 'done.' + """Creates the virtual environment and installs PIP only into the + virtual environment + """ + print 'Creating venv...', + run_command(['virtualenv', '-q', '--no-site-packages', VENV]) + print 'done.' + print 'Installing pip in virtualenv...', + if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): + die("Failed to install pip.") + print 'done.' def install_dependencies(venv=VENV): - print 'Installing dependencies with pip (this can take a while)...' - # Install greenlet by hand - just listing it in the requires file does not - # get it in stalled in the right order - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, 'greenlet'], - redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], - redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA], - redirect_output=False) - - - # Tell the virtual env how to "import nova" - pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "nova.pth") - f = open(pthfile, 'w') - f.write("%s\n" % ROOT) - # Patch eventlet (see FAQ # 1485) - patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch') - patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "eventlet", - "green", "subprocess.py") - patch_cmd = "patch %s %s" % (patchfile, patchsrc) - os.system(patch_cmd) + print 'Installing dependencies with pip (this can take a while)...' + # Install greenlet by hand - just listing it in the requires file does not + # get it in stalled in the right order + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, + 'greenlet'], redirect_output=False) + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', + PIP_REQUIRES], redirect_output=False) + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, + TWISTED_NOVA], redirect_output=False) + + # Tell the virtual env how to "import nova" + pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", + "nova.pth") + f = open(pthfile, 'w') + f.write("%s\n" % ROOT) + # Patch eventlet (see FAQ # 1485) + patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch') + patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", + "eventlet", "green", "subprocess.py") + patch_cmd = "patch %s %s" % (patchfile, patchsrc) + os.system(patch_cmd) def print_help(): - help = """ - Nova development environment setup is complete. + help = """ + Nova development environment setup is complete. - Nova development uses virtualenv to track and manage Python dependencies - while in development and testing. + Nova development uses virtualenv to track and manage Python dependencies + while in development and testing. 
- To activate the Nova virtualenv for the extent of your current shell session - you can run: + To activate the Nova virtualenv for the extent of your current shell + session you can run: - $ source .nova-venv/bin/activate + $ source .nova-venv/bin/activate - Or, if you prefer, you can run commands in the virtualenv on a case by case - basis by running: + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: - $ tools/with_venv.sh <your command> + $ tools/with_venv.sh <your command> - Also, make test will automatically use the virtualenv. - """ - print help + Also, make test will automatically use the virtualenv. + """ + print help def main(argv): - check_python_version() - check_dependencies() - create_virtualenv() - install_dependencies() - print_help() + check_python_version() + check_dependencies() + create_virtualenv() + install_dependencies() + print_help() if __name__ == '__main__': - main(sys.argv) + main(sys.argv) |
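Note on the SimpleDH change in nova/virt/xenapi/vmops.py above: the patch drops the temporary file and feeds the plaintext to openssl on stdin instead. Below is a minimal standalone sketch of that pattern, not the nova code itself: it uses subprocess.communicate() rather than the _runproc helper (which is not shown in this diff), and it assumes openssl prints nothing to stderr on success, which is the same assumption the patched method makes.

```python
import subprocess


def _openssl(text, shared, decrypt=False):
    # Same openssl invocation as the patched _run_ssl, minus the shell.
    cmd = ['openssl', 'enc', '-aes-128-cbc', '-a',
           '-pass', 'pass:%s' % shared, '-nosalt']
    if decrypt:
        cmd.append('-d')
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() writes the plaintext to stdin, closes it, and drains
    # stdout/stderr, so the child cannot block on a full output pipe.
    out, err = proc.communicate(text + '\n')
    if err:
        raise RuntimeError('OpenSSL error: %s' % err)
    return out.strip('\n')


if __name__ == '__main__':
    secret = 'a-shared-passphrase'  # stand-in for the DH shared secret
    token = _openssl('some text', secret)
    print _openssl(token, secret, decrypt=True)  # -> some text
```

communicate() is used here because the write/wait/read ordering in the patched method is only safe while the encrypted payloads stay smaller than the pipe buffer; for the short DH-exchanged passwords in vmops.py that holds, but it is not a general-purpose idiom.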

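Note on the new xenhost plugin: host_data shells out to `xe host-param-list uuid=...` and matches each output line against host_data_pattern before cleanup() converts the values. A small self-contained sketch of that parsing step follows; the sample output is illustrative only, not captured from a real XenServer host.

```python
import re

# Same pattern the plugin compiles: "key ( RO/RW) : value" lines.
host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")

sample = """\
uuid ( RO)            : 83f2c775-0d0a-4b45-ac52-17683b2f7a2d
name-label ( RW)      : xenserver-host-1
memory-total ( RO)    : 4294967296
"""


def parse_response(resp):
    data = {}
    for ln in resp.splitlines():
        mtch = host_data_pattern.match(ln.strip())
        if mtch:
            # Lines that do not look like "key (...) : value" are skipped,
            # mirroring the AttributeError handling in the plugin.
            key, value = mtch.groups()
            data[key] = value
    return data


if __name__ == '__main__':
    print parse_response(sample)
```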