From 77d7e022fd5f2c8709a6784cc83429494d126a3b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 9 Dec 2010 13:59:50 -0800 Subject: Converted the instance table to use a uuid instead of an auto_increment ID and a random internal_id. I had to use a String(32) column with hex and not a String(16) with bytes because SQLAlchemy doesn't like non-unicode strings going in for String types. We could try another type, but I didn't want a primary_key on blob types. --- nova/api/ec2/cloud.py | 70 +++++++++++++++----------------- nova/api/openstack/servers.py | 4 +- nova/compute/api.py | 26 ++++++------ nova/db/api.py | 8 ++-- nova/db/sqlalchemy/api.py | 32 ++++----------- nova/db/sqlalchemy/models.py | 23 ++++++----- nova/tests/api/openstack/test_servers.py | 9 ++-- nova/tests/cloud_unittest.py | 4 +- 8 files changed, 78 insertions(+), 98 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 05f8c3d0b..a831b2a62 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -72,18 +72,14 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -def ec2_id_to_internal_id(ec2_id): - """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)""" - return int(ec2_id[2:], 36) +def ec2_id_to_id(ec2_id): + """Convert an ec2 ID to an instance ID.""" + return ec2_id[2:] -def internal_id_to_ec2_id(internal_id): - """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])""" - digits = [] - while internal_id != 0: - internal_id, remainder = divmod(internal_id, 36) - digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) - return "i-%s" % ''.join(reversed(digits)) +def id_to_ec2_id(instance_id): + """Convert an instance ID to an ec2 ID.""" + return "i-%s" % instance_id class CloudController(object): @@ -153,7 +149,7 @@ class CloudController(object): hostname = instance_ref['hostname'] floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) - ec2_id = internal_id_to_ec2_id(instance_ref['internal_id']) + ec2_id = id_to_ec2_id(instance_ref['id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -437,8 +433,8 @@ class CloudController(object): def get_console_output(self, context, instance_id, **kwargs): # instance_id is passed in as a list of instances ec2_id = instance_id[0] - internal_id = ec2_id_to_internal_id(ec2_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + instance_ref = self.compute_api.get_instance(context, instance_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -464,8 +460,8 @@ class CloudController(object): instance_ec2_id = None instance_data = None if volume.get('instance', None): - internal_id = volume['instance']['internal_id'] - instance_ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = volume['instance']['id'] + instance_ec2_id = id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} @@ -534,8 +530,8 @@ class CloudController(object): raise exception.ApiError("Volume status must be available") if volume_ref['attach_status'] == "attached": raise exception.ApiError("Volume is already attached") - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + instance_ref = self.compute_api.get_instance(context, instance_id) host = instance_ref['host'] rpc.cast(context,
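#NOTE: recap of the id scheme this commit introduces; a minimal sketch
# using only the stdlib, with an illustrative value:
#   import uuid
#   uid = uuid.uuid1().hex             # 32 hex chars, fits String(32)
#   id_to_ec2_id(uid)                  # 'i-' + uid
#   assert ec2_id_to_id('i-' + uid) == uid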
db.queue_get_for(context, FLAGS.compute_topic, host), @@ -570,11 +566,11 @@ class CloudController(object): # If the instance doesn't exist anymore, # then we need to call detach blind db.volume_detached(context) - internal_id = instance_ref['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = instance_ref['id'] + ec2_id = id_to_ec2_id(instance_id) return {'attachTime': volume_ref['attach_time'], 'device': volume_ref['mountpoint'], - 'instanceId': internal_id, + 'instanceId': instance_id, 'requestId': context.request_id, 'status': volume_ref['attach_status'], 'volumeId': volume_ref['id']} @@ -619,8 +615,8 @@ class CloudController(object): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - internal_id = instance['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = instance['id'] + ec2_id = id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = instance['image_id'] i['instanceState'] = { @@ -673,8 +669,8 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] + ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.user.is_admin(): @@ -709,8 +705,8 @@ class CloudController(object): return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + instance_ref = self.compute_api.get_instance(context, instance_id) fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) @@ -755,7 +751,7 @@ class CloudController(object): description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), security_group=kwargs.get('security_group'), - generate_hostname=internal_id_to_ec2_id) + generate_hostname=id_to_ec2_id) return self._format_run_instances(context, instances[0]['reservation_id']) @@ -764,27 +760,27 @@ class CloudController(object): instance_id is a kwarg so its name cannot be modified.""" logging.debug("Going to start terminating instances") for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.delete_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.delete_instance(context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.reboot(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.reboot(context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.rescue(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.rescue(context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.unrescue(context, internal_id) + instance_id = 
ec2_id_to_id(instance_id) + self.compute_api.unrescue(context, instance_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -794,8 +790,8 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - internal_id = ec2_id_to_internal_id(ec2_id) - inst = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + inst = self.compute_api.get_instance(context, instance_id) db.instance_update(context, inst['id'], kwargs) return True diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6f2f6fed9..f8f40b764 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -46,7 +46,7 @@ def _entity_detail(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='display_name', id='internal_id') + flavorId='instance_type', name='display_name', id='id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -61,7 +61,7 @@ def _entity_detail(inst): def _entity_inst(inst): """ Filters all model attributes save for id and name """ - return dict(server=dict(id=inst['internal_id'], name=inst['display_name'])) + return dict(server=dict(id=inst['id'], name=inst['display_name'])) class Controller(wsgi.Controller): diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..f310c575f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,9 +36,9 @@ from nova.db import base FLAGS = flags.FLAGS -def generate_default_hostname(internal_id): +def generate_default_hostname(instance_id): """Default function to generate a hostname given an instance reference.""" - return str(internal_id) + return str(instance_id) class ComputeAPI(base.Base): @@ -127,7 +127,6 @@ class ComputeAPI(base.Base): **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] - internal_id = instance['internal_id'] elevated = context.elevated() if not security_groups: @@ -138,9 +137,9 @@ class ComputeAPI(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(internal_id)) + updates = dict(hostname=generate_hostname(instance_id)) if 'display_name' not in instance: - updates['display_name'] = "Server %s" % internal_id + updates['display_name'] = "Server %s" % instance_id instance = self.update_instance(context, instance_id, **updates) instances.append(instance) @@ -199,17 +198,16 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + logging.debug("Going to try and terminate %s" % instance_id) try: - instance = self.db.instance_get_by_internal_id(context, - instance_id) + instance = self.db.instance_get_by_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning("Instance %s was not found during terminate", instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning("Instance %s is already being terminated", instance_id) return @@ -264,11 +262,11 @@ class ComputeAPI(base.Base): return self.db.instance_get_all(context) def get_instance(self, context, instance_id): - return self.db.instance_get_by_internal_id(context, instance_id) + return self.db.instance_get_by_id(context, instance_id) def reboot(self, context, 
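#NOTE: elsewhere in this patch, the sqlalchemy backend also coerces legacy
# integer ids into uuid form; e.g., with the stdlib:
#   import uuid
#   uuid.UUID(int=1).hex == '00000000000000000000000000000001'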
instance_id): """Reboot the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -277,7 +275,7 @@ class ComputeAPI(base.Base): def rescue(self, context, instance_id): """Rescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -286,7 +284,7 @@ class ComputeAPI(base.Base): def unrescue(self, context, instance_id): """Unrescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc2443..e607ac938 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -304,9 +304,9 @@ def instance_get_floating_address(context, instance_id): return IMPL.instance_get_floating_address(context, instance_id) -def instance_get_by_internal_id(context, internal_id): - """Get an instance by internal id.""" - return IMPL.instance_get_by_internal_id(context, internal_id) +def instance_get_by_id(context, instance_id): + """Get an instance by id.""" + return IMPL.instance_get_by_id(context, instance_id) def instance_is_vpn(context, instance_id): @@ -658,7 +658,7 @@ def security_group_get_all(context): def security_group_get(context, security_group_id): - """Get security group by its internal id.""" + """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..0505b77a6 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,7 @@ Implementation of SQLAlchemy backend. """ -import random +import uuid import warnings from nova import db @@ -525,28 +525,18 @@ def fixed_ip_update(context, address, values): ################### -#TODO(gundlach): instance_create and volume_create are nearly identical -#and should be refactored. I expect there are other copy-and-paste -#functions between the two of them as well. @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. - 'internal_id' is auto-generated and should not be specified. """ instance_ref = models.Instance() instance_ref.update(values) session = get_session() with session.begin(): - while instance_ref.internal_id == None: - # Instances have integer internal ids. 
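#NOTE: the retry loop being removed here is what the uuid default replaces;
# under the new schema creation reduces to roughly:
#   instance_ref = models.Instance()   # 'id' filled by make_uuid() on insert
#   instance_ref.update(values)
#   instance_ref.save(session=session)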
- internal_id = random.randint(0, 2 ** 31 - 1) - if not instance_internal_id_exists(context, internal_id, - session=session): - instance_ref.internal_id = internal_id instance_ref.save(session=session) return instance_ref @@ -652,37 +642,31 @@ def instance_get_all_by_reservation(context, reservation_id): @require_context -def instance_get_by_internal_id(context, internal_id): +def instance_get_by_id(context, instance_id): session = get_session() + if type(instance_id) is int: + instance_id = uuid.UUID(int=instance_id).hex + if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ filter_by(project_id=context.project_id).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('Instance %s not found' % (internal_id)) + raise exception.NotFound('Instance %s not found' % (instance_id)) return result -@require_context -def instance_internal_id_exists(context, internal_id, session=None): - if not session: - session = get_session() - return session.query(exists().\ - where(models.Instance.internal_id == internal_id)).\ - one()[0] - - @require_context def instance_get_fixed_address(context, instance_id): session = get_session() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a921..a0b6fff89 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,6 +20,7 @@ SQLAlchemy models for nova data. """ import datetime +import uuid from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema @@ -39,6 +40,10 @@ FLAGS = flags.FLAGS BASE = declarative_base() +def make_uuid(): + return uuid.uuid1().hex + + class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -154,11 +159,13 @@ class Service(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(Integer, primary_key=True) - internal_id = Column(Integer, unique=True) + id = Column(String(32), primary_key=True, default=make_uuid) - admin_pass = Column(String(255)) + @property + def name(self): + return "instance-%s" % self.id + admin_pass = Column(String(255)) user_id = Column(String(255)) project_id = Column(String(255)) @@ -170,10 +177,6 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - @property - def name(self): - return "instance-%d" % self.internal_id - image_id = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) @@ -238,7 +241,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
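#NOTE: every ForeignKey('instances.id') below changes type in step with the
# new primary key; a String(32) holds the hex form of a uuid:
#   import uuid
#   assert len(uuid.uuid1().hex) == 32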
- instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -311,7 +314,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(String(32), ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -431,7 +434,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce..d2b0e0fc8 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,8 +56,8 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id, internal_id=id) + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id) class ServersTest(unittest.TestCase): @@ -71,8 +71,7 @@ class ServersTest(unittest.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) - self.stubs.Set(nova.db.api, 'instance_get_by_internal_id', - return_server) + self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', @@ -107,7 +106,7 @@ class ServersTest(unittest.TestCase): def test_create_instance(self): def instance_create(context, inst): - return {'id': 1, 'internal_id': 1, 'display_name': ''} + return {'id': '1', 'display_name': ''} def server_update(context, id, params): return instance_create(context, id) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 9886a2449..f63eed65a 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -113,7 +113,7 @@ class CloudTestCase(test.TrialTestCase): self.cloud.allocate_address(self.context) inst = db.instance_create(self.context, {}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) @@ -289,7 +289,7 @@ class CloudTestCase(test.TrialTestCase): def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.update_instance(self.context, ec2_id, display_name='c00l 1m4g3') inst = db.instance_get(self.context, inst['id']) -- cgit From e601ab4a1068029b2f0b79789ed506fda1332404 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 16 Dec 2010 20:59:27 
-0600 Subject: XenAPI Snapshots first cut --- nova/compute/manager.py | 30 ++++++++++++++- nova/virt/libvirt_conn.py | 7 ++++ nova/virt/xenapi/vm_utils.py | 91 ++++++++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/vmops.py | 43 +++++++++++++++++---- nova/virt/xenapi_conn.py | 7 ++++ 5 files changed, 170 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7eb60e262..8fc8d5e1a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -134,8 +134,10 @@ class ComputeManager(manager.Manager): # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) + #FIXME(sirp): Hacking reboot to snapshot @exception.wrap_exception - def reboot_instance(self, context, instance_id): + def XXreboot_instance(self, context, instance_id): + #def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -156,6 +158,32 @@ class ComputeManager(manager.Manager): self.driver.reboot(instance_ref) self._update_state(context, instance_id) + #FIXME(sirp): Hacking reboot to snapshot + @exception.wrap_exception + def reboot_instance(self, context, instance_id): + #def snapshot_instance(self, context, instance_id): + """Snapshot an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + #FIXME(sirp): update_state currently only refreshes the state field + # if we add is_snapshotting, we will need this refreshed too, + # potentially? + self._update_state(context, instance_id) + + #TODO(sirp): check for is_snapshotting=True here? + if instance_ref['state'] != power_state.RUNNING: + logging.warn('trying to snapshot a non-running ' + 'instance: %s (state: %s expected: %s)', + instance_ref['internal_id'], + instance_ref['state'], + power_state.RUNNING) + + logging.debug('instance %s: snapshotting', instance_ref['name']) + #TODO(sirp): set is_snapshotting=True here? + self.driver.snapshot(instance_ref) + #self._update_state(context, instance_id) + @exception.wrap_exception def rescue_instance(self, context, instance_id): """Rescue an instance on this server.""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5a8c71850..112ac7d37 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -265,6 +265,13 @@ class LibvirtConnection(object): raise exception.NotFound("No disk at %s" % mount_device) virt_dom.detachDevice(xml) + @exception.wrap_exception + def snapshot(self, instance): + """ Create snapshot from a running VM instance """ + #TODO(sirp): only exists for XenAPI driver for now + raise NotImplementedError( + "Instance snapshotting is not supported for libvirt at this time") + @exception.wrap_exception def reboot(self, instance): self.destroy(instance, False) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..c3e8b30b7 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions.
""" +import time #FIXME(sirp): take this out, replace with greenthread.sleep import logging import urllib from xml.dom import minidom @@ -148,6 +149,63 @@ class VMHelper(): vm_ref, network_ref) return vif_ref + + @classmethod + def create_snapshot(cls, session, vm_ref, label): + logging.debug("Snapshotting VM %s with label '%s'...", vm_ref, label) + + #TODO(sirp): Add quiesce and VSS locking support when Windows support + # is added + + #TODO(sirp): Make safe_lookup_vdi for assert? + vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) + if vdi_refs is None or len(vdi_refs) != 1: + raise Exception("Unexpected number of VDIs (%s) found for VM %s" + % (len(vdi_refs), vm_ref)) + vdi_ref = vdi_refs[0] + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + vdi_uuid = vdi_rec["uuid"] + + #NOTE(sirp): We may need to wait for our parent to coalese with his + # parent + original_parent_uuid = get_vhd_parent_uuid(session, vdi_ref) + + task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) + template_vm_ref = session.wait_for_task(task) + logging.debug('Created snapshot %s from VM %s.', template_vm_ref, + vm_ref) + + #NOTE(sirp): wait for any coalescing + #NOTE(sirp): for some reason re-scan wasn't occuring automatically on + # XS5.6 + #TODO(sirp): clean this up, perhaps use LoopingCall + sr_ref = vdi_rec["SR"] + scan_sr(session, sr_ref) + parent_uuid = get_vhd_parent_uuid(session, vdi_ref) + time.sleep(5) + while original_parent_uuid and (parent_uuid != original_parent_uuid): + scan_sr(session, sr_ref) + logging.debug( + "Parent %s doesn't match original parent %s, " + "waiting for coalesce...", parent_uuid, original_parent_uuid) + #TODO(sirp): make this non-blocking + time.sleep(5) + parent_uuid = get_vhd_parent_uuid(session, vdi_ref) + + vdi_uuids = [vdi_uuid, parent_uuid] + return template_vm_ref, vdi_uuids + + + @classmethod + def upload_image(cls, session, vdi_uuids, glance_label): + logging.debug("Asking xapi to upload %s as '%s'", vdi_uuids, + glance_label) + kwargs = {'vdi_uuids': ','.join(vdi_uuids), + 'glance_label': glance_label} + task = session.async_call_plugin('glance', 'put_vdis', kwargs) + session.wait_for_task(task) + + @classmethod def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place @@ -258,3 +316,36 @@ def get_rrd(host, uuid): return xml.read() except IOError: return None + + +#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to +# use that implmenetation +def get_vhd_parent(session, vdi_rec): + """ + Returns the VHD parent of the given VDI record, as a (ref, rec) pair. + Returns None if we're at the root of the tree. 
+ """ + if 'vhd-parent' in vdi_rec['sm_config']: + parent_uuid = vdi_rec['sm_config']['vhd-parent'] + #NOTE(sirp): changed xenapi -> get_xenapi() + parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) + parent_rec = session.get_xenapi().VDI.get_record(parent_ref) + #NOTE(sirp): changed log -> logging + logging.debug("VHD %s has parent %s", vdi_rec['uuid'], parent_ref) + return parent_ref, parent_rec + else: + return None + +def get_vhd_parent_uuid(session, vdi_ref): + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + ret = get_vhd_parent(session, vdi_rec) + if ret: + parent_ref, parent_rec = ret + return parent_rec["uuid"] + else: + return None + +def scan_sr(session, sr_ref): + logging.debug("Re-scanning SR %s", sr_ref) + task = session.call_xenapi('Async.SR.scan', sr_ref) + session.wait_for_task(task) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..50b5ec3d6 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -78,6 +78,31 @@ class VMOps(object): self._session.call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + + def snapshot(self, instance): + """ Create snapshot from a running VM instance """ + #TODO(sirp): Add quiesce and VSS locking support when Windows support + # is added + vm_ref = VMHelper.lookup(self._session, instance.name) + + #TODO(sirp): this is the label in Xen, we need to add a human friendly + # label that we store in paralalx + label = "%s-snapshot" % instance.name + glance_name = "MySnapshot" + + try: + template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( + self._session, vm_ref, label) + except XenAPI.Failure, exc: + logging.error("Unable to Snapshot %s: %s", vm_ref, exc) + return + + try: + # call plugin to ship snapshot off to glance + VMHelper.upload_image( + self._session, template_vdi_uuids, glance_name) + finally: + self._destroy(template_vm_ref, shutdown=False) def reboot(self, instance): """ Reboot VM instance """ @@ -89,20 +114,24 @@ class VMOps(object): self._session.wait_for_task(task) def destroy(self, instance): - """ Destroy VM instance """ vm = VMHelper.lookup(self._session, instance.name) + return self._destroy(vm, shutdown=True) + + def _destroy(self, vm, shutdown=True): + """ Destroy VM instance """ if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. 
return # Get the VDIs related to the VM vdis = VMHelper.lookup_vm_vdis(self._session, vm) - try: - task = self._session.call_xenapi('Async.VM.hard_shutdown', - vm) - self._session.wait_for_task(task) - except XenAPI.Failure, exc: - logging.warn(exc) + if shutdown: + try: + task = self._session.call_xenapi('Async.VM.hard_shutdown', + vm) + self._session.wait_for_task(task) + except XenAPI.Failure, exc: + logging.warn(exc) # Disk clean-up if vdis: for vdi in vdis: diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6beb08f5e..8aacec507 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -115,6 +115,13 @@ class XenAPIConnection(object): """ Create VM instance """ self._vmops.spawn(instance) + + def snapshot(self, instance): + """ Create snapshot from a running VM instance """ + #TODO(sirp): Add quiesce and VSS locking support when Windows support + # is added + self._vmops.snapshot(instance) + def reboot(self, instance): """ Reboot VM instance """ self._vmops.reboot(instance) -- cgit From ae54d5bdf3e0615c5be9ebe4f03f7256f22484ee Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 17 Dec 2010 14:21:03 -0600 Subject: Add wait_for_vhd_coalesce --- nova/virt/xenapi/vm_utils.py | 52 +++++++++++++++++++++++++++++--------------- nova/virt/xenapi/vmops.py | 3 +++ 2 files changed, 37 insertions(+), 18 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c3e8b30b7..1b18b0a01 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -175,26 +175,12 @@ class VMHelper(): logging.debug('Created snapshot %s from VM %s.', template_vm_ref, vm_ref) - #NOTE(sirp): wait for any coalescing - #NOTE(sirp): for some reason re-scan wasn't occuring automatically on - # XS5.6 - #TODO(sirp): clean this up, perhaps use LoopingCall sr_ref = vdi_rec["SR"] - scan_sr(session, sr_ref) - parent_uuid = get_vhd_parent_uuid(session, vdi_ref) - time.sleep(5) - while original_parent_uuid and (parent_uuid != original_parent_uuid): - scan_sr(session, sr_ref) - logging.debug( - "Parent %s doesn't match original parent %s, " - "waiting for coalesce...", parent_uuid, original_parent_uuid) - #TODO(sirp): make this non-blocking - time.sleep(5) - parent_uuid = get_vhd_parent_uuid(session, vdi_ref) - - vdi_uuids = [vdi_uuid, parent_uuid] - return template_vm_ref, vdi_uuids + parent_uuid = wait_for_vhd_coalesce( + session, sr_ref, vdi_ref, original_parent_uuid) + #TODO(sirp): we need to assert only one parent, not parents two deep + return template_vm_ref, [vdi_uuid, parent_uuid] @classmethod def upload_image(cls, session, vdi_uuids, glance_label): @@ -336,6 +322,7 @@ def get_vhd_parent(session, vdi_rec): else: return None + def get_vhd_parent_uuid(session, vdi_ref): vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) ret = get_vhd_parent(session, vdi_rec) @@ -345,7 +332,36 @@ def get_vhd_parent_uuid(session, vdi_ref): else: return None + def scan_sr(session, sr_ref): logging.debug("Re-scanning SR %s", sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(task) + + +def wait_for_vhd_coalesce(session, sr_ref, vdi_ref, original_parent_uuid): + """ TODO Explain why coalescing has to occur here """ + #TODO(sirp): we need to timeout this req after a while + #NOTE(sirp): for some reason re-scan wasn't occuring automatically on + # XS5.6 + #TODO(sirp): clean this up, perhaps use LoopingCall + def _get_vhd_parent_uuid_with_refresh(first_time): + if not first_time: + #TODO(sirp): should this interval 
be a gflag? + #TODO(sirp): make this non-blocking + time.sleep(5) + scan_sr(session, sr_ref) + return get_vhd_parent_uuid(session, vdi_ref) + + parent_uuid = _get_vhd_parent_uuid_with_refresh(first_time=True) + logging.debug( + "Parent %s doesn't match original parent %s, " + "waiting for coalesce...", parent_uuid, original_parent_uuid) + while original_parent_uuid and (parent_uuid != original_parent_uuid): + logging.debug( + "Parent %s doesn't match original parent %s, " + "waiting for coalesce...", parent_uuid, original_parent_uuid) + parent_uuid = _get_vhd_parent_uuid_with_refresh(first_time=False) + + return parent_uuid + diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 50b5ec3d6..988c54d6d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -81,6 +81,7 @@ class VMOps(object): def snapshot(self, instance): """ Create snapshot from a running VM instance """ + logging.debug("Starting snapshot for VM %s", instance) #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added vm_ref = VMHelper.lookup(self._session, instance.name) @@ -104,6 +105,8 @@ class VMOps(object): finally: self._destroy(template_vm_ref, shutdown=False) + logging.debug("Finished snapshot and upload for VM %s", instance) + def reboot(self, instance): """ Reboot VM instance """ instance_name = instance.name -- cgit From 650a0cdbc854d37fd62348ce34a14ef91ccbabad Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 17 Dec 2010 19:17:39 -0600 Subject: Cleaned up TODOs, using flags now --- nova/compute/manager.py | 3 ++- nova/flags.py | 3 +++ nova/virt/xenapi/vm_utils.py | 50 +++++++++++++++++++++++--------------------- nova/virt/xenapi/vmops.py | 15 ++++++------- nova/virt/xenapi_conn.py | 9 +++++--- 5 files changed, 43 insertions(+), 37 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8fc8d5e1a..b2584773a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -181,7 +181,8 @@ class ComputeManager(manager.Manager): logging.debug('instance %s: snapshotting', instance_ref['name']) #TODO(sirp): set is_snapshotting=True here? - self.driver.snapshot(instance_ref) + glance_name = "MySnapshot3" + self.driver.snapshot(instance_ref, glance_name) #self._update_state(context, instance_id) @exception.wrap_exception diff --git a/nova/flags.py b/nova/flags.py index 87444565a..3318af058 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -212,6 +212,9 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') +DEFINE_integer('glance_port', 9292, 'glance port') +DEFINE_string('glance_host', '127.0.0.1', 'glance host') +DEFINE_string('glance_storage_location', 'swift://username:api_key@auth.api.rackspacecloud.com/v1.0/cloudservers', 'glance storage location') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 1b18b0a01..729f0daaf 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,11 +19,12 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" -import time #FIXME(sirp): take this out, replace with greenthread.sleep import logging +import pickle import urllib from xml.dom import minidom +from eventlet import event from nova import flags from nova import utils from nova.auth.manager import AuthManager @@ -166,8 +167,6 @@ class VMHelper(): vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec["uuid"] - #NOTE(sirp): We may need to wait for our parent to coalese with his - # parent original_parent_uuid = get_vhd_parent_uuid(session, vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) @@ -186,8 +185,14 @@ class VMHelper(): def upload_image(cls, session, vdi_uuids, glance_label): logging.debug("Asking xapi to upload %s as '%s'", vdi_uuids, glance_label) - kwargs = {'vdi_uuids': ','.join(vdi_uuids), - 'glance_label': glance_label} + + params = {'vdi_uuids': vdi_uuids, + 'glance_label': glance_label, + 'glance_storage_location': FLAGS.glance_storage_location, + 'glance_host': FLAGS.glance_host, + 'glance_port': FLAGS.glance_port} + + kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'put_vdis', kwargs) session.wait_for_task(task) @@ -341,27 +346,24 @@ def scan_sr(session, sr_ref): def wait_for_vhd_coalesce(session, sr_ref, vdi_ref, original_parent_uuid): """ TODO Explain why coalescing has to occur here """ - #TODO(sirp): we need to timeout this req after a while #NOTE(sirp): for some reason re-scan wasn't occuring automatically on # XS5.6 - #TODO(sirp): clean this up, perhaps use LoopingCall - def _get_vhd_parent_uuid_with_refresh(first_time): - if not first_time: - #TODO(sirp): should this interval be a gflag? - #TODO(sirp): make this non-blocking - time.sleep(5) - scan_sr(session, sr_ref) - return get_vhd_parent_uuid(session, vdi_ref) - - parent_uuid = _get_vhd_parent_uuid_with_refresh(first_time=True) - logging.debug( - "Parent %s doesn't match original parent %s, " - "waiting for coalesce...", parent_uuid, original_parent_uuid) - while original_parent_uuid and (parent_uuid != original_parent_uuid): - logging.debug( - "Parent %s doesn't match original parent %s, " - "waiting for coalesce...", parent_uuid, original_parent_uuid) - parent_uuid = _get_vhd_parent_uuid_with_refresh(first_time=False) + #TODO(sirp): we need to timeout this req after a while + def _poll_vhds(): + scan_sr(session, sr_ref) + parent_uuid = get_vhd_parent_uuid(session, vdi_ref) + if original_parent_uuid and (parent_uuid != original_parent_uuid): + logging.debug( + "Parent %s doesn't match original parent %s, " + "waiting for coalesce...", parent_uuid, original_parent_uuid) + else: + done.send(parent_uuid) + + done = event.Event() + loop = utils.LoopingCall(_poll_vhds) + loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True) + parent_uuid = done.wait() + loop.stop() return parent_uuid diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 988c54d6d..a44492d57 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -79,29 +79,26 @@ class VMOps(object): logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - def snapshot(self, instance): + def snapshot(self, instance, name): """ Create snapshot from a running VM instance """ - logging.debug("Starting snapshot for VM %s", instance) + #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added + + logging.debug("Starting snapshot for VM %s", instance) vm_ref = VMHelper.lookup(self._session, instance.name) - #TODO(sirp): this is the label in Xen, we need to add a 
human friendly - # label that we store in paralalx label = "%s-snapshot" % instance.name - glance_name = "MySnapshot" - try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, vm_ref, label) except XenAPI.Failure, exc: logging.error("Unable to Snapshot %s: %s", vm_ref, exc) return - + try: # call plugin to ship snapshot off to glance - VMHelper.upload_image( - self._session, template_vdi_uuids, glance_name) + VMHelper.upload_image(self._session, template_vdi_uuids, name) finally: self._destroy(template_vm_ref, shutdown=False) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 8aacec507..92e66d32d 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -77,7 +77,10 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') - +flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', + 5.0, + 'The interval used for polling of coalescing vhds.' + ' Used only if connection_type=xenapi.') XenAPI = None @@ -116,11 +119,11 @@ class XenAPIConnection(object): self._vmops.spawn(instance) - def snapshot(self, instance): + def snapshot(self, instance, name): """ Create snapshot from a running VM instance """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - self._vmops.snapshot(instance) + self._vmops.snapshot(instance, name) def reboot(self, instance): """ Reboot VM instance """ -- cgit From 40c8a8a1a1e834c4e5bb61c853397a90475d83ff Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Mon, 20 Dec 2010 17:36:10 -0600 Subject: Typo fix, stubbing out to use admin project for now --- nova/api/openstack/__init__.py | 4 +++- nova/api/openstack/images.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b9ecbd9b8..178838dfd 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -89,7 +89,9 @@ class AuthMiddleware(wsgi.Middleware): if not user: return faults.Fault(webob.exc.HTTPUnauthorized()) - req.environ['nova.context'] = context.RequestContext(user, user) + #FIXME(sirp): stubbing project to admin for now + project = manager.AuthManager().get_project('admin') + req.environ['nova.context'] = context.RequestContext(user, project) return self.application diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 4a0a8e6f1..78849223d 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -46,8 +46,9 @@ class Controller(wsgi.Controller): def detail(self, req): """Return all public images in detail.""" + ctxt = req.environ['nova.context'] try: - images = self._service.detail(req.environ['nova.context']) + images = self._service.detail(ctxt) images = nova.api.openstack.limited(images, req) except NotImplementedError: # Emulate detail() using repeated calls to show() -- cgit From c364724a0dc7a658058fcb167af66ee7eb5bcd2a Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 01:41:28 -0500 Subject: Use paste.deploy for running the api server. 
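The factories added below follow the paste.deploy calling conventions: an app factory returns a WSGI application, while a filter factory returns a callable that wraps one, so a deployment can be assembled from a paste config file. Composed by hand, the EC2 stack would look roughly like this (a sketch, not code from this patch):

    from nova.api import ec2
    app = ec2.executor_factory({})             # paste.app_factory shape
    app = ec2.authorizer_factory({})(app)      # paste.filter_factory shape
    app = ec2.router_factory({})(app)
    app = ec2.authenticate_factory({})(app)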
--- nova/api/__init__.py | 1 - nova/api/cloudpipe/__init__.py | 3 ++ nova/api/ec2/__init__.py | 50 ++++++++++++++++++++++++++++++++-- nova/api/ec2/metadatarequesthandler.py | 3 ++ nova/api/openstack/__init__.py | 13 +++++++++ 5 files changed, 67 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 80f9f2109..92b495e8c 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -29,7 +29,6 @@ import routes import webob.dec from nova import flags -from nova import utils from nova import wsgi from nova.api import cloudpipe from nova.api import ec2 diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py index 6d40990a8..47349d9f9 100644 --- a/nova/api/cloudpipe/__init__.py +++ b/nova/api/cloudpipe/__init__.py @@ -67,3 +67,6 @@ class API(wsgi.Application): project_id = self.get_project_id_from_ip(req.remote_addr) cert = self.str_params['cert'] return crypto.sign_csr(urllib.unquote(cert), project_id) + +def cloudpipe_factory(global_opts, **local_opts): + return API() diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..50cb18078 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -225,10 +225,9 @@ class Executor(wsgi.Application): args = req.environ['ec2.action_args'] api_request = apirequest.APIRequest(controller, action) + result = None try: result = api_request.send(context, **args) - req.headers['Content-Type'] = 'text/xml' - return result except exception.ApiError as ex: if ex.code: @@ -238,6 +237,13 @@ class Executor(wsgi.Application): # TODO(vish): do something more useful with unknown exceptions except Exception as ex: return self._error(req, type(ex).__name__, str(ex)) + else: + resp = webob.Response() + resp.status = 200 + resp.headers['Content-Type'] = 'text/xml' + resp.body = str(result) + return resp + def _error(self, req, code, message): logging.error("%s: %s", code, message) @@ -249,3 +255,43 @@ class Executor(wsgi.Application): '%s' '?' 
% (code, message)) return resp + +class Versions(wsgi.Application): + + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all EC2 versions.""" + # available api versions + versions = [ + '1.0', + '2007-01-19', + '2007-03-01', + '2007-08-29', + '2007-10-10', + '2007-12-15', + '2008-02-01', + '2008-09-01', + '2009-04-04', + ] + return ''.join('%s\n' % v for v in versions) + +def authenticate_factory(global_args, **local_args): + def authenticator(app): + return Authenticate(app) + return authenticator + +def router_factory(global_args, **local_args): + def router(app): + return Router(app) + return router + +def authorizer_factory(global_args, **local_args): + def authorizer(app): + return Authorizer(app) + return authorizer + +def executor_factory(global_args, **local_args): + return Executor() + +def versions_factory(global_args, **local_args): + return Versions() diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414cc..fffefb97b 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -72,3 +72,6 @@ class MetadataRequestHandler(object): if data is None: raise webob.exc.HTTPNotFound() return self.print_data(data) + +def metadata_factory(global_args, **local_args): + return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b9ecbd9b8..cb825cf41 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -210,3 +210,16 @@ def limited(items, req): limit = min(1000, limit) range_end = offset + limit return items[offset:range_end] + +def auth_factory(global_conf, **local_conf): + def auth(app): + return AuthMiddleware(app) + return auth + +def ratelimit_factory(global_conf, **local_conf): + def rl(app): + return RateLimitingMiddleware(app) + return rl + +def router_factory(global_cof, **local_conf): + return APIRouter() -- cgit From 8c8b289f2626b1d9bad76bc5d4819904ace5800d Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 02:21:01 -0500 Subject: Remove ec2 config chain and move openstack versions to top-level application. --- nova/api/openstack/__init__.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index cb825cf41..f3686997f 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,7 +20,6 @@ WSGI middleware for OpenStack API controllers. """ -import json import time import logging @@ -41,7 +40,6 @@ from nova.api.openstack import images from nova.api.openstack import ratelimiting from nova.api.openstack import servers from nova.api.openstack import sharedipgroups -from nova.auth import manager FLAGS = flags.FLAGS @@ -193,6 +191,19 @@ class APIRouter(wsgi.Router): super(APIRouter, self).__init__(mapper) +class Versions(wsgi.Application): + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all OpenStack API versions.""" + response = { + "versions": [ + dict(status="CURRENT", id="v1.0")]} + metadata = { + "application/xml": { + "attributes": dict(version=["status", "id"])}} + return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + def limited(items, req): """Return a slice of items according to requested offset and limit. 
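#NOTE: a GET against the Versions application above would serialize to,
# e.g., in its JSON form:
#   {"versions": [{"status": "CURRENT", "id": "v1.0"}]}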
@@ -223,3 +234,6 @@ def ratelimit_factory(global_conf, **local_conf): def router_factory(global_cof, **local_conf): return APIRouter() + +def versions_factory(global_conf, **local_conf): + return Versions() -- cgit From 729468d0be1bf97c869b1169414154a76d9b96b2 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 19:20:28 -0500 Subject: Burnin support by specifying a specific host via availability_zone for running instances and volumes on. --- nova/api/ec2/cloud.py | 38 ++++++++++++++ nova/compute/api.py | 4 +- nova/db/sqlalchemy/models.py | 4 +- nova/scheduler/driver.py | 5 ++ nova/scheduler/simple.py | 26 +++++++++ nova/tests/scheduler_unittest.py | 110 +++++++++++++++++++++++++++++++++++++-- 6 files changed, 182 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8375c4399..b181a947f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -189,9 +189,45 @@ class CloudController(object): return data def describe_availability_zones(self, context, **kwargs): + if ('zone_name' in kwargs and + 'verbose' in kwargs['zone_name'] and + context.is_admin): + return self._describe_availability_zones_verbose(context, + **kwargs) + else: + return self._describe_availability_zones(context, **kwargs) + + def _describe_availability_zones(self, context, **kwargs): return {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} + def _describe_availability_zones_verbose(self, context, **kwargs): + rv = {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + services = db.service_get_all(context) + now = db.get_time() + hosts = [] + for host in [service['host'] for service in services]: + if not host in hosts: + hosts.append(host) + for host in hosts: + rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, + 'zoneState': ''}) + hsvcs = [service for service in services if service['host'] == host] + for svc in hsvcs: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = (delta.seconds <= FLAGS.service_down_time) + art = (alive and ":-)") or "XXX" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + rv['availabilityZoneInfo'].append({ + 'zoneName': '| |- %s' % svc['binary'], + 'zoneState': '%s %s %s' % (active, art, + svc['updated_at'])}) + return rv + def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] @@ -757,6 +793,8 @@ class CloudController(object): description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), security_group=kwargs.get('security_group'), + availability_zone=kwargs.get('placement', {}).get( + 'AvailabilityZone'), generate_hostname=internal_id_to_ec2_id) return self._format_run_instances(context, instances[0]['reservation_id']) diff --git a/nova/compute/api.py b/nova/compute/api.py index c740814da..299083bf4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -57,6 +57,7 @@ class ComputeAPI(base.Base): max_count=1, kernel_id=None, ramdisk_id=None, display_name='', description='', key_name=None, key_data=None, security_group='default', + availability_zone=None, generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -121,7 +122,8 @@ class ComputeAPI(base.Base): 'display_name': display_name, 'display_description': description, 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'availability_zone': availability_zone} elevated = context.elevated() instances 
= [] diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 96d981571..41e8cfefa 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -148,7 +148,7 @@ class Service(BASE, NovaBase): binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=False) + disabled = Column(Boolean, default=True) class Instance(BASE, NovaBase): @@ -210,6 +210,8 @@ class Instance(BASE, NovaBase): launched_at = Column(DateTime) terminated_at = Column(DateTime) + availability_zone = Column(String(255)) + # User editable field for display in user-facing UIs display_name = Column(String(255)) display_description = Column(String(255)) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f271d573f..340f40310 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -37,6 +37,11 @@ class NoValidHost(exception.Error): pass +class WillNotSchedule(exception.Error): + """The specified host is not up or doesn't exist.""" + pass + + class Scheduler(object): """The base class that all Scheduler clases should inherit from.""" diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f5093656..9e85ba952 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) + if instance_ref['availability_zone'] and context.is_admin: + zone, _x, host = instance_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-compute') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s is not alive" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.instance_update(context, instance_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_compute_sorted(context) for result in results: (service, instance_cores) = result @@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) + if (':' in volume_ref['availability_zone']) and context.is_admin: + zone, _x, host = volume_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s not available" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_volume_sorted(context) for result in results: (service, volume_gigabytes) = result diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index d1756b8fb..92262cc7d 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -19,6 +19,8 @@ Tests For Scheduler """ +import datetime + from nova import context from nova import db from nova import flags @@ -93,7 +95,7 @@ class SimpleDriverTestCase(test.TestCase): self.manager.delete_user(self.user) 
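#NOTE: the scheduler changes above key off availability_zone strings of the
# form '<zone>:<host>', e.g.:
#   zone, _x, host = 'nova:host1'.partition(':')   # ('nova', ':', 'host1')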
self.manager.delete_project(self.project) - def _create_instance(self): + def _create_instance(self, **kwargs): """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' @@ -104,6 +106,7 @@ class SimpleDriverTestCase(test.TestCase): inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 inst['vcpus'] = 1 + inst['availability_zone'] = kwargs.get('availability_zone', None) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -112,9 +115,29 @@ class SimpleDriverTestCase(test.TestCase): vol['image_id'] = 'ami-test' vol['reservation_id'] = 'r-fakeres' vol['size'] = 1 + vol['availability_zone'] = 'test' return db.volume_create(self.context, vol)['id'] - def test_hosts_are_up(self): + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): """Ensures driver can find the hosts that are up""" # NOTE(vish): constructing service without create method # because we are going to use it without queue @@ -128,8 +151,12 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(len(hosts), 2) + self.assertEqual(2, len(hosts)) compute1.kill() compute2.kill() @@ -145,6 +172,10 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -156,6 +187,67 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + 
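#NOTE: because Service.disabled now defaults to True (models.py change
# above), each test enables its services before scheduling, e.g.:
#   s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
#   db.service_update(self.context, s1['id'], {'disabled': False})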
self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_schedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'disabled': False, + 'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + db.service_get_by_args(self.context, 'host1', 'nova-compute') + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + def test_too_many_cores(self): """Ensures we don't go over max cores""" compute1 = service.Service('host1', @@ -168,6 +260,10 @@ 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -201,6 +297,10 @@ 'volume', FLAGS.volume_manager) volume2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -224,6 +324,10 @@ 'volume', FLAGS.volume_manager) volume2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): -- cgit From af4d6e84c67b8f59f63ef0275778fa897dac9e95 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 22 Dec 2010 13:01:33 -0600 Subject: Getting Snapshots to work with cloudservers command-line tool --- nova/api/openstack/__init__.py | 3 +-- nova/api/openstack/images.py | 8 +++++--- nova/compute/api.py | 9 +++++++++ nova/compute/manager.py | 11 +++-------- nova/flags.py | 2 +- nova/image/glance.py | 5 ++++- nova/virt/xenapi/vm_utils.py | 8 +++----- 7 files changed, 26 insertions(+), 20 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 44f3aafec..dba008111 100644 ---
a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -89,8 +89,7 @@ class AuthMiddleware(wsgi.Middleware): if not user: return faults.Fault(webob.exc.HTTPUnauthorized()) - #FIXME(sirp): stubbing project to admin for now - project = manager.AuthManager().get_project('admin') + project = manager.AuthManager().get_project(FLAGS.default_project) req.environ['nova.context'] = context.RequestContext(user, project) return self.application diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 78849223d..de072b28f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -66,9 +66,11 @@ class Controller(wsgi.Controller): raise faults.Fault(exc.HTTPNotFound()) def create(self, req): - # Only public images are supported for now, so a request to - # make a backup of a server cannot be supproted. - raise faults.Fault(exc.HTTPNotFound()) + ctxt = req.environ['nova.context'] + env = self._deserialize(req.body, req) + data = {'instance_id': env["image"]["serverId"], + 'name': env["image"]["name"] } + return dict(image=self._service.create(ctxt, data)) def update(self, req, id): # Users may not modify public images, and that's all that diff --git a/nova/compute/api.py b/nova/compute/api.py index c740814da..6864e694e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -271,6 +271,15 @@ class ComputeAPI(base.Base): def get_instance(self, context, instance_id): return self.db.instance_get_by_internal_id(context, instance_id) + def snapshot(self, context, instance_id, name): + """Snapshot the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "snapshot_instance", + "args": {"instance_id": instance['id'], "name": name}}) + def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 56b11c4ed..f74eacaf5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -134,10 +134,8 @@ class ComputeManager(manager.Manager): # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) - #FIXME(sirp): Hacking reboot to snapshot @exception.wrap_exception - def XXreboot_instance(self, context, instance_id): - #def reboot_instance(self, context, instance_id): + def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -158,10 +156,8 @@ class ComputeManager(manager.Manager): self.driver.reboot(instance_ref) self._update_state(context, instance_id) - #FIXME(sirp): Hacking reboot to snapshot @exception.wrap_exception - def reboot_instance(self, context, instance_id): - #def snapshot_instance(self, context, instance_id): + def snapshot_instance(self, context, instance_id, name): """Snapshot an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -181,8 +177,7 @@ class ComputeManager(manager.Manager): logging.debug('instance %s: snapshotting', instance_ref['name']) #TODO(sirp): set is_snapshotting=True here? 
- glance_name = "MySnapshot3" - self.driver.snapshot(instance_ref, glance_name) + self.driver.snapshot(instance_ref, name) #self._update_state(context, instance_id) @exception.wrap_exception diff --git a/nova/flags.py b/nova/flags.py index 51956d5cf..cf4614b8d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -214,7 +214,6 @@ DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') DEFINE_integer('glance_port', 9292, 'glance port') DEFINE_string('glance_host', '127.0.0.1', 'glance host') -DEFINE_string('glance_storage_location', 'swift://username:api_key@auth.api.rackspacecloud.com/v1.0/cloudservers', 'glance storage location') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -236,6 +235,7 @@ DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') +DEFINE_string('default_project', 'openstack', 'default project for openstack') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') DEFINE_string('default_instance_type', 'm1.small', diff --git a/nova/image/glance.py b/nova/image/glance.py index 1ca6cf2eb..84e4eaa7d 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -24,6 +24,7 @@ import urlparse import webob.exc +from nova.compute import api as compute_api from nova import utils from nova import flags from nova import exception @@ -202,7 +203,9 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises AlreadyExists if the image already exist. """ - return self.parallax.add_image_metadata(data) + instance_id = data["instance_id"] + name = data["name"] + compute_api.ComputeAPI().snapshot(context, instance_id, name) def update(self, context, image_id, data): """Replace the contents of the given image with the new data. 
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a4501cdbb..7a93c44c7 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -183,13 +183,11 @@ class VMHelper(): return template_vm_ref, [vdi_uuid, parent_uuid] @classmethod - def upload_image(cls, session, vdi_uuids, glance_label): - logging.debug("Asking xapi to upload %s as '%s'", vdi_uuids, - glance_label) + def upload_image(cls, session, vdi_uuids, image_name): + logging.debug("Asking xapi to upload %s as '%s'", vdi_uuids, image_name) params = {'vdi_uuids': vdi_uuids, - 'glance_label': glance_label, - 'glance_storage_location': FLAGS.glance_storage_location, + 'image_name': image_name, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port} -- cgit From 7c03b9aa49b390e13cfbe8315a62c660778ef854 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 22 Dec 2010 14:00:31 -0600 Subject: i18n support for xs-snaps --- nova/compute/manager.py | 11 ++++------ nova/flags.py | 2 +- nova/virt/libvirt_conn.py | 4 ++-- nova/virt/xenapi/vm_utils.py | 50 +++++++++++++++++++++++++------------------- nova/virt/xenapi/vmops.py | 26 +++++++++++++---------- nova/virt/xenapi_conn.py | 2 -- 6 files changed, 51 insertions(+), 44 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f9124aa6c..cf459d96a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -162,23 +162,20 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - #FIXME(sirp): update_state currently only refreshes the state field + #NOTE(sirp): update_state currently only refreshes the state field # if we add is_snapshotting, we will need this refreshed too, # potentially? self._update_state(context, instance_id) - #TODO(sirp): check for is_snapshotting=True here? + logging.debug(_('instance %s: snapshotting'), instance_ref['name']) if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)', + logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s expected: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING) - logging.debug('instance %s: snapshotting', instance_ref['name']) - #TODO(sirp): set is_snapshotting=True here?
self.driver.snapshot(instance_ref, name) - #self._update_state(context, instance_id) @exception.wrap_exception def rescue_instance(self, context, instance_id): diff --git a/nova/flags.py index b61447c76..d773a7e4c 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -213,7 +213,7 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', utils.get_my_ip(),, 'glance host') +DEFINE_string('glance_host', utils.get_my_ip(), 'glance host') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 3b5954b3b..c8d0aa306 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -239,9 +239,9 @@ class LibvirtConnection(object): @exception.wrap_exception def snapshot(self, instance): """ Create snapshot from a running VM instance """ - #TODO(sirp): only exists for XenAPI driver for now raise NotImplementedError( - "Instance snapshotting is not supported for libvirt at this time") + _("Instance snapshotting is not supported for libvirt " + "at this time")) @exception.wrap_exception def reboot(self, instance): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f4e608e4d..5b1c36418 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -153,17 +153,22 @@ class VMHelper(): @classmethod - def create_snapshot(cls, session, vm_ref, label): - logging.debug("Snapshotting VM %s with label '%s'...", vm_ref, label) + def create_snapshot(cls, session, instance_id, vm_ref, label): + logging.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added #TODO(sirp): Make safe_lookup_vdi for assert?
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) - if vdi_refs is None or len(vdi_refs) != 1: - raise Exception("Unexpected number of VDIs (%s) found for VM %s" - % (len(vdi_refs), vm_ref)) + if vdi_refs is None: + raise Exception(_("No VDIs found for VM %s") % vm_ref) + else: + num_vdis = len(vdi_refs) + if num_vdis != 1: + raise Exception(_("Unexpected number of VDIs (%s) found for " + "VM %s") % (num_vdis, vm_ref)) + vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec["uuid"] @@ -171,20 +176,21 @@ class VMHelper(): original_parent_uuid = get_vhd_parent_uuid(session, vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) - template_vm_ref = session.wait_for_task(task) - logging.debug('Created snapshot %s from VM %s.', template_vm_ref, + template_vm_ref = session.wait_for_task(instance_id, task) + logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, vm_ref) sr_ref = vdi_rec["SR"] parent_uuid = wait_for_vhd_coalesce( - session, sr_ref, vdi_ref, original_parent_uuid) + session, instance_id, sr_ref, vdi_ref, original_parent_uuid) #TODO(sirp): we need to assert only one parent, not parents two deep return template_vm_ref, [vdi_uuid, parent_uuid] @classmethod - def upload_image(cls, session, vdi_uuids, image_name): - logging.debug("Asking xapi to upload %s as '%s'", vdi_uuids, image_name) + def upload_image(cls, session, instance_id, vdi_uuids, image_name): + logging.debug(_("Asking xapi to upload %s as '%s'"), + vdi_uuids, image_name) params = {'vdi_uuids': vdi_uuids, 'image_name': image_name, @@ -193,11 +199,11 @@ class VMHelper(): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'put_vdis', kwargs) - session.wait_for_task(task) + session.wait_for_task(instance_id, task) @classmethod - def fetch_image(cls, session, image, user, project, use_sr): + def fetch_image(cls, session, instance_id, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for its kernel and ramdisk (if external kernels are being used). 
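A side note on the upload_image() call above (a sketch, not from the patch): xapi plugin arguments have to be flat strings, which is why the params dict is pickled into a single 'params' value before session.async_call_plugin('glance', 'put_vdis', kwargs) ships it to dom0. The round trip, with made-up values:

    import pickle

    params = {'vdi_uuids': ['leaf-vdi-uuid', 'base-copy-uuid'],
              'image_name': 'my-snapshot',
              'glance_host': '127.0.0.1',
              'glance_port': 9292}
    kwargs = {'params': pickle.dumps(params)}
    # The plugin unpickles the same dict on the dom0 side.
    assert pickle.loads(kwargs['params']) == params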
@@ -214,7 +220,7 @@ class VMHelper(): if use_sr: args['add_partition'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - uuid = session.wait_for_task(task) + uuid = session.wait_for_task(instance_id, task) return uuid @classmethod @@ -317,7 +323,7 @@ def get_vhd_parent(session, vdi_rec): parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) #NOTE(sirp): changed log -> logging - logging.debug("VHD %s has parent %s", vdi_rec['uuid'], parent_ref) + logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) return parent_ref, parent_rec else: return None @@ -333,25 +339,27 @@ def get_vhd_parent_uuid(session, vdi_ref): return None -def scan_sr(session, sr_ref): - logging.debug("Re-scanning SR %s", sr_ref) +def scan_sr(session, instance_id, sr_ref): + logging.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(task) + session.wait_for_task(instance_id, task) -def wait_for_vhd_coalesce(session, sr_ref, vdi_ref, original_parent_uuid): +def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, + original_parent_uuid): """ TODO Explain why coalescing has to occur here """ #NOTE(sirp): for some reason re-scan wasn't occuring automatically on # XS5.6 #TODO(sirp): we need to timeout this req after a while def _poll_vhds(): - scan_sr(session, sr_ref) + scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): logging.debug( - "Parent %s doesn't match original parent %s, " - "waiting for coalesce...", parent_uuid, original_parent_uuid) + _("Parent %s doesn't match original parent %s, " + "waiting for coalesce..."), + parent_uuid, original_parent_uuid) else: done.send(parent_uuid) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d011f4489..787f959a5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -67,11 +67,14 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) vdi_uuid = VMHelper.fetch_image( - self._session, instance.image_id, user, project, True) + self._session, instance.id, instance.image_id, user, project, + True) kernel = VMHelper.fetch_image( - self._session, instance.kernel_id, user, project, False) + self._session, instance.id, instance.kernel_id, user, project, + False) ramdisk = VMHelper.fetch_image( - self._session, instance.ramdisk_id, user, project, False) + self._session, instance.id, instance.ramdisk_id, user, project, + False) vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = VMHelper.create_vm( self._session, instance, kernel, ramdisk) @@ -90,24 +93,25 @@ class VMOps(object): #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - logging.debug("Starting snapshot for VM %s", instance) + logging.debug(_("Starting snapshot for VM %s"), instance) vm_ref = VMHelper.lookup(self._session, instance.name) label = "%s-snapshot" % instance.name try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( - self._session, vm_ref, label) + self._session, instance.id, vm_ref, label) except XenAPI.Failure, exc: - logging.error("Unable to Snapshot %s: %s", vm_ref, exc) + logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc) return try: # call plugin to ship snapshot off to glance - VMHelper.upload_image(self._session, template_vdi_uuids, name) + 
VMHelper.upload_image( + self._session, instance.id, template_vdi_uuids, name) finally: - self._destroy(template_vm_ref, shutdown=False) + self._destroy(instance, template_vm_ref, shutdown=False) - logging.debug("Finished snapshot and upload for VM %s", instance) + logging.debug(_("Finished snapshot and upload for VM %s"), instance) def reboot(self, instance): """Reboot VM instance""" @@ -121,9 +125,9 @@ class VMOps(object): def destroy(self, instance): """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) - return self._destroy(vm, shutdown=True) + return self._destroy(instance, vm, shutdown=True) - def _destroy(self, vm, shutdown=True): + def _destroy(self, instance, vm, shutdown=True): """ Destroy VM instance """ if vm is None: # Don't complain, just return. This lets us clean up instances diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index e6bc80d6e..58ad6ca6a 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -124,8 +124,6 @@ class XenAPIConnection(object): def snapshot(self, instance, name): """ Create snapshot from a running VM instance """ - #TODO(sirp): Add quiesce and VSS locking support when Windows support - # is added self._vmops.snapshot(instance, name) def reboot(self, instance): -- cgit From f31395c30c835201372802e9cdf9293dcbabdb5c Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 22 Dec 2010 14:54:02 -0600 Subject: Adding more comments regarding XS snapshots --- nova/tests/compute_unittest.py | 8 ++++++++ nova/virt/fake.py | 16 ++++++++++++++++ nova/virt/libvirt_conn.py | 2 +- nova/virt/xenapi/vm_utils.py | 20 +++++++++++++++++--- nova/virt/xenapi/vmops.py | 22 +++++++++++++++++++++- nova/virt/xenapi_conn.py | 1 - 6 files changed, 63 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 187ca31de..025291a92 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -142,6 +142,14 @@ class ComputeTestCase(test.TestCase): self.compute.reboot_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + def test_snapshot(self): + """Ensure instance can be snapshotted""" + instance_id = self._create_instance() + name = "myfakesnapshot" + self.compute.run_instance(self.context, instance_id) + self.compute.snapshot_instance(self.context, instance_id, name) + self.compute.terminate_instance(self.context, instance_id) + def test_console_output(self): """Make sure we can get console output from instance""" instance_id = self._create_instance() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 73e273edd..c6db66aa6 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -106,6 +106,22 @@ class FakeConnection(object): self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING + + def snapshot(self, instance, name): + """ + Snapshots the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + + The second parameter is the name of the snapshot. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. + """ + pass + + def reboot(self, instance): """ Reboot the specified instance. 
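The FakeConnection.snapshot() stub above is a bare pass, which is all test_snapshot needs to drive the manager path. If a test wanted to assert on the call itself, a recording fake is one option; a hedged sketch (nothing like this exists in the patch):

    class RecordingFakeConnection(object):
        """Fake virt driver that remembers snapshot requests."""

        def __init__(self):
            self.snapshots = []

        def snapshot(self, instance, name):
            # Same contract as FakeConnection.snapshot above.
            self.snapshots.append((instance.name, name))

    class FakeInstance(object):
        name = 'instance-1'

    conn = RecordingFakeConnection()
    conn.snapshot(FakeInstance(), 'myfakesnapshot')
    assert conn.snapshots == [('instance-1', 'myfakesnapshot')]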
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c8d0aa306..ad1a35643 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -237,7 +237,7 @@ class LibvirtConnection(object): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance): + def snapshot(self, instance, name): """ Create snapshot from a running VM instance """ raise NotImplementedError( _("Instance snapshotting is not supported for libvirt " "at this time")) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 5b1c36418..b7e20121c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -154,6 +154,9 @@ class VMHelper(): @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): + """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD + """ logging.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) #TODO(sirp): Add quiesce and VSS locking support when Windows support @@ -189,6 +192,9 @@ class VMHelper(): @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_name): + """ Requests that the Glance plugin bundle the specified VDIs and + push them into Glance using the specified human-friendly name. + """ logging.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name) @@ -347,9 +353,17 @@ def scan_sr(session, instance_id, sr_ref): def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): - """ TODO Explain why coalescing has to occur here """ - #NOTE(sirp): for some reason re-scan wasn't occuring automatically on - # XS5.6 + """ Spin until the parent VHD is coalesced into its parent VHD + + Before coalesce: + * original_parent_vhd + * parent_vhd + snapshot + + After coalesce: + * parent_vhd + snapshot + """ #TODO(sirp): we need to timeout this req after a while def _poll_vhds(): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 787f959a5..dd6243dd5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -88,7 +88,27 @@ class VMOps(object): vm_ref) def snapshot(self, instance, name): - """ Create snapshot from a running VM instance """ + """ Create snapshot from a running VM instance + + :param instance: instance to be snapshotted + :param name: name/label to be given to the snapshot + + Steps involved in a XenServer snapshot: + + 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This + creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD + + 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to + a 'base-copy' VDI. The base_copy is immutable and may be chained + with other base_copies. If chained, the base_copies + coalesce together, so we must wait for this coalescing to occur to + get a stable representation of the data on disk. + + 3. Push-to-glance: Once coalesced, we call a plugin on the XenServer + that will bundle the VHDs together and then push the bundle into + Glance.
+ """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 58ad6ca6a..816b92a0f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -121,7 +121,6 @@ class XenAPIConnection(object): """Create VM instance""" self._vmops.spawn(instance) - def snapshot(self, instance, name): """ Create snapshot from a running VM instance """ self._vmops.snapshot(instance, name) -- cgit From 749af384c0b7ca36bdd8c511f02b819a65e5dae0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 22 Dec 2010 17:09:46 -0600 Subject: Modified InstanceDiagnostics and truncate action --- nova/api/openstack/__init__.py | 1 + nova/api/openstack/servers.py | 4 ++++ nova/db/sqlalchemy/models.py | 10 ++-------- nova/virt/xenapi_conn.py | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index de95ee548..d8cd86116 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -176,6 +176,7 @@ class APIRouter(wsgi.Router): logging.debug("Including admin operations in API.") server_members['pause'] = 'POST' server_members['unpause'] = 'POST' + server_members["diagnostics"] = "GET" mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c3322f7c..65e371a90 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -195,3 +195,7 @@ class Controller(wsgi.Controller): logging.error("Compute.api::unpause %s", readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + + def diagnostics(self, req, id): + """Permit Admins to retrieve server diagnostics.""" + return {} diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index eac6a304e..600943005 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,7 +22,7 @@ SQLAlchemy models for nova data. 
import datetime from sqlalchemy.orm import relationship, backref, object_mapper -from sqlalchemy import Column, Integer, Float, String, schema +from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base @@ -232,13 +232,7 @@ class InstanceDiagnostics(BASE, NovaBase): id = Column(Integer, primary_key=True) instance_id = Column(Integer, ForeignKey('instances.id')) - memory_available = Column(Float) - memory_free = Column(Float) - cpu_load = Column(Float) - disk_read = Column(Float) - disk_write = Column(Float) - net_tx = Column(Float) - net_rx = Column(Float) + diagnostics = Column(Text) class InstanceActions(BASE, NovaBase): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 146e2f153..7870a21ac 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -204,7 +204,7 @@ class XenAPISession(object): status = self._session.xenapi.task.get_status(task) action = dict( instance_id=int(instance_id), - action=name, + action=name[0:255], # Ensure action is never > 255 error=None) if status == "pending": return -- cgit From 358961f3cf259487a2ff9bbb225defdc7cd9e7a7 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Dec 2010 11:22:51 -0600 Subject: Fixed trunk and PEP8 cleanup --- nova/virt/libvirt_conn.py | 16 +++++++++------- nova/virt/xenapi_conn.py | 4 ++++ 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 651b2af93..809a484dc 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -512,9 +512,11 @@ class LibvirtConnection(object): if FLAGS.allow_project_net_traffic: net, mask = _get_net_and_mask(network['cidr']) - extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n" - "<parameter name=\"PROJMASK\" value=\"%s\" />\n" - ) % (net, mask) + extra_params = ( + "<parameter name=\"PROJNET\" value=\"%s\" />\n" + "<parameter name=\"PROJMASK\" value=\"%s\" />\n") % ( + net, + mask) else: extra_params = "\n" @@ -800,8 +802,8 @@ class NWFilterFirewall(object): the base filter are all in place. """ - nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" - ) % instance['name'] + nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n") % ( + instance['name']) if instance['image_id'] == FLAGS.vpn_image_id: nwfilter_xml += "  <filterref filter='nova-vpn' />\n" @@ -814,8 +816,8 @@ for security_group in instance.security_groups: self.ensure_security_group_filter(security_group['id']) - nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' />\n" - ) % security_group['id'] + nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' />\n") % ( + security_group['id']) nwfilter_xml += "</filter>" self._define_filter(nwfilter_xml) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 7870a21ac..af16fcdf3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -110,6 +110,10 @@ class XenAPIConnection(object): self._vmops = VMOps(session) self._volumeops = VolumeOps(session) + def init_host(self): + """Initialize anything that is necessary for the driver to function""" + return + def list_instances(self): """List VM instances""" return self._vmops.list_instances() -- cgit From 55a80811a5982cb9af5b80e7ac3e925334a1b22d Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Dec 2010 12:23:28 -0600 Subject: Working diagnostics API; removed diagnostics DB model - not needed --- nova/api/openstack/servers.py | 3 ++- nova/compute/api.py | 9 +++++++++ nova/compute/manager.py | 11 +++++++++++ nova/db/sqlalchemy/models.py | 11 +---------- nova/virt/xenapi/vmops.py | 6 +++--- nova/virt/xenapi_conn.py | 4 ++-- 6 files changed, 28 insertions(+), 16 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 65e371a90..f758b252a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -198,4 +198,5 @@ class Controller(wsgi.Controller): def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" - return {} + ctxt = req.environ["nova.context"] + return self.compute_api.diagnostics(ctxt, id) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4953fe559..1c24c6634 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -283,6 +283,15 @@ class ComputeAPI(base.Base): {"method": "unpause_instance", "args": {"instance_id": instance['id']}}) + def diagnostics(self, context, instance_id): + """Retrieve diagnostics for the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance["host"] + return rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "get_diagnostics", + "args": {"instance_id": instance["id"]}}) + def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ff8202cca..3a5677bc5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -296,6 +296,17 @@ class ComputeManager(manager.Manager): instance_id, result)) + @exception.wrap_exception + def get_diagnostics(self, context, instance_id): + """Retrieve diagnostics for an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + if instance_ref["state"] == power_state.RUNNING: + logging.debug(_("instance %s: retrieving diagnostics"), + instance_ref["internal_id"]) + return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 163d2470b..d93dd64c5 100644
--- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -236,15 +236,6 @@ class Instance(BASE, NovaBase): # 'shutdown', 'shutoff', 'crashed']) -class InstanceDiagnostics(BASE, NovaBase): - """Represents a guest VM's diagnostics""" - __tablename__ = "instance_diagnostics" - id = Column(Integer, primary_key=True) - instance_id = Column(Integer, ForeignKey('instances.id')) - - diagnostics = Column(Text) - - class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" __tablename__ = "instance_actions" @@ -555,7 +546,7 @@ def register_models(): it will never need to be called explicitly elsewhere. """ from sqlalchemy import create_engine - models = (Service, Instance, InstanceDiagnostics, InstanceActions, + models = (Service, Instance, InstanceActions, Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4d897af35..50e20bff4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -156,11 +156,11 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) - def get_diagnostics(self, instance_id): + def get_diagnostics(self, instance): """Return data about VM diagnostics""" - vm = VMHelper.lookup(self._session, instance_id) + vm = VMHelper.lookup(self._session, instance.name) if vm is None: - raise Exception("instance not present %s" % instance_id) + raise Exception("instance not present %s" % instance.name) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index af16fcdf3..5c746a08f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -142,9 +142,9 @@ class XenAPIConnection(object): """Return data about VM instance""" return self._vmops.get_info(instance_id) - def get_diagnostics(self, instance_id): + def get_diagnostics(self, instance): """Return data about VM diagnostics""" - return self._vmops.get_diagnostics(instance_id) + return self._vmops.get_diagnostics(instance) def get_console_output(self, instance): """Return snapshot of console""" -- cgit From 13e8c8d83b8fc44cff343ea751a98f66857d1865 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Dec 2010 13:14:56 -0600 Subject: Resolved merge conflict --- nova/virt/xenapi_conn.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4ea8a6737..2bf29b295 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -225,13 +225,8 @@ class XenAPISession(object): name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) action = dict( -<<<<<<< TREE - instance_id=int(instance_id), + instance_id=int(id), action=name[0:255], # Ensure action is never > 255 -======= - id=int(id), - action=name, ->>>>>>> MERGE-SOURCE error=None) if status == "pending": return -- cgit From bd46ab4a721da856da5743c9f55ab5e50ec9b60f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Dec 2010 13:36:01 -0600 Subject: PEP8 fix --- nova/virt/xenapi/vmops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5039913ba..7571fe2e4 100644 --- a/nova/virt/xenapi/vmops.py +++ 
b/nova/virt/xenapi/vmops.py @@ -186,7 +186,8 @@ class VMOps(object): """Return data about VM diagnostics""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: - raise exception.NotFound(_("Instance not found %s") % instance.name) + raise exception.NotFound(_("Instance not found %s") % + instance.name) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) -- cgit From 438197264ea5ddc8bf076100586af6c71b0bf58d Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 11:22:15 -0800 Subject: Added custom guid type so we can choose the most efficient backend DB type easily. --- nova/db/sqlalchemy/models.py | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index a7725ac3c..7ec96bc6d 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,12 +22,14 @@ SQLAlchemy models for nova data. import datetime import uuid -from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, Float, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text +from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy.schema import ForeignKeyConstraint +from sqlalchemy.types import TypeDecorator, CHAR from nova.db.sqlalchemy.session import get_session @@ -44,6 +46,40 @@ def make_uuid(): return uuid.uuid1().hex +class GUID(TypeDecorator): + """Platform-independent GUID type. + + Uses Postgresql's UUID type, otherwise uses + CHAR(32), storing as stringified hex values. + + """ + impl = CHAR + + def load_dialect_impl(self, dialect): + if dialect.name == 'postgresql': + return dialect.type_descriptor(UUID()) + else: + return dialect.type_descriptor(CHAR(32)) + + def process_bind_param(self, value, dialect): + if value is None: + return value + elif dialect.name == 'postgresql': + return str(value) + else: + if not isinstance(value, uuid.UUID): + return "%.32x" % uuid.UUID(value).int + else: + # hexstring + return "%.32x" % value.int + + def process_result_value(self, value, dialect): + if value is None: + return value + else: + return uuid.UUID(value).hex + + class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -169,7 +205,7 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(String(32), primary_key=True, default=make_uuid) + id = Column(GUID, primary_key=True, default=make_uuid) @property def name(self): -- cgit From 89000675dfe321240b3dae53604ba87115a3ca3e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 11:43:17 -0800 Subject: Converted instance references to GUID type.
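A usage sketch for the GUID decorator introduced above (the model and engine here are made up, and it assumes the GUID and make_uuid definitions from the diff are importable from nova.db.sqlalchemy.models): on PostgreSQL the column renders as a native UUID, on every other backend as CHAR(32) holding the hex form, so callers always read 32-character hex strings back.

    from sqlalchemy import Column, create_engine
    from sqlalchemy.ext.declarative import declarative_base

    from nova.db.sqlalchemy.models import GUID, make_uuid

    Base = declarative_base()

    class GuidExample(Base):
        __tablename__ = 'guid_example'
        id = Column(GUID, primary_key=True, default=make_uuid)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)  # id is rendered as CHAR(32) on SQLite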
--- nova/db/sqlalchemy/models.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7ec96bc6d..5634dda5e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -279,7 +279,7 @@ class InstanceDiagnostics(BASE, NovaBase): """Represents a guest VM's diagnostics""" __tablename__ = "instance_diagnostics" id = Column(Integer, primary_key=True) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) memory_available = Column(Float) memory_free = Column(Float) @@ -294,7 +294,7 @@ class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" __tablename__ = "instance_actions" id = Column(Integer, primary_key=True) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) action = Column(String(255)) error = Column(Text) @@ -312,7 +312,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? - instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) + instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -385,7 +385,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(String(32), ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -505,7 +505,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) + instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, -- cgit From 6debe20395d6ab476bfd2a237df8c2b08050e0e6 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 12:19:36 -0800 Subject: Converted Volume model and operation to use UUIDs. --- nova/api/ec2/cloud.py | 10 +++++----- nova/db/sqlalchemy/api.py | 39 --------------------------------------- nova/db/sqlalchemy/models.py | 15 +++++++-------- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_xenapi.py | 11 +++++------ 5 files changed, 19 insertions(+), 60 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index d50a95095..8c687f173 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -458,7 +458,7 @@ class CloudController(object): # NOTE(vish): volume_id is an optional list of volume ids to filter by. 
volumes = [self._format_volume(context, v) for v in volumes - if volume_id is None or v['ec2_id'] in volume_id] + if volume_id is None or v['id'] in volume_id] return {'volumeSet': volumes} @@ -471,7 +471,7 @@ class CloudController(object): instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = volume['ec2_id'] + v['volumeId'] = volume['id'] v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -527,7 +527,7 @@ class CloudController(object): return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. " "Example device: /dev/vdb") % device) @@ -553,7 +553,7 @@ class CloudController(object): 'volumeId': volume_ref['id']} def detach_volume(self, context, volume_id, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) if not instance_ref: @@ -807,7 +807,7 @@ class CloudController(object): def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) if volume_ref['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57b70288c..50f833a5f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1285,10 +1285,6 @@ def volume_create(context, values): session = get_session() with session.begin(): - while volume_ref.ec2_id == None: - ec2_id = utils.generate_uid('vol') - if not volume_ec2_id_exists(context, ec2_id, session=session): - volume_ref.ec2_id = ec2_id volume_ref.save(session=session) return volume_ref @@ -1386,41 +1382,6 @@ def volume_get_all_by_project(context, project_id): all() -@require_context -def volume_get_by_ec2_id(context, ec2_id): - session = get_session() - result = None - - if is_admin_context(context): - result = session.query(models.Volume).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - elif is_user_context(context): - result = session.query(models.Volume).\ - filter_by(project_id=context.project_id).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=False).\ - first() - else: - raise exception.NotAuthorized() - - if not result: - raise exception.NotFound(_('Volume %s not found') % ec2_id) - - return result - - -@require_context -def volume_ec2_id_exists(context, ec2_id, session=None): - if not session: - session = get_session() - - return session.query(exists().\ - where(models.Volume.id == ec2_id)).\ - one()[0] - - @require_admin_context def volume_get_instance(context, volume_id): session = get_session() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 5634dda5e..418c8914e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -303,8 +303,11 @@ class InstanceActions(BASE, NovaBase): class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' - id = Column(Integer, primary_key=True) 
- ec2_id = Column(String(12), unique=True) + id = Column(GUID, primary_key=True, default=make_uuid) + + @property + def name(self): + return "volume-%s" % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -330,10 +333,6 @@ display_name = Column(String(255)) display_description = Column(String(255)) - @property - def name(self): - return self.ec2_id class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" @@ -357,7 +356,7 @@ class ExportDevice(BASE, NovaBase): id = Column(Integer, primary_key=True) shelf_id = Column(Integer) blade_id = Column(Integer) - volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('export_device', uselist=False), foreign_keys=volume_id, @@ -373,7 +372,7 @@ class IscsiTarget(BASE, NovaBase): id = Column(Integer, primary_key=True) target_num = Column(Integer) host = Column(String(255)) - volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca400077a..42344af1c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -127,9 +127,9 @@ class CloudTestCase(test.TestCase): result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['ec2_id']]) + volume_id=[vol2['id']]) self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) + self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id']) db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ed2e4ffde..900b9af2b 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,8 +79,8 @@ class XenAPIVolumeTestCase(test.TestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = 'SR-%s' % vol['ec2_id'] + info = helper.parse_volume_info(vol['id'], '/dev/sdc') + label = 'SR-%s' % vol['id'] description = 'Test-SR' sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) @@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase): # oops, wrong mount point!
self.assertRaises(volume_utils.StorageError, helper.parse_volume_info, - vol['ec2_id'], + vol['id'], '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) @@ -108,8 +108,7 @@ class XenAPIVolumeTestCase(test.TestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') + result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it @@ -134,7 +133,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.assertRaises(Exception, conn.attach_volume, instance.name, - volume['ec2_id'], + volume['id'], '/dev/sdc') def tearDown(self): -- cgit From 748aa8089eabfd15425199c2318079e9bf84578f Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Mon, 27 Dec 2010 14:23:53 -0600 Subject: Fixing bad merge --- nova/virt/xenapi/vmops.py | 48 +++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a0497b3e2..23531348e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -96,7 +96,30 @@ class VMOps(object): self._session.call_xenapi('VM.start', vm_ref, False, False) logging.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) - + + # NOTE(armando): Do we really need to do this in virt? + timer = utils.LoopingCall(f=None) + + def _wait_for_boot(): + try: + state = self.get_info(instance['name'])['state'] + db.instance_set_state(context.get_admin_context(), + instance['id'], state) + if state == power_state.RUNNING: + logging.debug(_('Instance %s: booted'), instance['name']) + timer.stop() + except Exception, exc: + logging.warn(exc) + logging.exception(_('instance %s: failed to boot'), + instance['name']) + db.instance_set_state(context.get_admin_context(), + instance['id'], + power_state.SHUTDOWN) + timer.stop() + + timer.f = _wait_for_boot + return timer.start(interval=0.5, now=True) + def snapshot(self, instance, name): """ Create snapshot from a running VM instance @@ -143,29 +166,6 @@ class VMOps(object): logging.debug(_("Finished snapshot and upload for VM %s"), instance) - # NOTE(armando): Do we really need to do this in virt? 
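The _wait_for_boot timer moved back into spawn() above polls the driver until the instance reports RUNNING, mirroring each observed state into the DB. A dependency-free sketch of the same pattern (the state names and both callables are hypothetical stand-ins for utils.LoopingCall plus the db calls):

    import time

    RUNNING, SHUTDOWN = 'running', 'shutdown'

    def wait_for_boot(get_state, set_state, interval=0.5, max_polls=20):
        for _ in range(max_polls):
            try:
                state = get_state()
            except Exception:
                set_state(SHUTDOWN)  # instance failed to boot
                return False
            set_state(state)  # mirror the current state into the DB
            if state == RUNNING:
                return True
            time.sleep(interval)
        return False

    states = iter(['building', 'building', RUNNING])
    assert wait_for_boot(lambda: next(states), lambda s: None, interval=0)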
- timer = utils.LoopingCall(f=None) - - def _wait_for_boot(): - try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.RUNNING: - logging.debug(_('Instance %s: booted'), instance['name']) - timer.stop() - except Exception, exc: - logging.warn(exc) - logging.exception(_('instance %s: failed to boot'), - instance['name']) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - timer.stop() - - timer.f = _wait_for_boot - return timer.start(interval=0.5, now=True) - def reboot(self, instance): """Reboot VM instance""" instance_name = instance.name -- cgit From 8d522838ace090a7325d930df08c37f1e9d9803e Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Mon, 27 Dec 2010 15:42:30 -0600 Subject: Fixing snapshots, pep8 fixes --- nova/api/openstack/images.py | 2 +- nova/tests/api/openstack/fakes.py | 1 + nova/virt/fake.py | 2 -- nova/virt/xenapi/vm_utils.py | 58 +++++++++++++++++++++------------------ nova/virt/xenapi/vmops.py | 16 +++++------ nova/virt/xenapi_conn.py | 2 +- 6 files changed, 43 insertions(+), 38 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 8a6772fa5..3a95e3961 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -71,7 +71,7 @@ class Controller(wsgi.Controller): ctxt = req.environ['nova.context'] env = self._deserialize(req.body, req) data = {'instance_id': env["image"]["serverId"], - 'name': env["image"]["name"] } + 'name': env["image"]["name"]} return dict(image=self._service.create(ctxt, data)) def update(self, req, id): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index e1ed6a6a8..961431154 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -115,6 +115,7 @@ def stub_out_compute_api_snapshot(stubs): return 123 stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot) + def stub_out_glance(stubs, initial_fixtures=[]): class FakeParallaxClient: diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 6d3cef268..0f456aef6 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -112,7 +112,6 @@ class FakeConnection(object): self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING - def snapshot(self, instance, name): """ Snapshots the specified instance. @@ -127,7 +126,6 @@ class FakeConnection(object): """ pass - def reboot(self, instance): """ Reboot the specified instance. diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2b76afc62..f4195cb1b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -27,6 +27,7 @@ from xml.dom import minidom from eventlet import event from nova import exception from nova import flags +from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -205,44 +206,34 @@ class VMHelper(HelperBase): vm_ref, network_ref) return vif_ref - @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD """ - logging.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) - + logging.debug(_("Snapshotting VM %s with label '%s'..."), + vm_ref, label) #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - - #TODO(sirp): Make safe_lookup_vdi for assert? 
- vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) - if vdi_refs is None: - raise Exception(_("No VDIs found for VM %s") % vm_ref) - else: - num_vdis = len(vdi_refs) - if num_vdis != 1: - raise Exception(_("Unexpected number of VDIs (%s) found for " - "VM %s") % (num_vdis, vm_ref)) - - vdi_ref = vdi_refs[0] - vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) - vdi_uuid = vdi_rec["uuid"] - - original_parent_uuid = get_vhd_parent_uuid(session, vdi_ref) + vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_uuid = vm_vdi_rec["uuid"] + original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(instance_id, task) + template_vdi_ref, template_vdi_rec = get_vdi_for_vm_safely( + session, template_vm_ref) + + template_vdi_uuid = template_vdi_rec["uuid"] logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, vm_ref) - sr_ref = vdi_rec["SR"] + sr_ref = vm_vdi_rec["SR"] parent_uuid = wait_for_vhd_coalesce( - session, instance_id, sr_ref, vdi_ref, original_parent_uuid) + session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) #TODO(sirp): we need to assert only one parent, not parents two deep - return template_vm_ref, [vdi_uuid, parent_uuid] + return template_vm_ref, [template_vdi_uuid, parent_uuid] @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_name): @@ -252,7 +243,7 @@ class VMHelper(HelperBase): logging.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name) - params = {'vdi_uuids': vdi_uuids, + params = {'vdi_uuids': vdi_uuids, 'image_name': image_name, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port} @@ -261,7 +252,6 @@ class VMHelper(HelperBase): task = session.async_call_plugin('glance', 'put_vdis', kwargs) session.wait_for_task(instance_id, task) - @classmethod def fetch_image(cls, session, instance_id, image, user, project, type): """ @@ -418,7 +408,7 @@ def scan_sr(session, instance_id, sr_ref): def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD - + Before coalesce: * original_parent_vhd * parent_vhd @@ -440,7 +430,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, parent_uuid, original_parent_uuid) else: done.send(parent_uuid) - + done = event.Event() loop = utils.LoopingCall(_poll_vhds) loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True) @@ -448,3 +438,19 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, loop.stop() return parent_uuid + +def get_vdi_for_vm_safely(session, vm_ref): + """Returns (vdi_ref, vdi_uuid)""" + #TODO(sirp): Make safe_lookup_vdi for assert? 
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) + if vdi_refs is None: + raise Exception(_("No VDIs found for VM %s") % vm_ref) + else: + num_vdis = len(vdi_refs) + if num_vdis != 1: + raise Exception(_("Unexpected number of VDIs (%s) found for " + "VM %s") % (num_vdis, vm_ref)) + + vdi_ref = vdi_refs[0] + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, vdi_rec diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 23531348e..2cc36ad54 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -122,7 +122,7 @@ class VMOps(object): def snapshot(self, instance, name): """ Create snapshot from a running VM instance - + :param instance: instance to be snapshotted :param name: name/label to be given to the snapshot @@ -131,7 +131,7 @@ class VMOps(object): 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD - + 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to a 'base-copy' VDI. The base_copy is immutable and may be chained with other base_copies. If chained, the base_copies @@ -153,14 +153,14 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc) return - + try: # call plugin to ship snapshot off to glance VMHelper.upload_image( - self._session, instance.id, template_vdi_uuids, name) + self._session, instance.id, template_vdi_uuids, name) finally: self._destroy(instance, template_vm_ref, shutdown=False) @@ -193,7 +193,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) self._session.wait_for_task(instance.id, task) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -202,13 +202,13 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(instance.id, task) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.warn(exc) # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(instance.id, task) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.warn(exc) def _wait_with_callback(self, instance_id, task, callback): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 1a3dc9f16..abd2a02ee 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -233,7 +233,7 @@ class XenAPISession(object): name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) action = dict( - id=int(id), + instance_id=int(id), action=name, error=None) if status == "pending": -- cgit From 0fb37c5a08db4b2631ff687cb0fc6af43ba20190 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Dec 2010 11:49:07 -0600 Subject: Added InstanceAction DB functions --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 9 +++++++++ 2 files changed, 14 insertions(+) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index fde3f0852..127f15478 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -383,6 +383,11 @@ def instance_action_create(context, values): return IMPL.instance_action_create(context, values) +def instance_get_actions(context, instance_id): + """Get instance actions by instance id.""" + return IMPL.instance_get_actions(context, 
instance_id) + + ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7e945e4cb..d2127989c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -856,6 +856,15 @@ def instance_action_create(context, values): return action_ref +@require_context +def instance_get_actions(context, instance_id): + """Return the actions associated to the given instance id""" + session = get_session() + return session.query(models.InstanceActions).\ + filter_by(instance_id=instance_id).\ + all() + + ################### -- cgit From 7ddd833bc61252061b3dfd2449765a93f750bffa Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Dec 2010 12:19:25 -0600 Subject: Added get_diagnostics placeholders to libvirt and fake --- nova/virt/fake.py | 3 +++ nova/virt/libvirt_conn.py | 3 +++ 2 files changed, 6 insertions(+) (limited to 'nova') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 706888b0d..880772b22 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -202,6 +202,9 @@ class FakeConnection(object): 'num_cpu': 2, 'cpu_time': 0} + def get_diagnostics(self, instance_name): + pass + def list_disks(self, instance_name): """ Return the IDs of all the virtual disks attached to the specified diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 65cf65098..06ace7457 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -567,6 +567,9 @@ class LibvirtConnection(object): 'num_cpu': num_cpu, 'cpu_time': cpu_time} + def get_diagnostics(self, instance_name): + raise exception.APIError("diagnostics not supported for libvirt") + def get_disks(self, instance_name): """ Note that this function takes an instance name, not an Instance, so -- cgit From 96384b689953e381f2210d4a78f1b5239a78e507 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 28 Dec 2010 12:53:32 -0600 Subject: Add some basic snapshot tests --- nova/tests/test_xenapi.py | 106 +++++++++++++++++++++++++++++-------------- nova/tests/xenapi/stubs.py | 66 +++++++++++++++++++++++++++ nova/virt/xenapi/fake.py | 64 ++++++++++++++++++++++++-- nova/virt/xenapi/vm_utils.py | 15 +++--- 4 files changed, 206 insertions(+), 45 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ed2e4ffde..f0d84e9aa 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -29,9 +29,9 @@ from nova.auth import manager from nova.compute import instance_types from nova.compute import power_state from nova.virt import xenapi_conn -from nova.virt.xenapi import fake +from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils -from nova.tests.db import fakes +from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs FLAGS = flags.FLAGS @@ -47,9 +47,9 @@ class XenAPIVolumeTestCase(test.TestCase): FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' - fakes.stub_out_db_instance_api(self.stubs) + db_fakes.stub_out_db_instance_api(self.stubs) stubs.stub_out_get_target(self.stubs) - fake.reset() + xenapi_fake.reset() self.values = {'name': 1, 'id': 1, 'project_id': 'fake', 'user_id': 'fake', @@ -83,7 +83,7 @@ class XenAPIVolumeTestCase(test.TestCase): label = 'SR-%s' % vol['ec2_id'] description = 'Test-SR' sr_ref = helper.create_iscsi_storage(session, info, label, description) - srs = fake.get_all('SR') + srs = xenapi_fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) 
db.volume_destroy(context.get_admin_context(), vol['id']) @@ -107,17 +107,17 @@ class XenAPIVolumeTestCase(test.TestCase): conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) - fake.create_vm(instance.name, 'Running') + xenapi_fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it # Get XenAPI reference for the VM - vms = fake.get_all('VM') + vms = xenapi_fake.get_all('VM') # Get XenAPI record for VBD - vbds = fake.get_all('VBD') - vbd = fake.get_record('VBD', vbds[0]) + vbds = xenapi_fake.get_all('VBD') + vbd = xenapi_fake.get_record('VBD', vbds[0]) vm_ref = vbd['VM'] self.assertEqual(vm_ref, vms[0]) @@ -130,7 +130,7 @@ class XenAPIVolumeTestCase(test.TestCase): conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) - fake.create_vm(instance.name, 'Running') + xenapi_fake.create_vm(instance.name, 'Running') self.assertRaises(Exception, conn.attach_volume, instance.name, @@ -156,41 +156,66 @@ class XenAPIVMTestCase(test.TestCase): self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' - fake.reset() - fakes.stub_out_db_instance_api(self.stubs) - fake.create_network('fake', FLAGS.flat_network_bridge) + xenapi_fake.reset() + db_fakes.stub_out_db_instance_api(self.stubs) + xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.get_connection(False) def test_list_instances_0(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - conn = xenapi_conn.get_connection(False) - instances = conn.list_instances() + instances = self.conn.list_instances() self.assertEquals(instances, []) + def test_instance_snapshot(self): + stubs.stubout_instance_snapshot(self.stubs) + instance = self._create_instance() + + name = "MySnapshot" + template_vm_ref = self.conn.snapshot(instance, name) + + def ensure_vm_was_torn_down(): + vm_labels = [] + for vm_ref in xenapi_fake.get_all('VM'): + vm_rec = xenapi_fake.get_record('VM', vm_ref) + if not vm_rec["is_control_domain"]: + vm_labels.append(vm_rec["name_label"]) + + self.assertEquals(vm_labels, [1]) + + def ensure_vbd_was_torn_down(): + vbd_labels = [] + for vbd_ref in xenapi_fake.get_all('VBD'): + vbd_rec = xenapi_fake.get_record('VBD', vbd_ref) + vbd_labels.append(vbd_rec["vm_name_label"]) + + self.assertEquals(vbd_labels, [1]) + + def ensure_vdi_was_torn_down(): + for vdi_ref in xenapi_fake.get_all('VDI'): + vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) + name_label = vdi_rec["name_label"] + self.assert_(not name_label.endswith('snapshot')) + + def check(): + ensure_vm_was_torn_down() + ensure_vbd_was_torn_down() + ensure_vdi_was_torn_down() + + check() + def test_spawn(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff', - } - conn = xenapi_conn.get_connection(False) - instance = db.instance_create(values) - conn.spawn(instance) + instance = self._create_instance() def check(): - instances = conn.list_instances() + instances = self.conn.list_instances() self.assertEquals(instances, [1]) # Get Nova record for 
VM - vm_info = conn.get_info(1) + vm_info = self.conn.get_info(1) # Get XenAPI record for VM - vms = fake.get_all('VM') - vm = fake.get_record('VM', vms[0]) + vms = xenapi_fake.get_all('VM') + vm = xenapi_fake.get_record('VM', vms[0]) # Check that m1.large above turned into the right thing. instance_type = instance_types.INSTANCE_TYPES['m1.large'] @@ -218,3 +243,18 @@ class XenAPIVMTestCase(test.TestCase): self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.stubs.UnsetAll() + + def _create_instance(self): + """Creates and spawns a test instance""" + values = {'name': 1, 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + instance = db.instance_create(values) + self.conn.spawn(instance) + return instance diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index a7e592fee..55f751f11 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -19,6 +19,54 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils +from nova.virt.xenapi import vm_utils + + +def stubout_instance_snapshot(stubs): + @classmethod + def fake_fetch_image(cls, session, instance_id, image, user, project, + type): + # Stubout wait_for_task + def fake_wait_for_task(self, id, task): + class FakeEvent: + + def send(self, value): + self.rv = value + + def wait(self): + return self.rv + + done = FakeEvent() + self._poll_task(id, task, done) + rv = done.wait() + return rv + + stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', + fake_wait_for_task) + + from nova.virt.xenapi.fake import create_vdi + name_label = "instance-%s" % instance_id + #TODO: create fake SR record + sr_ref = "fakesr" + vdi_ref = create_vdi(name_label=name_label, read_only=False, + sr_ref=sr_ref, sharable=False) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + vdi_uuid = vdi_rec['uuid'] + return vdi_uuid + + stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) + + def fake_parse_xmlrpc_value(val): + return val + + stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value) + + def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, + original_parent_uuid): + #TODO(sirp): Should we actually fake out the data here + return "fakeparent" + + stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) def stubout_session(stubs, cls): @@ -63,6 +111,24 @@ class FakeSessionForVMTests(fake.SessionBase): vm['is_a_template'] = False vm['is_control_domain'] = False + def VM_snapshot(self, session_ref, vm_ref, label): + status = "Running" + template_vm_ref = fake.create_vm(label, status, is_a_template=True, + is_control_domain=False) + + sr_ref = "fakesr" + template_vdi_ref = fake.create_vdi(label, read_only=True, + sr_ref=sr_ref, sharable=False) + + template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref) + return template_vm_ref + + def VDI_destroy(self, session_ref, vdi_ref): + fake.destroy_vdi(vdi_ref) + + def VM_destroy(self, session_ref, vm_ref): + fake.destroy_vm(vm_ref) + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 1eaf31c25..aa4026f97 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -55,6 +55,8 @@ import datetime import logging import uuid +from pprint import pformat + from nova 
import exception @@ -64,6 +66,10 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\ _db_content = {} +def log_db_contents(msg=None): + logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content)) + + def reset(): for c in _CLASSES: _db_content[c] = {} @@ -93,6 +99,24 @@ def create_vm(name_label, status, }) +def destroy_vm(vm_ref): + vm_rec = _db_content['VM'][vm_ref] + + vbd_refs = vm_rec['VBDs'] + for vbd_ref in vbd_refs: + destroy_vbd(vbd_ref) + + del _db_content['VM'][vm_ref] + + +def destroy_vbd(vbd_ref): + del _db_content['VBD'][vbd_ref] + + +def destroy_vdi(vdi_ref): + del _db_content['VDI'][vdi_ref] + + def create_vdi(name_label, read_only, sr_ref, sharable): return _create_object('VDI', { 'name_label': name_label, @@ -109,6 +133,23 @@ def create_vdi(name_label, read_only, sr_ref, sharable): }) +def create_vbd(vm_ref, vdi_ref): + vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref} + vbd_ref = _create_object('VBD', vbd_rec) + after_VBD_create(vbd_ref, vbd_rec) + return vbd_ref + + +def after_VBD_create(vbd_ref, vbd_rec): + """Create backref from VM to VBD when VBD is created""" + vm_ref = vbd_rec['VM'] + vm_rec = _db_content['VM'][vm_ref] + vm_rec['VBDs'] = [vbd_ref] + + vm_name_label = _db_content['VM'][vm_ref]['name_label'] + vbd_rec['vm_name_label'] = vm_name_label + + def create_pbd(config, sr_ref, attached): return _create_object('PBD', { 'device-config': config, @@ -277,11 +318,12 @@ class SessionBase(object): self._check_arg_count(params, 2) return get_record(cls, params[1]) - if (func == 'get_by_name_label' or - func == 'get_by_uuid'): + if func in ('get_by_name_label', 'get_by_uuid'): self._check_arg_count(params, 2) + return_singleton = (func == 'get_by_uuid') return self._get_by_field( - _db_content[cls], func[len('get_by_'):], params[1]) + _db_content[cls], func[len('get_by_'):], params[1], + return_singleton=return_singleton) if len(params) == 2: field = func[len('get_'):] @@ -324,6 +366,13 @@ class SessionBase(object): (cls, _) = name.split('.') ref = is_sr_create and \ _create_sr(cls, params) or _create_object(cls, params[1]) + + # Call hook to provide any fixups needed (ex. 
creating backrefs) + try: + globals()["after_%s_create" % cls](ref, params[1]) + except KeyError: + pass + obj = get_record(cls, ref) # Add RO fields @@ -359,11 +408,18 @@ class SessionBase(object): raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH', expected, actual]) - def _get_by_field(self, recs, k, v): + def _get_by_field(self, recs, k, v, return_singleton): result = [] for ref, rec in recs.iteritems(): if rec.get(k) == v: result.append(ref) + + if return_singleton: + try: + return result[0] + except IndexError: + return None + return result diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f4195cb1b..68da807fc 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -211,24 +211,25 @@ class VMHelper(HelperBase): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD """ - logging.debug(_("Snapshotting VM %s with label '%s'..."), - vm_ref, label) #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added + logging.debug(_("Snapshotting VM %s with label '%s'..."), + vm_ref, label) + vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] + sr_ref = vm_vdi_rec["SR"] + original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(instance_id, task) - template_vdi_ref, template_vdi_rec = get_vdi_for_vm_safely( - session, template_vm_ref) - + template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] + logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, vm_ref) - sr_ref = vm_vdi_rec["SR"] parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -440,8 +441,6 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, def get_vdi_for_vm_safely(session, vm_ref): - """Returns (vdi_ref, vdi_uuid)""" - #TODO(sirp): Make safe_lookup_vdi for assert? 
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) if vdi_refs is None: raise Exception(_("No VDIs found for VM %s") % vm_ref) -- cgit From 7811a77753943ee87f3c3b10f37d22e61c5119d0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Dec 2010 14:32:45 -0600 Subject: Make action log available through Admin API --- nova/api/openstack/__init__.py | 1 + nova/api/openstack/servers.py | 5 +++++ nova/compute/api.py | 9 +++++++++ nova/compute/manager.py | 10 ++++++++++ nova/db/sqlalchemy/api.py | 9 ++++++--- nova/virt/fake.py | 3 +++ nova/virt/libvirt_conn.py | 5 ++++- nova/virt/xenapi/vmops.py | 8 ++++++++ nova/virt/xenapi_conn.py | 4 ++++ 9 files changed, 50 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index be342b670..412e16315 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -94,6 +94,7 @@ class APIRouter(wsgi.Router): server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" + server_members["actions"] = "GET" server_members['suspend'] = 'POST' server_members['resume'] = 'POST' diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c00aa26ce..0091a9bd2 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -224,3 +224,8 @@ class Controller(wsgi.Controller): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] return self.compute_api.diagnostics(ctxt, id) + + def actions(self, req, id): + """Permit Admins to retrieve server actions.""" + ctxt = req.environ["nova.context"] + return self.compute_api.actions(ctxt, id) diff --git a/nova/compute/api.py b/nova/compute/api.py index 68919442a..3d9728c77 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -293,6 +293,15 @@ class ComputeAPI(base.Base): {"method": "get_diagnostics", "args": {"instance_id": instance["id"]}}) + def actions(self, context, instance_id): + """Retrieve actions for the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance["host"] + return rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "get_actions", + "args": {"instance_id": instance["id"]}}) + def suspend(self, context, instance_id): """suspend the instance with instance_id""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7dea04132..924b72004 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -307,6 +307,16 @@ class ComputeManager(manager.Manager): instance_ref["internal_id"]) return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception + def get_actions(self, context, instance_id): + """Retrieve actions for an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug(_("instance %s: retrieving actions"), + instance_ref["internal_id"]) + return self.driver.get_actions(instance_ref) + def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d2127989c..5fd928a79 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -856,13 +856,16 @@ def instance_action_create(context, values): return action_ref -@require_context +@require_admin_context def 
instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - return session.query(models.InstanceActions).\ + actions = {} + for action in session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all() + all(): + actions[action.action] = action.error + return actions ################### diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 880772b22..e9d1e4bd7 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -205,6 +205,9 @@ class FakeConnection(object): def get_diagnostics(self, instance_name): pass + def get_actions(self, instance_name): + pass + def list_disks(self, instance_name): """ Return the IDs of all the virtual disks attached to the specified diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 06ace7457..9c8aa1fae 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -568,7 +568,10 @@ class LibvirtConnection(object): 'cpu_time': cpu_time} def get_diagnostics(self, instance_name): - raise exception.APIError("diagnostics not supported for libvirt") + raise exception.APIError("diagnostics are not supported for libvirt") + + def get_actions(self, instance_name): + raise exception.APIError("actions are not supported for libvirt") def get_disks(self, instance_name): """ diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 332e603db..93f7f7695 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -226,6 +226,14 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) + def get_actions(self, instance): + """Return VM action history""" + vm = VMHelper.lookup(self._session, instance.name) + if vm is None: + raise exception.NotFound(_("Instance not found %s") % + instance.name) + return db.instance_get_actions(context.get_admin_context(), instance.id) + def get_console_output(self, instance): """Return snapshot of console""" # TODO: implement this to fix pylint! diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a7ec22012..92274daf9 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -163,6 +163,10 @@ class XenAPIConnection(object): """Return data about VM diagnostics""" return self._vmops.get_diagnostics(instance) + def get_actions(self, instance): + """Return VM action history""" + return self._vmops.get_actions(instance) + def get_console_output(self, instance): """Return snapshot of console""" return self._vmops.get_console_output(instance) -- cgit From 5a25de893f34cb9b05996406488188b6ed47fca1 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 28 Dec 2010 17:14:01 -0500 Subject: Add flag --enable_new_services to toggle default state of service when created. --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 2 ++ nova/tests/test_service.py | 25 +++++++++++++++++++++++++ 3 files changed, 32 insertions(+) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index fde3f0852..fcb1cc3f9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. 
+
+:enable_new_services: when adding a new service to the database, is it in the
+                      pool of available hardware (Default: False)
 """
 
 from nova import exception
@@ -37,6 +40,8 @@ from nova import utils
 FLAGS = flags.FLAGS
 flags.DEFINE_string('db_backend', 'sqlalchemy',
                     'The backend to use for db')
+flags.DEFINE_boolean('enable_new_services', False,
+                     'Services to be added to the available pool on create')
 
 
 IMPL = utils.LazyPluggable(FLAGS['db_backend'],
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7e945e4cb..bcc076c5a 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -236,6 +236,8 @@ def service_get_by_args(context, host, binary):
 def service_create(context, values):
     service_ref = models.Service()
     service_ref.update(values)
+    if FLAGS.enable_new_services:
+        service_ref.disabled = False
     service_ref.save()
     return service_ref
 
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index b30838ad7..01f27e5b4 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue
 
 import mox
 
+from nova import context
+from nova import db
 from nova import exception
 from nova import flags
 from nova import rpc
@@ -72,6 +74,29 @@ class ServiceManagerTestCase(test.TestCase):
         self.assertEqual(serv.test_method(), 'service')
 
 
+class ServiceFlagsTestCase(test.TestCase):
+    def test_service_enabled_on_create_based_on_flag(self):
+        self.flags(enable_new_services=True)
+        host = 'foo'
+        binary = 'nova-fake'
+        app = service.Service.create(host=host, binary=binary)
+        app.start()
+        app.stop()
+        ref = db.service_get(context.get_admin_context(), app.service_id)
+        db.service_destroy(context.get_admin_context(), app.service_id)
+        self.assert_(not ref['disabled'])
+
+    def test_service_disabled_on_create_based_on_flag(self):
+        self.flags(enable_new_services=False)
+        host = 'foo'
+        binary = 'nova-fake'
+        app = service.Service.create(host=host, binary=binary)
+        app.start()
+        app.stop()
+        ref = db.service_get(context.get_admin_context(), app.service_id)
+        db.service_destroy(context.get_admin_context(), app.service_id)
+        self.assert_(ref['disabled'])
+
 class ServiceTestCase(test.TestCase):
     """Test cases for Services"""
 
-- cgit 

From 9da1fcd6eca6f2f88e95242b8d046f4ee11f3761 Mon Sep 17 00:00:00 2001
From: Todd Willey
Date: Tue, 28 Dec 2010 17:54:31 -0500
Subject: Default services to enabled.

---
 nova/db/api.py               |  4 ++--
 nova/db/sqlalchemy/api.py    |  4 ++--
 nova/db/sqlalchemy/models.py |  2 +-
 nova/tests/test_scheduler.py | 34 +++++++---------------------------
 4 files changed, 12 insertions(+), 32 deletions(-)

(limited to 'nova')

diff --git a/nova/db/api.py b/nova/db/api.py
index fcb1cc3f9..f32e1e3f5 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -29,7 +29,7 @@ The underlying driver is loaded as a :class:`LazyPluggable`.
                   `sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the - pool of available hardware (Default: False) + pool of available hardware (Default: True) """ from nova import exception @@ -40,7 +40,7 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') -flags.DEFINE_boolean('enable_new_services', False, +flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bcc076c5a..9f0597b54 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -236,8 +236,8 @@ def service_get_by_args(context, host, binary): def service_create(context, values): service_ref = models.Service() service_ref.update(values) - if FLAGS.enable_new_services: - service_ref.disabled = False + if not FLAGS.enable_new_services: + service_ref.disabled = True service_ref.save() return service_ref diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 86c24cb90..ca54d4466 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -148,7 +148,7 @@ class Service(BASE, NovaBase): binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=True) + disabled = Column(Boolean, default=False) class Certificate(BASE, NovaBase): diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 65430fb36..e8021ed5a 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -133,6 +133,10 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(0, len(hosts)) compute1.kill() @@ -152,10 +156,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(2, len(hosts)) compute1.kill() @@ -173,10 +173,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -200,10 +196,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() 
compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -225,8 +217,7 @@ class SimpleDriverTestCase(test.TestCase): now = datetime.datetime.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta - db.service_update(self.context, s1['id'], {'disabled': False, - 'updated_at': past}) + db.service_update(self.context, s1['id'], {'updated_at': past}) instance_id2 = self._create_instance(availability_zone='nova:host1') self.assertRaises(driver.WillNotSchedule, self.scheduler.driver.schedule_run_instance, @@ -241,7 +232,8 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute1.start() - db.service_get_by_args(self.context, 'host1', 'nova-compute') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) instance_id2 = self._create_instance(availability_zone='nova:host1') host = self.scheduler.driver.schedule_run_instance(self.context, instance_id2) @@ -261,10 +253,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -298,10 +286,6 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -325,10 +309,6 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): -- cgit From 823c5fc1ff3c37acbfe9b30d7057f53b050b93c6 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 29 Dec 2010 14:02:57 -0600 Subject: Added tests --- nova/tests/api/openstack/test_servers.py | 16 ++++++++++++++++ nova/tests/test_compute.py | 16 ++++++++++++++-- nova/tests/test_xenapi.py | 30 ++++++++++++++++++++---------- nova/virt/xenapi/vm_utils.py | 4 ++++ 4 files changed, 54 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 5d23db588..321ddceee 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -95,6 +95,10 @@ class ServersTest(unittest.TestCase): fake_compute_api) self.stubs.Set(nova.compute.api.ComputeAPI, 'resume', fake_compute_api) + self.stubs.Set(nova.compute.api.ComputeAPI, "diagnostics", + fake_compute_api) + self.stubs.Set(nova.compute.api.ComputeAPI, "actions", + fake_compute_api) self.allow_admin = FLAGS.allow_admin_api def tearDown(self): @@ -274,6 +278,18 @@ class 
ServersTest(unittest.TestCase): res = req.get_response(nova.api.API('os')) self.assertEqual(res.status_int, 202) + def test_server_diagnostics(self): + req = webob.Request.blank("/v1.0/servers/1/diagnostics") + req.method = "GET" + res = req.get_response(nova.api.API("os")) + self.assertEqual(res.status_int, 404) + + def test_server_actions(self): + req = webob.Request.blank("/v1.0/servers/1/actions") + req.method = "GET" + res = req.get_response(nova.api.API("os")) + self.assertEqual(res.status_int, 404) + def test_server_reboot(self): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index bcb8a1526..757b1f462 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -102,13 +102,15 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) logging.info(_("Running instances: %s"), instances) - self.assertEqual(len(instances), 1) + + instance_count = len(instances) + self.assertNotEqual(instance_count, 0) self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) logging.info(_("After terminating instances: %s"), instances) - self.assertEqual(len(instances), 0) + self.assertEqual(instance_count, len(instances) + 1) def test_run_terminate_timestamps(self): """Make sure timestamps are set for launched and destroyed""" @@ -151,6 +153,16 @@ class ComputeTestCase(test.TestCase): self.compute.reboot_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + def test_diagnostics(self): + """Ensure instance diagnostics are available""" + instance_id = self._create_instance() + self.compute.get_diagnostics(self.context, instance_id) + + def test_actions(self): + """Ensure instance actions are available""" + instance_id = self._create_instance() + self.compute.get_actions(self.context, instance_id) + def test_console_output(self): """Make sure we can get console output from instance""" instance_id = self._create_instance() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ed2e4ffde..11baead8b 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -159,6 +159,16 @@ class XenAPIVMTestCase(test.TestCase): fake.reset() fakes.stub_out_db_instance_api(self.stubs) fake.create_network('fake', FLAGS.flat_network_bridge) + self.values = { + "name": 1, + "id": 1, + "project_id": self.project.id, + "user_id": self.user.id, + "image_id": 1, + "kernel_id": 2, + "ramdisk_id": 3, + "instance_type": "m1.large", + "mac_address": "aa:bb:cc:dd:ee:ff"} def test_list_instances_0(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) @@ -166,19 +176,19 @@ class XenAPIVMTestCase(test.TestCase): instances = conn.list_instances() self.assertEquals(instances, []) + def test_get_diagnostics(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + conn = xenapi_conn.get_connection(False) + + instance = db.instance_create(self.values) + conn.spawn(instance) + + conn.get_diagnostics(instance) + def test_spawn(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff', - } conn = xenapi_conn.get_connection(False) - instance = db.instance_create(values) + instance = 
db.instance_create(self.values) conn.spawn(instance) def check(): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 47fb6db53..cca7c1476 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -299,6 +299,10 @@ class VMHelper(HelperBase): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] + except (cls.XenAPI.Failure, KeyError) as e: + return {"Unable to retrieve diagnostics": e} + + try: diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: -- cgit From 5b8137b2f50a4ed3eb105e38cefa280927f1c2ea Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 29 Dec 2010 14:15:04 -0600 Subject: PEP8 cleanup --- nova/db/sqlalchemy/api.py | 2 +- nova/virt/xenapi/vmops.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5fd928a79..8e68d12a4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -865,7 +865,7 @@ def instance_get_actions(context, instance_id): filter_by(instance_id=instance_id).\ all(): actions[action.action] = action.error - return actions + return actions ################### diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 93f7f7695..1dfb9005c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -232,7 +232,9 @@ class VMOps(object): if vm is None: raise exception.NotFound(_("Instance not found %s") % instance.name) - return db.instance_get_actions(context.get_admin_context(), instance.id) + return db.instance_get_actions( + context.get_admin_context(), + instance.id) def get_console_output(self, instance): """Return snapshot of console""" -- cgit From c1acb68ef54309584816fbf5c93e38266accb2f0 Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Wed, 29 Dec 2010 15:04:21 -0800 Subject: Add the pool_recycle setting to enable connection pooling features for the sql engine. The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site --- nova/db/sqlalchemy/session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index e0d84c107..5f31743cd 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,7 +36,7 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, echo=False) + _ENGINE = create_engine(FLAGS.sql_connection, pool_recycle=3600, echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) -- cgit From 64078137ce12ee52fff710f5a262d57b4ace2809 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 16:29:15 -0800 Subject: Moved ec2 volume operations into a volume API interface for other components to use. Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop). 
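For illustration, a component outside the volume service could use the
new interface roughly like this (hypothetical snippet: the method
signatures come from the new nova/volume/__init__.py below, but the
variable names and surrounding context are invented):

    from nova import volume

    volume_api = volume.API()
    # Create a 10 GB volume; the cast to the scheduler happens inside.
    vol = volume_api.create(ctxt, 10, 'vol-example', 'an example volume')
    # Validate volume state before asking compute to attach it.
    volume_api.check_attach(ctxt, vol['id'])

Keeping the state checks in the volume API and the rpc cast to the
compute host in the compute API is what avoids the dependency loop
mentioned above.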
--- nova/api/ec2/cloud.py | 141 +++++++++++++----------------------------------- nova/compute/api.py | 34 +++++++++++- nova/volume/__init__.py | 91 +++++++++++++++++++++++++++---- 3 files changed, 149 insertions(+), 117 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8c687f173..74c73e0dd 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -38,12 +38,12 @@ from nova import flags from nova import quota from nova import rpc from nova import utils +from nova import volume from nova.compute import api as compute_api from nova.compute import instance_types FLAGS = flags.FLAGS -flags.DECLARE('storage_availability_zone', 'nova.volume.manager') InvalidInputException = exception.InvalidInputException @@ -89,8 +89,10 @@ class CloudController(object): def __init__(self): self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) + self.volume_api = volume.API() self.compute_api = compute_api.ComputeAPI(self.network_manager, - self.image_service) + self.image_service, + self.volume_api) self.setup() def __str__(self): @@ -451,15 +453,10 @@ class CloudController(object): "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): - if context.user.is_admin(): - volumes = db.volume_get_all(context) - else: - volumes = db.volume_get_all_by_project(context, context.project_id) - + volumes = self.volume_api.get(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes if volume_id is None or v['id'] in volume_id] - return {'volumeSet': volumes} def _format_volume(self, context, volume): @@ -498,88 +495,47 @@ class CloudController(object): return v def create_volume(self, context, size, **kwargs): - # check quota - if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", - context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % size) - vol = {} - vol['size'] = size - vol['user_id'] = context.user.id - vol['project_id'] = context.project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - vol['display_name'] = kwargs.get('display_name') - vol['display_description'] = kwargs.get('display_description') - volume_ref = db.volume_create(context, vol) - - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "create_volume", - "args": {"topic": FLAGS.volume_topic, - "volume_id": volume_ref['id']}}) - + volume = self.volume_api.create(context, size, + kwargs.get('display_name'), + kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} + def delete_volume(self, context, volume_id, **kwargs): + self.volume_api.delete(context, volume_id) + return True + + def update_volume(self, context, volume_id, **kwargs): + updatable_fields = ['display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + if changes: + self.volume_api.update(context, volume_id, kwargs) + return True + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_ref = db.volume_get(context, volume_id) - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - # TODO(vish): abstract status checking? - if volume_ref['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - if volume_ref['attach_status'] == "attached": - raise exception.ApiError(_("Volume is already attached")) - instance_id = ec2_id_to_id(instance_id) - instance_ref = self.compute_api.get_instance(context, instance_id) - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_ref['id'], - "instance_id": instance_ref['id'], - "mountpoint": device}}) - return {'attachTime': volume_ref['attach_time'], - 'device': volume_ref['mountpoint'], - 'instanceId': instance_ref['id'], + self.compute_api.attach_volume(context, instance_id, volume_id, device) + volume = self.volume_api.get(context, volume_id) + return {'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, 'requestId': context.request_id, - 'status': volume_ref['attach_status'], - 'volumeId': volume_ref['id']} + 'status': volume['attach_status'], + 'volumeId': volume_id} def detach_volume(self, context, volume_id, **kwargs): - volume_ref = db.volume_get(context, volume_id) - instance_ref = db.volume_get_instance(context.elevated(), - volume_ref['id']) - if not instance_ref: - raise exception.ApiError(_("Volume isn't attached to anything!")) - # TODO(vish): abstract status checking? 
- if volume_ref['status'] == "available": - raise exception.ApiError(_("Volume is already detached")) - try: - host = instance_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance_ref['id'], - "volume_id": volume_ref['id']}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - db.volume_detached(context) - instance_id = instance_ref['id'] - ec2_id = id_to_ec2_id(instance_id) - return {'attachTime': volume_ref['attach_time'], - 'device': volume_ref['mountpoint'], - 'instanceId': instance_id, + volume = self.volume_api.get(context, volume_id) + instance = self.compute_api.detach_volume(context, volume_id) + return {'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': id_to_ec2_id(instance['id']), 'requestId': context.request_id, - 'status': volume_ref['attach_status'], - 'volumeId': volume_ref['id']} + 'status': volume['attach_status'], + 'volumeId': volume_id} def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -588,16 +544,6 @@ class CloudController(object): lst = [lst] return [{label: x} for x in lst] - def update_volume(self, context, volume_id, **kwargs): - updatable_fields = ['display_name', 'display_description'] - changes = {} - for field in updatable_fields: - if field in kwargs: - changes[field] = kwargs[field] - if changes: - db.volume_update(context, volume_id, kwargs) - return True - def describe_instances(self, context, **kwargs): return self._format_describe_instances(context) @@ -805,21 +751,6 @@ class CloudController(object): db.instance_update(context, inst['id'], kwargs) return True - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume_ref = db.volume_get(context, volume_id) - if volume_ref['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() - db.volume_update(context, volume_ref['id'], {'status': 'deleting', - 'terminated_at': now}) - host = volume_ref['host'] - rpc.cast(context, - db.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "delete_volume", - "args": {"volume_id": volume_ref['id']}}) - return True - def describe_images(self, context, image_id=None, **kwargs): # Note: image_id is a list! 
        images = self.image_service.index(context)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28af434e3..870fcdbe4 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -30,6 +30,7 @@ from nova import flags
 from nova import quota
 from nova import rpc
 from nova import utils
+from nova import volume
 from nova.compute import instance_types
 from nova.db import base
 
@@ -44,13 +45,17 @@ def generate_default_hostname(instance_id):
 class ComputeAPI(base.Base):
     """API for interacting with the compute manager."""
 
-    def __init__(self, network_manager=None, image_service=None, **kwargs):
+    def __init__(self, network_manager=None, image_service=None,
+                 volume_api=None, **kwargs):
         if not network_manager:
             network_manager = utils.import_object(FLAGS.network_manager)
         self.network_manager = network_manager
         if not image_service:
             image_service = utils.import_object(FLAGS.image_service)
         self.image_service = image_service
+        if not volume_api:
+            volume_api = volume.API()
+        self.volume_api = volume_api
         super(ComputeAPI, self).__init__(**kwargs)
 
     def get_network_topic(self, context, instance_id):
@@ -298,3 +303,30 @@ class ComputeAPI(base.Base):
                  self.db.queue_get_for(context, FLAGS.compute_topic, host),
                  {"method": "unrescue_instance",
                   "args": {"instance_id": instance['id']}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                       "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get_instance(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index d6e944fc0..48ecdbe68 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -17,15 +17,84 @@
 # under the License.
 
 """
-:mod:`nova.volume` -- Nova Block Storage
-=====================================================
-
-.. automodule:: nova.volume
-    :platform: Unix
-.. moduleauthor:: Jesse Andrews
-.. moduleauthor:: Devin Carlen
-.. moduleauthor:: Vishvananda Ishaya
-.. moduleauthor:: Joshua McKenty
-.. moduleauthor:: Manish Singh
-.. moduleauthor:: Andy Smith
+Handles all requests relating to volumes.
 """
+
+import datetime
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+
+class API(base.Base):
+    """API for interacting with the volume manager."""
+
+    def create(self, context, size, name, description):
+        if quota.allowed_volumes(context, 1, size) < 1:
+            logging.warn("Quota exceeded for %s, tried to create %sG volume",
+                         context.project_id, size)
+            raise quota.QuotaError("Volume quota exceeded.
You cannot " + "create a volume of size %s" % size) + + options = { + 'size': size, + 'user_id': context.user.id, + 'project_id': context.project_id, + 'availability_zone': FLAGS.storage_availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': name, + 'display_description': description} + + volume = self.db.volume_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volume", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume['id']}}) + return volume + + def delete(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + now = datetime.datetime.utcnow() + self.db.volume_update(context, volume_id, {'status': 'deleting', + 'terminated_at': now}) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume_id}}) + + def update(self, context, volume_id, fields): + self.db.volume_update(context, volume_id, fields) + + def get(self, context, volume_id=None): + if volume_id is not None: + return self.db.volume_get(context, volume_id) + if context.user.is_admin(): + return self.db.volume_get_all(context) + return self.db.volume_get_all_by_project(context, context.project_id) + + def check_attach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + if volume['attach_status'] == "attached": + raise exception.ApiError(_("Volume is already attached")) + + def check_detach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? + if volume['status'] == "available": + raise exception.ApiError(_("Volume is already detached")) -- cgit From b1a08af498ed6b52e3373a23196ded0396e6d34b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 20:30:36 -0800 Subject: Cleaned up the compute API, mostly consistency with other parts of the system and renaming redundant module names. 
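As a sketch of the renaming (distilled from the call-site changes
below; variable names invented, error handling omitted), callers move
from:

    from nova.compute import api as compute_api

    api = compute_api.ComputeAPI()
    instance = api.get_instance(ctxt, instance_id)
    api.delete_instance(ctxt, instance_id)

to:

    from nova import compute

    api = compute.API()
    instance = api.get(ctxt, instance_id)
    api.delete(ctxt, instance_id)

Dropping the redundant "instance"/"ComputeAPI" naming leaves the module
path (nova.compute) to carry that context.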
--- nova/api/ec2/cloud.py | 24 +-- nova/api/openstack/servers.py | 22 +- nova/compute/__init__.py | 324 ++++++++++++++++++++++++++++-- nova/compute/api.py | 332 ------------------------------- nova/tests/api/openstack/test_servers.py | 8 +- nova/tests/test_compute.py | 10 +- 6 files changed, 341 insertions(+), 379 deletions(-) delete mode 100644 nova/compute/api.py (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 74c73e0dd..cc58f3cfe 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -31,6 +31,7 @@ import os from nova import context import IPy +from nova import compute from nova import crypto from nova import db from nova import exception @@ -39,7 +40,6 @@ from nova import quota from nova import rpc from nova import utils from nova import volume -from nova.compute import api as compute_api from nova.compute import instance_types @@ -90,9 +90,9 @@ class CloudController(object): self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) self.volume_api = volume.API() - self.compute_api = compute_api.ComputeAPI(self.network_manager, - self.image_service, - self.volume_api) + self.compute_api = compute.API(self.network_manager, + self.image_service, + self.volume_api) self.setup() def __str__(self): @@ -116,7 +116,7 @@ class CloudController(object): def _get_mpi_data(self, context, project_id): result = {} - for instance in self.compute_api.get_instances(context, project_id): + for instance in self.compute_api.get(context, project_id=project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], instance['vcpus']) @@ -440,7 +440,7 @@ class CloudController(object): # instance_id is passed in as a list of instances ec2_id = instance_id[0] instance_id = ec2_id_to_id(ec2_id) - instance_ref = self.compute_api.get_instance(context, instance_id) + instance_ref = self.compute_api.get(context, instance_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -561,7 +561,7 @@ class CloudController(object): instances = db.instance_get_all_by_reservation(context, reservation_id) else: - instances = self.compute_api.get_instances(context) + instances = self.compute_api.get(context) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -664,7 +664,7 @@ class CloudController(object): def associate_address(self, context, instance_id, public_ip, **kwargs): instance_id = ec2_id_to_id(instance_id) - instance_ref = self.compute_api.get_instance(context, instance_id) + instance_ref = self.compute_api.get(context, instance_id) fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) @@ -695,7 +695,7 @@ class CloudController(object): def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) - instances = self.compute_api.create_instances(context, + instances = self.compute_api.create(context, instance_types.get_by_type(kwargs.get('instance_type', None)), kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), @@ -703,7 +703,7 @@ class CloudController(object): kernel_id=kwargs.get('kernel_id', None), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), - description=kwargs.get('display_description'), + display_description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), 
security_group=kwargs.get('security_group'), @@ -717,7 +717,7 @@ class CloudController(object): logging.debug("Going to start terminating instances") for ec2_id in instance_id: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.delete_instance(context, instance_id) + self.compute_api.delete(context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): @@ -747,7 +747,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: instance_id = ec2_id_to_id(ec2_id) - inst = self.compute_api.get_instance(context, instance_id) + inst = self.compute_api.get(context, instance_id) db.instance_update(context, inst['id'], kwargs) return True diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index cac5428b9..30b2160bd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -20,12 +20,12 @@ import traceback from webob import exc +from nova import compute from nova import exception from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.auth import manager as auth_manager -from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack @@ -81,7 +81,7 @@ class Controller(wsgi.Controller): "status", "progress"]}}} def __init__(self): - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute.API() super(Controller, self).__init__() def index(self, req): @@ -97,8 +97,7 @@ class Controller(wsgi.Controller): entity_maker - either _entity_detail or _entity_inst """ - instance_list = self.compute_api.get_instances( - req.environ['nova.context']) + instance_list = self.compute_api.get(req.environ['nova.context']) limited_list = common.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) @@ -106,8 +105,7 @@ class Controller(wsgi.Controller): def show(self, req, id): """ Returns server details by server id """ try: - instance = self.compute_api.get_instance( - req.environ['nova.context'], int(id)) + instance = self.compute_api.get(req.environ['nova.context'], id) return _entity_detail(instance) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) @@ -115,8 +113,7 @@ class Controller(wsgi.Controller): def delete(self, req, id): """ Destroys a server """ try: - self.compute_api.delete_instance(req.environ['nova.context'], - int(id)) + self.compute_api.delete(req.environ['nova.context'], id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() @@ -129,12 +126,12 @@ class Controller(wsgi.Controller): key_pair = auth_manager.AuthManager.get_key_pairs( req.environ['nova.context'])[0] - instances = self.compute_api.create_instances( + instances = self.compute_api.create( req.environ['nova.context'], instance_types.get_by_flavor_id(env['server']['flavorId']), env['server']['imageId'], display_name=env['server']['name'], - description=env['server']['name'], + display_description=env['server']['name'], key_name=key_pair['name'], key_data=key_pair['public_key']) return _entity_inst(instances[0]) @@ -152,9 +149,8 @@ class Controller(wsgi.Controller): update_dict['display_name'] = inst_dict['server']['name'] try: - self.compute_api.update_instance(req.environ['nova.context'], - instance['id'], - **update_dict) + self.compute_api.update(req.environ['nova.context'], id, + **update_dict) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return 
exc.HTTPNoContent() diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index a5df2ec1a..648c0ff6a 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -17,16 +17,316 @@ # under the License. """ -:mod:`nova.compute` -- Compute Nodes using LibVirt -===================================================== - -.. automodule:: nova.compute - :platform: Unix - :synopsis: Thin wrapper around libvirt for VM mgmt. -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith +Handles all requests relating to instances (guest vms). """ + +import datetime +import logging +import re +import time + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova import volume +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(instance_id): + """Default function to generate a hostname given an instance reference.""" + return str(instance_id) + + +class API(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, network_manager=None, image_service=None, + volume_api=None, **kwargs): + if not network_manager: + network_manager = utils.import_object(FLAGS.network_manager) + self.network_manager = network_manager + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) + + def get_network_topic(self, context, instance_id): + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + user_data=None, generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type."
% + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', None) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', None) + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + logging.debug("Creating a raw instance") + # Make sure we have access to kernel and ramdisk (if not raw) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'image_id': image_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': display_name, + 'display_description': display_description, + 'user_data': user_data or '', + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug(_("Going to run %s instances..."), num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(instance_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % instance_id + + instance = self.update(context, instance_id, **updates) + instances.append(instance) + + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + context.project_id, context.user_id, instance_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + db.security_group_create(context, values) + + def update(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. 
+ + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + return self.db.instance_update(context, instance_id, kwargs) + + def delete(self, context, instance_id): + logging.debug("Going to try and terminate %s" % instance_id) + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning(_("Instance %s was not found during terminate"), + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning(_("Instance %s is already being terminated"), + instance_id) + return + + self.update(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance['id']}}) + else: + self.db.instance_destroy(context, instance['id']) + + def get(self, context, instance_id=None, project_id=None): + """Get one or more instances, possibly filtered by project + ID or user ID. If there is no filter and the context is + an admin, it will retrieve all instances in the system.""" + if instance_id is not None: + return self.db.instance_get_by_id(context, instance_id) + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, + project_id) + return self.db.instance_get_all(context) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance['id']}}) + + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance['id']}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance['id']}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance['id']}}) + + def attach_volume(self, context, instance_id, volume_id, device): + if not re.match("^/dev/[a-z]d[a-z]+$", device): + raise exception.ApiError(_("Invalid device specified: %s. 
" + "Example device: /dev/vdb") % device) + self.volume_api.check_attach(context, volume_id) + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + + def detach_volume(self, context, volume_id): + instance = self.db.volume_get_instance(context.elevated(), volume_id) + if not instance: + raise exception.ApiError(_("Volume isn't attached to anything!")) + self.volume_api.check_detach(context, volume_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume_id}}) + return instance diff --git a/nova/compute/api.py b/nova/compute/api.py deleted file mode 100644 index 870fcdbe4..000000000 --- a/nova/compute/api.py +++ /dev/null @@ -1,332 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all API requests relating to instances (guest vms). 
-""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class ComputeAPI(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(ComputeAPI, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create_instances(self, context, instance_type, image_id, min_count=1, - max_count=1, kernel_id=None, ramdisk_id=None, - display_name='', description='', key_name=None, - key_data=None, security_group='default', - user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and - other arguments check out ok.""" - - num_instances = quota.allowed_instances(context, max_count, - instance_type) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - #No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - type_data = instance_types.INSTANCE_TYPES[instance_type] - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update_instance(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - db.security_group_create(context, values) - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. 
- - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update_instance(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance['id']}}) - else: - self.db.instance_destroy(context, instance['id']) - - def get_instances(self, context, project_id=None): - """Get all instances, possibly filtered by project ID or - user ID. If there is no filter and the context is an admin, - it will retreive all instances in the system.""" - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, project_id) - return self.db.instance_get_all(context) - - def get_instance(self, context, instance_id): - return self.db.instance_get_by_id(context, instance_id) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance['id']}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance['id']}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance['id']}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance['id']}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance['id']}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise 
exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get_instance(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 173772364..1cdfb86f1 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -86,10 +86,8 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) - self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', - fake_compute_api) + self.stubs.Set(nova.compute.API, 'pause', fake_compute_api) + self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api) self.allow_admin = FLAGS.allow_admin_api def tearDown(self): @@ -100,7 +98,7 @@ class ServersTest(unittest.TestCase): req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(nova.api.API('os')) res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') def test_get_server_list(self): diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 348bb3351..f7067b98a 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -22,6 +22,7 @@ Tests For Compute import datetime import logging +from nova import compute from nova import context from nova import db from nova import exception @@ -29,7 +30,6 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.compute import api as compute_api FLAGS = flags.FLAGS @@ -44,7 +44,7 @@ class ComputeTestCase(test.TestCase): stub_network=True, network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute.API() self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') @@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase): """Verify that an instance cannot be created without a display_name.""" cases = [dict(), dict(display_name=None)] for instance in cases: - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, **instance) try: self.assertNotEqual(ref[0].display_name, None) @@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(self.context, ref[0]['id']) def test_create_instance_associates_security_groups(self): - """Make sure create_instances associates security 
groups""" + """Make sure create associates security groups""" values = {'name': 'default', 'description': 'default', 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, security_group=['default']) try: self.assertEqual(len(ref[0]['security_groups']), 1) -- cgit From 750a0c9b413ad3912d522355332cffadd9667d0c Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 21:41:42 -0800 Subject: Converted a few more ec2 calls to use compute api. --- nova/api/ec2/cloud.py | 13 ++++--------- nova/compute/__init__.py | 12 +++++++++--- 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc58f3cfe..5ffc301be 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -138,7 +138,7 @@ class CloudController(object): def get_metadata(self, address): ctxt = context.get_admin_context() - instance_ref = db.fixed_ip_get_instance(ctxt, address) + instance_ref = self.compute_api.get(ctxt, fixed_ip=address) if instance_ref is None: return None mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) @@ -555,13 +555,9 @@ class CloudController(object): assert len(i) == 1 return i[0] - def _format_instances(self, context, reservation_id=None): + def _format_instances(self, context, **kwargs): reservations = {} - if reservation_id: - instances = db.instance_get_all_by_reservation(context, - reservation_id) - else: - instances = self.compute_api.get(context) + instances = self.compute_api.get(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -747,8 +743,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: instance_id = ec2_id_to_id(ec2_id) - inst = self.compute_api.get(context, instance_id) - db.instance_update(context, inst['id'], kwargs) + self.compute_api.update(context, instance_id, **kwargs) return True def describe_images(self, context, image_id=None, **kwargs): diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 648c0ff6a..fd1cdcd1b 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -243,12 +243,18 @@ class API(base.Base): else: self.db.instance_destroy(context, instance['id']) - def get(self, context, instance_id=None, project_id=None): - """Get one or more instances, possibly filtered by project - ID or user ID. If there is no filter and the context is + def get(self, context, instance_id=None, project_id=None, + reservation_id=None, fixed_ip=None): + """Get one or more instances, possibly filtered by one of the + given parameters. 
If there is no filter and the context is an admin, it will retreive all instances in the system.""" if instance_id is not None: return self.db.instance_get_by_id(context, instance_id) + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, -- cgit From ba31a61ae6348bffbd70d5875f12a540d49e8885 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 30 Dec 2010 02:20:31 -0500 Subject: pep 8 --- nova/api/ec2/__init__.py | 7 ++++++- nova/api/ec2/metadatarequesthandler.py | 1 + nova/api/openstack/__init__.py | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 7bec7f81e..aa3bfaeb4 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -313,7 +313,6 @@ class Executor(wsgi.Application): resp.body = str(result) return resp - def _error(self, req, code, message): logging.error("%s: %s", code, message) resp = webob.Response() @@ -325,6 +324,7 @@ class Executor(wsgi.Application): '?' % (code, message)) return resp + class Versions(wsgi.Application): @webob.dec.wsgify @@ -344,23 +344,28 @@ class Versions(wsgi.Application): ] return ''.join('%s\n' % v for v in versions) + def authenticate_factory(global_args, **local_args): def authenticator(app): return Authenticate(app) return authenticator + def router_factory(global_args, **local_args): def router(app): return Router(app) return router + def authorizer_factory(global_args, **local_args): def authorizer(app): return Authorizer(app) return authorizer + def executor_factory(global_args, **local_args): return Executor() + def versions_factory(global_args, **local_args): return Versions() diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 9d1594ae7..a57a6698a 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -80,5 +80,6 @@ class MetadataRequestHandler(object): raise webob.exc.HTTPNotFound() return self.print_data(data) + def metadata_factory(global_args, **local_args): return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 5dd092a1f..dc7794876 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -129,5 +129,6 @@ class Versions(wsgi.Application): def router_factory(global_cof, **local_conf): return APIRouter() + def versions_factory(global_conf, **local_conf): return Versions() -- cgit From 601b19291a7cf1bcda7bcd4ebf27e4eefe3e28fd Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 30 Dec 2010 14:09:16 -0600 Subject: Calling compute api directly from OpenStack image create --- nova/api/openstack/images.py | 10 +++++----- nova/image/glance.py | 4 ---- 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 2ee73480f..867ee5a7e 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -25,7 +25,7 @@ import nova.image.service from nova.api.openstack import common from nova.api.openstack import faults - +from nova.compute import api as compute_api FLAGS = flags.FLAGS @@ -127,11 +127,11 @@ class Controller(wsgi.Controller): raise faults.Fault(exc.HTTPNotFound()) def create(self, req): - ctxt = 
req.environ['nova.context'] + context = req.environ['nova.context'] env = self._deserialize(req.body, req) - data = {'instance_id': env["image"]["serverId"], - 'name': env["image"]["name"]} - return dict(image=self._service.create(ctxt, data)) + instance_id = env["image"]["serverId"] + name = env["image"]["name"] + return compute_api.ComputeAPI().snapshot(context, instance_id, name) def update(self, req, id): # Users may not modify public images, and that's all that diff --git a/nova/image/glance.py b/nova/image/glance.py index b61cba10e..cc3192e7c 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -203,10 +203,6 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises AlreadyExists if the image already exist. """ - instance_id = data["instance_id"] - name = data["name"] - compute_api.ComputeAPI().snapshot(context, instance_id, name) - # FIXME(sirp): This needs to be reworked for new-style glance return self.parallax.add_image_metadata(data) def update(self, context, image_id, data): -- cgit From 42f6a993bcc4d0bc8823e4d039b1f59a6d6758a8 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 15:13:32 -0600 Subject: Implemented review feedback --- nova/compute/manager.py | 6 +++--- nova/virt/fake.py | 3 --- nova/virt/libvirt_conn.py | 3 --- nova/virt/xenapi/vmops.py | 10 ---------- nova/virt/xenapi_conn.py | 4 ---- 5 files changed, 3 insertions(+), 23 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 924b72004..d2ade8f52 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -299,7 +299,6 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this server.""" - context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: @@ -310,12 +309,13 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def get_actions(self, context, instance_id): """Retrieve actions for an instance on this server.""" - context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) logging.debug(_("instance %s: retrieving actions"), instance_ref["internal_id"]) - return self.driver.get_actions(instance_ref) + return self.db.instance_get_actions( + context, + instance_id) def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" diff --git a/nova/virt/fake.py b/nova/virt/fake.py index e9d1e4bd7..880772b22 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -205,9 +205,6 @@ class FakeConnection(object): def get_diagnostics(self, instance_name): pass - def get_actions(self, instance_name): - pass - def list_disks(self, instance_name): """ Return the IDs of all the virtual disks attached to the specified diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8407938c6..cf45ac13b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -583,9 +583,6 @@ class LibvirtConnection(object): def get_diagnostics(self, instance_name): raise exception.APIError("diagnostics are not supported for libvirt") - def get_actions(self, instance_name): - raise exception.APIError("actions are not supported for libvirt") - def get_disks(self, instance_name): """ Note that this function takes an instance name, not an Instance, so diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1dfb9005c..332e603db 100644 --- 
a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -226,16 +226,6 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) - def get_actions(self, instance): - """Return VM action history""" - vm = VMHelper.lookup(self._session, instance.name) - if vm is None: - raise exception.NotFound(_("Instance not found %s") % - instance.name) - return db.instance_get_actions( - context.get_admin_context(), - instance.id) - def get_console_output(self, instance): """Return snapshot of console""" # TODO: implement this to fix pylint! diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 92274daf9..a7ec22012 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -163,10 +163,6 @@ class XenAPIConnection(object): """Return data about VM diagnostics""" return self._vmops.get_diagnostics(instance) - def get_actions(self, instance): - """Return VM action history""" - return self._vmops.get_actions(instance) - def get_console_output(self, instance): """Return snapshot of console""" return self._vmops.get_console_output(instance) -- cgit From 00abbb4401c87cca9f1540d7be1c0119fc7aee44 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 16:06:48 -0600 Subject: Review feedback --- nova/compute/api.py | 6 +----- nova/compute/manager.py | 11 ----------- nova/tests/test_compute.py | 11 ++--------- nova/tests/test_xenapi.py | 31 ++++++------------------------- 4 files changed, 9 insertions(+), 50 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 4fcefb384..3f700349a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -305,11 +305,7 @@ class ComputeAPI(base.Base): def actions(self, context, instance_id): """Retrieve actions for the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) - host = instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_actions", - "args": {"instance_id": instance["id"]}}) + return self.db.instance_get_actions(context, instance["id"]) def suspend(self, context, instance_id): """suspend the instance with instance_id""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c4768d266..c9aff75ac 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -327,17 +327,6 @@ class ComputeManager(manager.Manager): instance_ref["internal_id"]) return self.driver.get_diagnostics(instance_ref) - @exception.wrap_exception - def get_actions(self, context, instance_id): - """Retrieve actions for an instance on this server.""" - instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_("instance %s: retrieving actions"), - instance_ref["internal_id"]) - return self.db.instance_get_actions( - context, - instance_id) - def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e3679b6b6..2664adc0d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -102,15 +102,13 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) logging.info(_("Running instances: %s"), instances) - - instance_count = len(instances) - self.assertNotEqual(instance_count, 0) + self.assertEqual(len(instances), 1) self.compute.terminate_instance(self.context, instance_id) instances = 
db.instance_get_all(context.get_admin_context()) logging.info(_("After terminating instances: %s"), instances) - self.assertEqual(instance_count, len(instances) + 1) + self.assertEqual(len(instances), 0) def test_run_terminate_timestamps(self): """Make sure timestamps are set for launched and destroyed""" @@ -158,11 +156,6 @@ class ComputeTestCase(test.TestCase): instance_id = self._create_instance() self.compute.get_diagnostics(self.context, instance_id) - def test_actions(self): - """Ensure instance actions are available""" - instance_id = self._create_instance() - self.compute.get_actions(self.context, instance_id) - def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3528d7bfe..a09750672 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -156,10 +156,11 @@ class XenAPIVMTestCase(test.TestCase): self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' - - fake.reset() - fakes.stub_out_db_instance_api(self.stubs) - fake.create_network('fake', FLAGS.flat_network_bridge) + xenapi_fake.reset() + db_fakes.stub_out_db_instance_api(self.stubs) + xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.get_connection(False) self.values = { "name": 1, "id": 1, @@ -171,12 +172,6 @@ class XenAPIVMTestCase(test.TestCase): "instance_type": "m1.large", "mac_address": "aa:bb:cc:dd:ee:ff"} - xenapi_fake.reset() - db_fakes.stub_out_db_instance_api(self.stubs) - xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.get_connection(False) - def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) @@ -228,11 +223,6 @@ class XenAPIVMTestCase(test.TestCase): check() def test_spawn(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - conn = xenapi_conn.get_connection(False) - instance = db.instance_create(self.values) - conn.spawn(instance) - instance = self._create_instance() def check(): @@ -275,15 +265,6 @@ class XenAPIVMTestCase(test.TestCase): def _create_instance(self): """Creates and spawns a test instance""" - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff', - } - instance = db.instance_create(values) + instance = db.instance_create(self.values) self.conn.spawn(instance) return instance -- cgit From 5c34edb3fc215a519f1a00e8c3ff6223ee050041 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 30 Dec 2010 16:13:42 -0600 Subject: Fail --- nova/db/sqlalchemy/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 693db8d23..baa743765 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -452,7 +452,7 @@ class AuthToken(BASE, NovaBase): """ __tablename__ = 'auth_tokens' token_hash = Column(String(255), primary_key=True) - user_id = Column(Integer) + user_id = Column(String(255)) server_manageent_url = Column(String(255)) storage_url = Column(String(255)) cdn_management_url = Column(String(255)) -- cgit From 
d89b3a4b5c1f6bfe1f59da6c33cb469da589e866 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 16:27:31 -0600 Subject: Make compute.api methods verbs --- nova/api/openstack/servers.py | 4 ++-- nova/compute/api.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index e1c2d36ea..c5cbe21ef 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -222,9 +222,9 @@ class Controller(wsgi.Controller): def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] - return self.compute_api.diagnostics(ctxt, id) + return self.compute_api.get_diagnostics(ctxt, id) def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] - return self.compute_api.actions(ctxt, id) + return self.compute_api.get_actions(ctxt, id) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3f700349a..28189e4a9 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -293,7 +293,7 @@ class ComputeAPI(base.Base): {"method": "unpause_instance", "args": {"instance_id": instance['id']}}) - def diagnostics(self, context, instance_id): + def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) host = instance["host"] @@ -302,7 +302,7 @@ class ComputeAPI(base.Base): {"method": "get_diagnostics", "args": {"instance_id": instance["id"]}}) - def actions(self, context, instance_id): + def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) return self.db.instance_get_actions(context, instance["id"]) -- cgit From d1129fa4b14d9edba8d6c4c3cb2d13e7c66c1391 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 17:02:16 -0600 Subject: Oopsies --- nova/tests/api/openstack/test_servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 321ddceee..70ff714e6 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -95,9 +95,9 @@ class ServersTest(unittest.TestCase): fake_compute_api) self.stubs.Set(nova.compute.api.ComputeAPI, 'resume', fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, "diagnostics", + self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics", fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, "actions", + self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions", fake_compute_api) self.allow_admin = FLAGS.allow_admin_api -- cgit From 71d715d422a746f4951877d8ff76e0ace355281e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 30 Dec 2010 15:43:41 -0800 Subject: Moved network operation code in ec2 api into a generic network API class. Removed a circular dependency with compute/quota. 
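The effect is that the floating-ip plumbing (the quota check, the choice of network host, and the RPC casts) moves out of the EC2 layer into a new nova.network.API class, with compute.API only resolving the instance's fixed ip before handing off. A rough usage sketch based on the diff below; context and instance_id are assumed for illustration and are not part of the patch:

    from nova import compute
    from nova import network

    network_api = network.API()
    compute_api = compute.API()

    # quota check and RPC to a network host now happen inside network.API
    address = network_api.allocate_floating_ip(context)
    compute_api.associate_floating_ip(context, instance_id, address)
    network_api.disassociate_floating_ip(context, address)
    network_api.release_floating_ip(context, address)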
--- nova/api/ec2/cloud.py | 59 +++++------------------------------- nova/compute/__init__.py | 20 +++++++----- nova/network/__init__.py | 79 ++++++++++++++++++++++++++++++++++++++++-------- nova/quota.py | 8 ++--- nova/tests/test_quota.py | 6 ++-- 5 files changed, 93 insertions(+), 79 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 5ffc301be..d68c598e9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -36,7 +36,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags -from nova import quota +from nova import network from nova import rpc from nova import utils from nova import volume @@ -87,11 +87,10 @@ class CloudController(object): sent to the other nodes. """ def __init__(self): - self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) + self.network_api = network.API() self.volume_api = volume.API() - self.compute_api = compute.API(self.network_manager, - self.image_service, + self.compute_api = compute.API(self.image_service, self.network_api, self.volume_api) self.setup() @@ -629,64 +628,20 @@ class CloudController(object): return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): - # check quota - if quota.allowed_floating_ips(context, 1) < 1: - logging.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) - # NOTE(vish): We don't know which network host should get the ip - # when we allocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - public_ip = rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + public_ip = self.network_api.allocate_floating_ip(context) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): We don't know which network host should get the ip - # when we deallocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - rpc.cast(context, - FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + self.network_api.release_floating_ip(context, public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): instance_id = ec2_id_to_id(instance_id) - instance_ref = self.compute_api.get(context, instance_id) - fixed_address = db.instance_get_fixed_address(context, - instance_ref['id']) - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. 
- network_topic = self.compute_api.get_network_topic(context, - instance_id) - rpc.cast(context, - network_topic, - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip_ref['address'], - "fixed_address": fixed_address}}) + self.compute_api.associate_floating_ip(context, instance_id, public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. - if not floating_ip_ref.get('fixed_ip'): - raise exception.ApiError('Address is not associated.') - host = floating_ip_ref['fixed_ip']['network']['host'] - topic = db.queue_get_for(context, FLAGS.network_topic, host) - rpc.cast(context, - topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + self.network_api.disassociate_floating_ip(context, public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index fd1cdcd1b..14c242641 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -27,6 +27,7 @@ import time from nova import db from nova import exception from nova import flags +from nova import network from nova import quota from nova import rpc from nova import utils @@ -45,14 +46,14 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api if not volume_api: volume_api = volume.API() self.volume_api = volume_api @@ -83,8 +84,9 @@ class API(base.Base): """Create the number of instances requested if quota and other arguments check out ok.""" + type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, - instance_type) + type_data['vcpus']) if num_instances < min_count: logging.warn("Quota exceeeded for %s, tried to run %s instances", context.project_id, min_count) @@ -127,7 +129,6 @@ class API(base.Base): key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] - type_data = instance_types.INSTANCE_TYPES[instance_type] base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, @@ -336,3 +337,8 @@ class API(base.Base): "args": {"instance_id": instance['id'], "volume_id": volume_id}}) return instance + + def associate_floating_ip(self, context, instance_id, address): + instance = self.get(context, instance_id) + self.network_api.associate_floating_ip(context, address, + instance['fixed_ip']) diff --git a/nova/network/__init__.py b/nova/network/__init__.py index dcc54db09..cbd912047 100644 --- a/nova/network/__init__.py +++ b/nova/network/__init__.py @@ -17,16 +17,71 @@ # under the License. """ -:mod:`nova.network` -- Network Nodes -===================================================== - -.. 
automodule:: nova.network - :platform: Unix - :synopsis: Network is responsible for managing networking -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith +Handles all requests relating to networks. """ + +import logging + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS + + +class API(base.Base): + """API for interacting with the network manager.""" + + def allocate_floating_ip(self, context): + if quota.allowed_floating_ips(context, 1) < 1: + logging.warn(_("Quota exceeded for %s, tried to allocate " + "address"), + context.project_id) + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + return rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + + def release_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + # NOTE(vish): We don't know which network host should get the ip + # when we deallocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + rpc.cast(context, + FLAGS.network_topic, + {"method": "deallocate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) + + def associate_floating_ip(self, context, floating_ip, fixed_ip): + if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) + floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + # NOTE(vish): Perhaps we should just pass this on to compute and + # let compute communicate with network. + host = fixed_ip['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "associate_floating_ip", + "args": {"floating_address": floating_ip['address'], + "fixed_address": fixed_ip['address']}}) + + def disassociate_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + if not floating_ip.get('fixed_ip'): + raise exception.ApiError('Address is not associated.') + # NOTE(vish): Get the topic from the host name of the network of + # the associated fixed ip. 
+ host = floating_ip['fixed_ip']['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "disassociate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) diff --git a/nova/quota.py b/nova/quota.py index f6ca9f77c..12dbfcf43 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips from nova import db from nova import exception from nova import flags -from nova.compute import instance_types FLAGS = flags.FLAGS @@ -54,7 +53,7 @@ def get_quota(context, project_id): return rval -def allowed_instances(context, num_instances, instance_type): +def allowed_instances(context, num_instances, cores_per_instance): """Check quota and return min(num_instances, allowed_instances)""" project_id = context.project_id context = context.elevated() @@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type): quota = get_quota(context, project_id) allowed_instances = quota['instances'] - used_instances allowed_cores = quota['cores'] - used_cores - type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus'] - num_cores = num_instances * type_cores + num_cores = num_instances * cores_per_instance allowed_instances = min(allowed_instances, - int(allowed_cores // type_cores)) + int(allowed_cores // cores_per_instance)) return min(num_instances, allowed_instances) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 8cf2a5e54..c15818774 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -78,14 +78,14 @@ class QuotaTestCase(test.TestCase): def test_quota_overrides(self): """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 10) db.quota_destroy(self.context, self.project.id) -- cgit From e453e2761daee6e96da9575a860e694a065c68c0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 18:12:10 -0600 Subject: Removed problematic test --- nova/tests/test_compute.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2664adc0d..1fb9143f1 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -151,11 +151,6 @@ class ComputeTestCase(test.TestCase): self.compute.reboot_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) - def test_diagnostics(self): - """Ensure instance diagnostics are available""" - instance_id = self._create_instance() - self.compute.get_diagnostics(self.context, instance_id) - def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() -- cgit From 66f8e28fb4f4a898803ac6a38974a9fa804612d0 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 30 Dec 2010 18:12:57 -0600 Subject: completed the basic xenstore read/write/delete functionality --- nova/virt/xenapi/vmops.py | 222 
+++++++++++++++++++++++++++++++++++++++++----- nova/virt/xenapi_conn.py | 28 +++--- 2 files changed, 220 insertions(+), 30 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0517209e2..5c9455db3 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -18,6 +19,7 @@ Management class for VM-related functions (spawn, reboot, etc). """ +import json import logging from nova import db @@ -36,7 +38,6 @@ class VMOps(object): """ Management class for VM-related tasks """ - def __init__(self, session): self.XenAPI = session.get_imported_xenapi() self._session = session @@ -120,6 +121,20 @@ class VMOps(object): timer.f = _wait_for_boot return timer.start(interval=0.5, now=True) + def _get_vm_opaque_ref(self, instance_or_vm): + """Refactored out the common code of many methods that receive either + a vm name or a vm instance, and want a vm instance in return. + """ + try: + instance_name = instance_or_vm.name + vm = VMHelper.lookup(self._session, instance_name) + except AttributeError: + # A vm opaque ref was passed + vm = instance_or_vm + if vm is None: + raise Exception(_('Instance not present %s') % instance_name) + return vm + def snapshot(self, instance, name): """ Create snapshot from a running VM instance @@ -168,11 +183,7 @@ class VMOps(object): def reboot(self, instance): """Reboot VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) self._session.wait_for_task(instance.id, task) @@ -215,27 +226,19 @@ class VMOps(object): ret = None try: ret = self._session.wait_for_task(instance_id, task) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.warn(exc) callback(ret) def pause(self, instance, callback): """Pause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.pause', vm) self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.unpause', vm) self._wait_with_callback(instance.id, task, callback) @@ -270,9 +273,7 @@ class VMOps(object): def get_diagnostics(self, instance_id): """Return data about VM diagnostics""" - vm = VMHelper.lookup(self._session, instance_id) - if vm is None: - raise exception.NotFound(_("Instance not found %s") % instance_id) + vm = self._get_vm_opaque_ref(instance) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) @@ -280,3 +281,184 @@ class VMOps(object): """Return snapshot of console""" # TODO: implement this to fix pylint! 
return 'FAKE CONSOLE OUTPUT of instance' + + def list_from_xenstore(self, vm, path): + """Runs the xenstore-ls command to get a listing of all records + from 'path' downward. Returns a dict with the sub-paths as keys, + and the value stored in those paths as values. If nothing is + found at that path, returns None. + """ + ret = self._make_xenstore_call('list_records', vm, path) + try: + return json.loads(ret) + except ValueError: + # Not a valid JSON value + return ret + + def read_from_xenstore(self, vm, path): + """Returns the value stored in the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the read process. + """ + try: + ret = self._make_xenstore_call('read_record', vm, path, + {'ignore_missing_path': 'True'}) + except self.XenAPI.Failure, e: + print "XENERR", e + return None + except StandardError, e: + print "ERR", type(e), e, e.msg + return None + try: + return json.loads(ret) + except ValueError: + # Not a JSON object + if ret == "None": + # Can't marshall None over RPC calls. + return None + return ret + + def write_to_xenstore(self, vm, path, value): + """Writes the passed value to the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the write process. + """ + return self._make_xenstore_call('write_record', vm, path, {'value': json.dumps(value)}) + + def clear_xenstore(self, vm, path): + """Deletes the VM's xenstore record for the specified path. + If there is no such record, the request is ignored. + """ + self._make_xenstore_call('delete_record', vm, path) + + def _make_xenstore_call(self, method, vm, path, addl_args={}): + """Handles calls to the xenstore xenapi plugin.""" + return self._make_plugin_call('xenstore.py', method=method, vm=vm, path=path, + addl_args=addl_args) + + def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): + """Abstracts out the process of calling a method of a xenapi plugin. + Any errors raised by the plugin will in turn raise a RuntimeError here. + """ + vm = self._get_vm_opaque_ref(vm) + rec = self._session.get_xenapi().VM.get_record(vm) + args = {'dom_id': rec['domid'], 'path': path} + args.update(addl_args) + # If the 'testing_mode' attribute is set, add that to the args. + if getattr(self, 'testing_mode', False): + args['testing_mode'] = 'true' + try: + task = self._session.async_call_plugin(plugin, method, args) + ret = self._session.wait_for_task(0, task) + except self.XenAPI.Failure, e: + raise RuntimeError("%s" % e.details[-1]) + return ret + + def add_to_xenstore(self, vm, path, key, value): + """Adds the passed key/value pair to the xenstore record for + the given VM at the specified location. A XenAPIPlugin.PluginError + will be raised if any error is encountered in the write process. + """ + current = self.read_from_xenstore(vm, path) + if not current: + # Nothing at that location + current = {key: value} + else: + current[key] = value + self.write_to_xenstore(vm, path, current) + + def remove_from_xenstore(self, vm, path, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstoreirecord data for the given VM. + If the key doesn't exist, the request is ignored. 
+ """ + current = self.list_from_xenstore(vm, path) + if not current: + return + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + keys.sort(lambda x,y: cmp(y.count('/'), x.count('/'))) + for key in keys: + if path: + keypath = "%s/%s" % (path, key) + else: + keypath = key + self._make_xenstore_call('delete_record', vm, keypath) + + + ######################################################################## + ###### The following methods interact with the xenstore parameter + ###### record, not the live xenstore. They were created before I + ###### knew the difference, and are left in here in case they prove + ###### to be useful. They all have '_param' added to their method + ###### names to distinguish them. (dabo) + ######################################################################## + def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix): + """Returns a dict of all the keys in the xenstore parameter record + for the given instance that begin with the key_prefix. + """ + data = self.read_from_param_xenstore(instance_or_vm) + badkeys = [k for k in data.keys() + if not k.startswith(key_prefix)] + for badkey in badkeys: + del data[badkey] + return data + + def read_from_param_xenstore(self, instance_or_vm, keys=None): + """Returns the xenstore parameter record data for the specified VM instance + as a dict. Accepts an optional key or list of keys; if a value for 'keys' + is passed, the returned dict is filtered to only return the values + for those keys. + """ + vm = self._get_vm_opaque_ref(instance_or_vm) + data = self._session.call_xenapi_request('VM.get_xenstore_data', (vm, )) + ret = {} + if keys is None: + keys = data.keys() + elif isinstance(keys, basestring): + keys = [keys] + for key in keys: + raw = data.get(key) + if raw: + ret[key] = json.loads(raw) + else: + ret[key] = raw + return ret + + def add_to_param_xenstore(self, instance_or_vm, key, val): + """Takes a key/value pair and adds it to the xenstore parameter + record for the given vm instance. If the key exists in xenstore, + it is overwritten""" + vm = self._get_vm_opaque_ref(instance_or_vm) + self.remove_from_param_xenstore(instance_or_vm, key) + jsonval = json.dumps(val) + self._session.call_xenapi_request('VM.add_to_xenstore_data', + (vm, key, jsonval)) + + def write_to_param_xenstore(self, instance_or_vm, mapping): + """Takes a dict and writes each key/value pair to the xenstore + parameter record for the given vm instance. Any existing data for + those keys is overwritten. + """ + for k, v in mapping.iteritems(): + self.add_to_param_xenstore(instance_or_vm, k, v) + + def remove_from_param_xenstore(self, instance_or_vm, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstore parameter record data for the given VM. + If the key doesn't exist, the request is ignored. 
+ """ + vm = self._get_vm_opaque_ref(instance_or_vm) + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + for key in keys: + self._session.call_xenapi_request('VM.remove_from_xenstore_data', (vm, key)) + + def clear_param_xenstore(self, instance_or_vm): + """Removes all data from the xenstore parameter record for this VM.""" + self.write_to_param_xenstore(instance_or_vm, {}) + ######################################################################## diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d12e78c67..863461ee7 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -19,15 +20,15 @@ A connection to XenServer or Xen Cloud Platform. The concurrency model for this class is as follows: -All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator -deferredToThread). They are remote calls, and so may hang for the usual -reasons. They should not be allowed to block the reactor thread. +All XenAPI calls are on a green thread (using eventlet's "tpool" +thread pool). They are remote calls, and so may hang for the usual +reasons. All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async -(using XenAPI.VM.async_start etc). These return a task, which can then be -polled for completion. Polling is handled using reactor.callLater. +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. -This combination of techniques means that we don't block the reactor thread at +This combination of techniques means that we don't block the main thread at all, and at the same time we don't hold lots of threads waiting for long-running operations. @@ -81,7 +82,7 @@ flags.DEFINE_string('xenapi_connection_password', flags.DEFINE_float('xenapi_task_poll_interval', 0.5, 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' + '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, @@ -213,6 +214,14 @@ class XenAPISession(object): f = f.__getattr__(m) return tpool.execute(f, *args) + def call_xenapi_request(self, method, *args): + """Some interactions with dom0, such as interacting with xenstore's + param record, require using the xenapi_request method of the session + object. This wraps that call on a background thread. + """ + f = self._session.xenapi_request + return tpool.execute(f, method, *args) + def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" return tpool.execute(self._unwrap_plugin_exceptions, @@ -222,7 +231,6 @@ class XenAPISession(object): def wait_for_task(self, id, task): """Return the result of the given task. 
The task is polled until it completes.""" - done = event.Event() loop = utils.LoopingCall(self._poll_task, id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) @@ -235,7 +243,7 @@ class XenAPISession(object): return self.XenAPI.Session(url) def _poll_task(self, id, task, done): - """Poll the given XenAPI task, and fire the given Deferred if we + """Poll the given XenAPI task, and fire the given action if we get a result.""" try: name = self._session.xenapi.task.get_name_label(task) @@ -290,7 +298,7 @@ class XenAPISession(object): def _parse_xmlrpc_value(val): - """Parse the given value as if it were an XML-RPC value. This is + """Parse the given value as if it were an XML-RPC value. This is sometimes used as the format for the task.result field.""" if not val: return val -- cgit From 7f27f62e41fd655049574975bd3bf6c5b00e9ccf Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Thu, 30 Dec 2010 16:21:11 -0800 Subject: Converted the pool_recycle setting to be a flag with a default of 3600 seconds --- nova/db/sqlalchemy/session.py | 4 +++- nova/flags.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 5f31743cd..ca23e93d8 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, pool_recycle=3600, echo=False) + _ENGINE = create_engine(FLAGS.sql_connection, + pool_recycle=FLAGS.sql_idle_timeout, + echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) diff --git a/nova/flags.py b/nova/flags.py index 76a98d35a..338dcbf46 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -260,6 +260,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('sql_connection', 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') +DEFINE_string('sql_idle_timeout', + '3600', + 'timeout for idle sql database connections') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') -- cgit From b5f5ec40bbc6b898ac73444e9a0f0372c92fc12a Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 30 Dec 2010 18:25:30 -0600 Subject: removed some debugging code left in previous push. 
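
The xenapi_conn.py docstring rewritten in the previous patch describes the new concurrency model: every blocking XenAPI call is pushed onto eventlet's native thread pool so that green threads are never blocked. A minimal sketch of that offloading pattern, assuming only that eventlet is installed; slow_xenapi_call is a hypothetical stand-in for a real remote request, not Nova code:

    import time

    from eventlet import tpool


    def slow_xenapi_call(host):
        # Stand-in for a remote XenAPI request; real ones may hang for
        # the usual reasons.
        time.sleep(1)
        return 'record from %s' % host


    def call_xenapi(host):
        # tpool.execute runs the blocking function on a native worker
        # thread and suspends only the calling green thread until the
        # result arrives; other green threads keep running meanwhile.
        return tpool.execute(slow_xenapi_call, host)

    print call_xenapi('xenserver-1')

The same tpool.execute wrapper appears above in call_xenapi, call_xenapi_request and async_call_plugin.
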
--- nova/virt/xenapi/vmops.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5c9455db3..c46a881a8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -304,10 +304,6 @@ class VMOps(object): ret = self._make_xenstore_call('read_record', vm, path, {'ignore_missing_path': 'True'}) except self.XenAPI.Failure, e: - print "XENERR", e - return None - except StandardError, e: - print "ERR", type(e), e, e.msg return None try: return json.loads(ret) -- cgit From f55991c01421350434893200db8f01ca3911d957 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 18:56:12 -0600 Subject: Improved test --- nova/tests/test_xenapi.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index a09750672..539d132bd 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -180,9 +180,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) - instance = db.instance_create(self.values) - conn.spawn(instance) - + instance = self._create_instance() conn.get_diagnostics(instance) def test_instance_snapshot(self): -- cgit From ffaf32b9ac9a3e71ac25f68eb593bbf16a11946a Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 30 Dec 2010 19:07:20 -0600 Subject: Cleanup --- nova/tests/test_xenapi.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 539d132bd..c95a53af3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -161,27 +161,14 @@ class XenAPIVMTestCase(test.TestCase): xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.get_connection(False) - self.values = { - "name": 1, - "id": 1, - "project_id": self.project.id, - "user_id": self.user.id, - "image_id": 1, - "kernel_id": 2, - "ramdisk_id": 3, - "instance_type": "m1.large", - "mac_address": "aa:bb:cc:dd:ee:ff"} def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) def test_get_diagnostics(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - conn = xenapi_conn.get_connection(False) - instance = self._create_instance() - conn.get_diagnostics(instance) + self.conn.get_diagnostics(instance) def test_instance_snapshot(self): stubs.stubout_instance_snapshot(self.stubs) @@ -263,6 +250,15 @@ class XenAPIVMTestCase(test.TestCase): def _create_instance(self): """Creates and spawns a test instance""" - instance = db.instance_create(self.values) + values = {'name': 1, 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff' + } + instance = db.instance_create(values) self.conn.spawn(instance) return instance -- cgit From a89730699db2add131028ccda031ead3526b2c4f Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Thu, 30 Dec 2010 17:50:32 -0800 Subject: Removed dependencies on flags.py from adminclient --- nova/adminclient.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/adminclient.py b/nova/adminclient.py index 6ae9f0c0f..b2609c8c4 100644 --- a/nova/adminclient.py +++ 
b/nova/adminclient.py @@ -23,12 +23,9 @@ import base64 import boto import httplib -from nova import flags from boto.ec2.regioninfo import RegionInfo -FLAGS = flags.FLAGS - DEFAULT_CLC_URL = 'http://127.0.0.1:8773' DEFAULT_REGION = 'nova' @@ -199,8 +196,8 @@ class NovaAdminClient(object): self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION, - access_key=FLAGS.aws_access_key_id, - secret_key=FLAGS.aws_secret_access_key, + access_key=None, + secret_key=None, **kwargs): parts = self.split_clc_url(clc_url) -- cgit From f0e4bed6f4bf4ab3835ecd3e54eb9d7ac21dd5f1 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 31 Dec 2010 07:04:40 -0600 Subject: fixed pep8 issues --- nova/virt/xenapi/vmops.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c46a881a8..20f5e0215 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -285,7 +285,7 @@ class VMOps(object): def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. Returns a dict with the sub-paths as keys, - and the value stored in those paths as values. If nothing is + and the value stored in those paths as values. If nothing is found at that path, returns None. """ ret = self._make_xenstore_call('list_records', vm, path) @@ -319,7 +319,8 @@ class VMOps(object): at the specified location. A XenAPIPlugin.PluginError will be raised if any error is encountered in the write process. """ - return self._make_xenstore_call('write_record', vm, path, {'value': json.dumps(value)}) + return self._make_xenstore_call('write_record', vm, path, + {'value': json.dumps(value)}) def clear_xenstore(self, vm, path): """Deletes the VM's xenstore record for the specified path. @@ -329,8 +330,8 @@ class VMOps(object): def _make_xenstore_call(self, method, vm, path, addl_args={}): """Handles calls to the xenstore xenapi plugin.""" - return self._make_plugin_call('xenstore.py', method=method, vm=vm, path=path, - addl_args=addl_args) + return self._make_plugin_call('xenstore.py', method=method, vm=vm, + path=path, addl_args=addl_args) def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): """Abstracts out the process of calling a method of a xenapi plugin. @@ -375,7 +376,7 @@ class VMOps(object): keys = [key_or_keys] else: keys = key_or_keys - keys.sort(lambda x,y: cmp(y.count('/'), x.count('/'))) + keys.sort(lambda x, y: cmp(y.count('/'), x.count('/'))) for key in keys: if path: keypath = "%s/%s" % (path, key) @@ -383,9 +384,8 @@ class VMOps(object): keypath = key self._make_xenstore_call('delete_record', vm, keypath) - ######################################################################## - ###### The following methods interact with the xenstore parameter + ###### The following methods interact with the xenstore parameter ###### record, not the live xenstore. They were created before I ###### knew the difference, and are left in here in case they prove ###### to be useful. They all have '_param' added to their method @@ -403,13 +403,14 @@ class VMOps(object): return data def read_from_param_xenstore(self, instance_or_vm, keys=None): - """Returns the xenstore parameter record data for the specified VM instance - as a dict. Accepts an optional key or list of keys; if a value for 'keys' - is passed, the returned dict is filtered to only return the values - for those keys. 
+ """Returns the xenstore parameter record data for the specified VM + instance as a dict. Accepts an optional key or list of keys; if a + value for 'keys' is passed, the returned dict is filtered to only + return the values for those keys. """ vm = self._get_vm_opaque_ref(instance_or_vm) - data = self._session.call_xenapi_request('VM.get_xenstore_data', (vm, )) + data = self._session.call_xenapi_request('VM.get_xenstore_data', + (vm, )) ret = {} if keys is None: keys = data.keys() @@ -452,7 +453,8 @@ class VMOps(object): else: keys = key_or_keys for key in keys: - self._session.call_xenapi_request('VM.remove_from_xenstore_data', (vm, key)) + self._session.call_xenapi_request('VM.remove_from_xenstore_data', + (vm, key)) def clear_param_xenstore(self, instance_or_vm): """Removes all data from the xenstore parameter record for this VM.""" -- cgit From 5fd9ff898bf372f26bac3c0530521ba7abb7f26c Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Mon, 3 Jan 2011 10:55:52 -0800 Subject: removed extra whitespace chars at the end of the changed lines --- nova/db/sqlalchemy/session.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index ca23e93d8..c3876c02a 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,8 +36,8 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, - pool_recycle=FLAGS.sql_idle_timeout, + _ENGINE = create_engine(FLAGS.sql_connection, + pool_recycle=FLAGS.sql_idle_timeout, echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, -- cgit From 6a8f011789ddad57726ce55962b51a04a69fe527 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 3 Jan 2011 16:08:52 -0600 Subject: Fixes LP688545 --- nova/compute/manager.py | 1 + nova/db/sqlalchemy/__init__.py | 21 ++++++++++++++++++++- nova/flags.py | 2 ++ nova/tests/test_xenapi.py | 19 ++++++++++--------- 4 files changed, 33 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c9aff75ac..6e8f34347 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -327,6 +327,7 @@ class ComputeManager(manager.Manager): instance_ref["internal_id"]) return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 3288ebd20..22aa1cfe6 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -19,6 +19,25 @@ """ SQLAlchemy database backend """ +import logging +import time + +from sqlalchemy.exc import OperationalError + +from nova import flags from nova.db.sqlalchemy import models -models.register_models() + +FLAGS = flags.FLAGS + + +for i in xrange(FLAGS.sql_max_retries): + if i > 0: + time.sleep(FLAGS.sql_retry_interval) + + try: + models.register_models() + break + except OperationalError: + logging.exception(_("Data store is unreachable." 
+ " Trying again in %d seconds.") % FLAGS.sql_retry_interval) diff --git a/nova/flags.py b/nova/flags.py index e872ba217..4b7334927 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -266,6 +266,8 @@ DEFINE_string('sql_connection', DEFINE_string('sql_idle_timeout', '3600', 'timeout for idle sql database connections') +DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') +DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index c95a53af3..33571dad0 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -250,15 +250,16 @@ class XenAPIVMTestCase(test.TestCase): def _create_instance(self): """Creates and spawns a test instance""" - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff' - } + values = { + 'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff'} instance = db.instance_create(values) self.conn.spawn(instance) return instance -- cgit From 8bbcadafbc25c7ab478d6143293232f2cea24411 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 09:44:15 -0800 Subject: Removed UUID keys for instance and volume. --- nova/api/ec2/cloud.py | 12 +++++--- nova/db/sqlalchemy/models.py | 64 +++++++--------------------------------- nova/virt/xenapi/volume_utils.py | 3 ++ 3 files changed, 21 insertions(+), 58 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index a6818855d..b1eaafc8b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -72,13 +72,17 @@ def _gen_key(context, user_id, key_name): def ec2_id_to_id(ec2_id): - """Convert an ec2 ID and an instance ID.""" - return ec2_id[2:] + """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)""" + return int(ec2_id[2:], 36) def id_to_ec2_id(instance_id): - """Convert an instance ID to an ec2 ID.""" - return "i-%s" % instance_id + """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])""" + digits = [] + while instance_id != 0: + instance_id, remainder = divmod(instance_id, 36) + digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) + return "i-%s" % ''.join(reversed(digits)) class CloudController(object): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 78ab1ba9b..2acff8e60 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,17 +20,13 @@ SQLAlchemy models for nova data. 
""" import datetime -import uuid from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text -from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy.schema import ForeignKeyConstraint -from sqlalchemy.types import TypeDecorator, CHAR from nova.db.sqlalchemy.session import get_session @@ -43,46 +39,6 @@ FLAGS = flags.FLAGS BASE = declarative_base() -def make_uuid(): - return uuid.uuid1().hex - - -class GUID(TypeDecorator): - """Platform-independent GUID type. - - Uses Postgresql's UUID type, otherwise uses - CHAR(32), storing as stringified hex values. - - """ - impl = CHAR - - def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': - return dialect.type_descriptor(UUID()) - else: - return dialect.type_descriptor(CHAR(32)) - - def process_bind_param(self, value, dialect): - if value is None: - return value - elif dialect.name == 'postgresql': - return str(value) - else: - if isinstance(value, int): - return "%.32x" % uuid.UUID(int=value) - elif not isinstance(value, uuid.UUID): - return "%.32x" % uuid.UUID(value) - else: - # hexstring - return "%.32x" % value - - def process_result_value(self, value, dialect): - if value is None: - return value - else: - return uuid.UUID(value).hex - - class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -208,11 +164,11 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(GUID, primary_key=True, default=make_uuid) + id = Column(Integer, primary_key=True, autoincrement=True) @property def name(self): - return "instance-%s" % self.id + return "instance-%08x" % self.id admin_pass = Column(String(255)) user_id = Column(String(255)) @@ -284,7 +240,7 @@ class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" __tablename__ = "instance_actions" id = Column(Integer, primary_key=True) - instance_id = Column(GUID, ForeignKey('instances.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) action = Column(String(255)) error = Column(Text) @@ -293,11 +249,11 @@ class InstanceActions(BASE, NovaBase): class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' - id = Column(GUID, primary_key=True, default=make_uuid) + id = Column(Integer, primary_key=True, autoincrement=True) @property def name(self): - return "vollume-%s" % self.id + return "volume-%08x" % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -305,7 +261,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
- instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -346,7 +302,7 @@ class ExportDevice(BASE, NovaBase): id = Column(Integer, primary_key=True) shelf_id = Column(Integer) blade_id = Column(Integer) - volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('export_device', uselist=False), foreign_keys=volume_id, @@ -362,7 +318,7 @@ class IscsiTarget(BASE, NovaBase): id = Column(Integer, primary_key=True) target_num = Column(Integer) host = Column(String(255)) - volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, @@ -374,7 +330,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(GUID, ForeignKey('instances.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -494,7 +450,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 1ca813bcf..0ad6fd450 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -202,6 +202,9 @@ class VolumeHelper(HelperBase): def _get_volume_id(path): """Retrieve the volume id from device_path""" + # If we have the ID and not a path, just return it. + if isinstance(path, int): + return path # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes -- cgit From 4b0509f014aa164273a0e544441838be5352b1eb Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 12:16:43 -0800 Subject: Removed leftover UUID reference. --- nova/db/sqlalchemy/api.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bb84883e7..aaa07e3c9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. """ -import uuid import warnings from nova import db @@ -742,9 +741,6 @@ def instance_get_project_vpn(context, project_id): def instance_get_by_id(context, instance_id): session = get_session() - if type(instance_id) is int: - instance_id = uuid.UUID(int=instance_id).hex - if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ -- cgit From 2899896d1c7742ad59e2da2d2369bc2ff9526fed Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 12:27:50 -0800 Subject: Renamed argument to represent possible types in volume_utils. 
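
As the diff below shows, the helper now accepts either an integer volume id, which passes straight through, or a device path from which the id is parsed. A standalone re-creation of that logic for illustration, with invented example inputs; the real function lives in nova/virt/xenapi/volume_utils.py:

    def get_volume_id(path_or_id):
        # Integer ids are returned unchanged.
        if isinstance(path_or_id, int):
            return path_or_id
        # '/vol-' marks remote volumes and '-vol-' local ones, per the
        # comments in volume_utils; local names get '--' collapsed
        # back to '-'.
        volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
        if volume_id == path_or_id:
            volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
            volume_id = volume_id.replace('--', '-')
        return volume_id

    print get_volume_id(7)                          # 7
    print get_volume_id('/dev/sdc/vol-00000001')    # 'vol-00000001'
    print get_volume_id('iqn-target-vol-00000002')  # 'vol-00000002'

Note the subtlety the rename makes visible: given a path, the function returns the string 'vol-...' rather than an int, so callers must cope with both types.
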
--- nova/virt/xenapi/volume_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 0ad6fd450..4bbc41b03 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -200,18 +200,19 @@ class VolumeHelper(HelperBase): return -1 -def _get_volume_id(path): +def _get_volume_id(path_or_id): """Retrieve the volume id from device_path""" # If we have the ID and not a path, just return it. - if isinstance(path, int): - return path + if isinstance(path_or_id, int): + return path_or_id # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path[path.find('/vol-') + 1:] - if volume_id == path: - volume_id = path[path.find('-vol-') + 1:].replace('--', '-') + volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + if volume_id == path_or_id: + volume_id = path_or_id[path_or_id.find('-vol-') + 1:] + volume_id = volume_id.replace('--', '-') return volume_id -- cgit From 02c86d1e1146c1162a36620560eb8116ce8d47f1 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Tue, 4 Jan 2011 15:20:10 -0600 Subject: Made the plugin output fully json-ified, so I could remove the exception handlers in vmops.py. Cleaned up some pep8 issues that weren't caught in earlier runs. --- nova/virt/xenapi/vmops.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d539f9416..b6d620782 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -289,11 +289,7 @@ class VMOps(object): found at that path, returns None. """ ret = self._make_xenstore_call('list_records', vm, path) - try: - return json.loads(ret) - except ValueError: - # Not a valid JSON value - return ret + return json.loads(ret) def read_from_xenstore(self, vm, path): """Returns the value stored in the xenstore record for the given VM @@ -305,14 +301,11 @@ class VMOps(object): {'ignore_missing_path': 'True'}) except self.XenAPI.Failure, e: return None - try: - return json.loads(ret) - except ValueError: - # Not a JSON object - if ret == "None": - # Can't marshall None over RPC calls. - return None - return ret + ret = json.loads(ret) + if ret == "None": + # Can't marshall None over RPC calls. + return None + return ret def write_to_xenstore(self, vm, path, value): """Writes the passed value to the xenstore record for the given VM -- cgit From e97cb0f19f66ee4d28685575cea57b1eb32c4ed3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 13:56:36 -0800 Subject: Moved __init__ api code to api.py and changed allowed_instances quota method argument to accept all type data, not just vcpu count. 
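
The quota side of this change is visible in quota.py earlier in the series: allowed_instances caps a request both by the remaining instance quota and by how many instances of the requested size still fit in the remaining core quota. A toy version of that arithmetic with invented numbers; the real function pulls usage and limits from the database:

    def allowed_instances(requested, quota_instances, used_instances,
                          quota_cores, used_cores, cores_per_instance):
        # Instances still permitted by the instance quota alone.
        by_instances = quota_instances - used_instances
        # Instances of this size that fit in the remaining core quota.
        by_cores = (quota_cores - used_cores) // cores_per_instance
        return min(requested, by_instances, by_cores)

    # A 10-instance / 20-core project quota with 3 instances and 6 cores
    # already used; asking for 100 instances of 4 vcpus each allows 3.
    print allowed_instances(100, 10, 3, 20, 6, 4)

After this patch the API passes the whole instance-type dict down, so the quota layer can take more than the vcpu count into account.
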
--- nova/compute/__init__.py | 369 +-------------------------------------------- nova/compute/api.py | 385 +++++++++++++++++++++++++++++++++++++++++++++++ nova/network/__init__.py | 70 +-------- nova/network/api.py | 87 +++++++++++ nova/quota.py | 6 +- nova/tests/test_quota.py | 10 +- nova/volume/__init__.py | 83 +--------- nova/volume/api.py | 100 ++++++++++++ 8 files changed, 585 insertions(+), 525 deletions(-) create mode 100644 nova/compute/api.py create mode 100644 nova/network/api.py create mode 100644 nova/volume/api.py (limited to 'nova') diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 9efd90909..b94f971d1 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -16,371 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import network -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class API(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, image_service=None, network_api=None, volume_api=None, - **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not network_api: - network_api = network.API() - self.network_api = network_api - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(API, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, - display_name='', display_description='', - key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quota and - other arguments check out ok.""" - - type_data = instance_types.INSTANCE_TYPES[instance_type] - num_instances = quota.allowed_instances(context, max_count, - type_data['vcpus']) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - # No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': display_description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data, - 'availability_zone': availability_zone} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - db.security_group_create(context, values) - - def update(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. 
- - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_id}}) - else: - self.db.instance_destroy(context, instance_id) - - def get(self, context, instance_id=None, project_id=None, - reservation_id=None, fixed_ip=None): - """Get one or more instances, possibly filtered by one of the - given parameters. If there is no filter and the context is - an admin, it will retreive all instances in the system.""" - if instance_id is not None: - return self.db.instance_get_by_id(context, instance_id) - if reservation_id is not None: - return self.db.instance_get_all_by_reservation(context, - reservation_id) - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, - project_id) - return self.db.instance_get_all(context) - - def snapshot(self, context, instance_id, name): - """Snapshot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "snapshot_instance", - "args": {"instance_id": instance_id, "name": name}}) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance_id}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance_id}}) - - def get_diagnostics(self, context, instance_id): - """Retrieve diagnostics for the given instance.""" - instance = self.get(context, instance_id) - host = instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_diagnostics", - "args": {"instance_id": instance_id}}) - 
- def get_actions(self, context, instance_id): - """Retrieve actions for the given instance.""" - return self.db.instance_get_actions(context, instance_id) - - def suspend(self, context, instance_id): - """suspend the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "suspend_instance", - "args": {"instance_id": instance_id}}) - - def resume(self, context, instance_id): - """resume the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "resume_instance", - "args": {"instance_id": instance_id}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_id}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_id}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance - - def associate_floating_ip(self, context, instance_id, address): - instance = self.get(context, instance_id) - self.network_api.associate_floating_ip(context, address, - instance['fixed_ip']) +from nova.compute.api import API diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000..74d030c4d --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,385 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to instances (guest vms). +""" + +import datetime +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import network +from nova import quota +from nova import rpc +from nova import utils +from nova import volume +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(instance_id): + """Default function to generate a hostname given an instance reference.""" + return str(instance_id) + + +class API(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) + + def get_network_topic(self, context, instance_id): + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + availability_zone=None, user_data=None, + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and + other arguments check out ok.""" + + type_data = instance_types.INSTANCE_TYPES[instance_type] + num_instances = quota.allowed_instances(context, max_count, type_data) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." 
+                                   num_instances, "InstanceLimitExceeded")
+
+        is_vpn = image_id == FLAGS.vpn_image_id
+        if not is_vpn:
+            image = self.image_service.show(context, image_id)
+            if kernel_id is None:
+                kernel_id = image.get('kernelId', None)
+            if ramdisk_id is None:
+                ramdisk_id = image.get('ramdiskId', None)
+            # No kernel and ramdisk for raw images
+            if kernel_id == str(FLAGS.null_kernel):
+                kernel_id = None
+                ramdisk_id = None
+                logging.debug("Creating a raw instance")
+            # Make sure we have access to kernel and ramdisk (if not raw)
+            if kernel_id:
+                self.image_service.show(context, kernel_id)
+            if ramdisk_id:
+                self.image_service.show(context, ramdisk_id)
+
+        if security_group is None:
+            security_group = ['default']
+        if not type(security_group) is list:
+            security_group = [security_group]
+
+        security_groups = []
+        self.ensure_default_security_group(context)
+        for security_group_name in security_group:
+            group = db.security_group_get_by_name(context,
+                                                  context.project_id,
+                                                  security_group_name)
+            security_groups.append(group['id'])
+
+        if key_data is None and key_name:
+            key_pair = db.key_pair_get(context, context.user_id, key_name)
+            key_data = key_pair['public_key']
+
+        base_options = {
+            'reservation_id': utils.generate_uid('r'),
+            'image_id': image_id,
+            'kernel_id': kernel_id or '',
+            'ramdisk_id': ramdisk_id or '',
+            'state_description': 'scheduling',
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+            'instance_type': instance_type,
+            'memory_mb': type_data['memory_mb'],
+            'vcpus': type_data['vcpus'],
+            'local_gb': type_data['local_gb'],
+            'display_name': display_name,
+            'display_description': display_description,
+            'user_data': user_data or '',
+            'key_name': key_name,
+            'key_data': key_data,
+            'availability_zone': availability_zone}
+
+        elevated = context.elevated()
+        instances = []
+        logging.debug(_("Going to run %s instances..."), num_instances)
+        for num in range(num_instances):
+            instance = dict(mac_address=utils.generate_mac(),
+                            launch_index=num,
+                            **base_options)
+            instance = self.db.instance_create(context, instance)
+            instance_id = instance['id']
+
+            elevated = context.elevated()
+            if not security_groups:
+                security_groups = []
+            for security_group_id in security_groups:
+                self.db.instance_add_security_group(elevated,
+                                                    instance_id,
+                                                    security_group_id)
+
+            # Set sane defaults if not specified
+            updates = dict(hostname=generate_hostname(instance_id))
+            if 'display_name' not in instance:
+                updates['display_name'] = "Server %s" % instance_id
+
+            instance = self.update(context, instance_id, **updates)
+            instances.append(instance)
+
+            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+                          context.project_id, context.user_id, instance_id)
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "run_instance",
+                      "args": {"topic": FLAGS.compute_topic,
+                               "instance_id": instance_id}})
+
+        return instances
+
+    def ensure_default_security_group(self, context):
+        """ Create security group for the security context if it
+        does not already exist
+
+        :param context: the security context
+
+        """
+        try:
+            db.security_group_get_by_name(context, context.project_id,
+                                          'default')
+        except exception.NotFound:
+            values = {'name': 'default',
+                      'description': 'default',
+                      'user_id': context.user_id,
+                      'project_id': context.project_id}
+            db.security_group_create(context, values)
+
+    def update(self, context, instance_id, **kwargs):
+        """Updates the instance in the datastore.
+
+        :param context: The security context
+        :param instance_id: ID of the instance to update
+        :param kwargs: All additional keyword args are treated
+                       as data fields of the instance to be
+                       updated
+
+        :retval None
+
+        """
+        return self.db.instance_update(context, instance_id, kwargs)
+
+    def delete(self, context, instance_id):
+        logging.debug("Going to try to terminate %s" % instance_id)
+        try:
+            instance = self.get(context, instance_id)
+        except exception.NotFound as e:
+            logging.warning(_("Instance %s was not found during terminate"),
+                            instance_id)
+            raise e
+
+        if (instance['state_description'] == 'terminating'):
+            logging.warning(_("Instance %s is already being terminated"),
+                            instance_id)
+            return
+
+        self.update(context,
+                    instance['id'],
+                    state_description='terminating',
+                    state=0,
+                    terminated_at=datetime.datetime.utcnow())
+
+        host = instance['host']
+        if host:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "terminate_instance",
+                      "args": {"instance_id": instance_id}})
+        else:
+            self.db.instance_destroy(context, instance_id)
+
+    def get(self, context, instance_id=None, project_id=None,
+            reservation_id=None, fixed_ip=None):
+        """Get one or more instances, possibly filtered by one of the
+        given parameters. If there is no filter and the context is
+        an admin, it will retrieve all instances in the system."""
+        if instance_id is not None:
+            return self.db.instance_get_by_id(context, instance_id)
+        if reservation_id is not None:
+            return self.db.instance_get_all_by_reservation(context,
+                                                           reservation_id)
+        if fixed_ip is not None:
+            return self.db.fixed_ip_get_instance(context, fixed_ip)
+        if project_id or not context.is_admin:
+            if not context.project:
+                return self.db.instance_get_all_by_user(context,
+                                                        context.user_id)
+            if project_id is None:
+                project_id = context.project_id
+            return self.db.instance_get_all_by_project(context,
+                                                       project_id)
+        return self.db.instance_get_all(context)
+
+    def snapshot(self, context, instance_id, name):
+        """Snapshot the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "snapshot_instance",
+                  "args": {"instance_id": instance_id, "name": name}})
+
+    def reboot(self, context, instance_id):
+        """Reboot the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "reboot_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def pause(self, context, instance_id):
+        """Pause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "pause_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def unpause(self, context, instance_id):
+        """Unpause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unpause_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def get_diagnostics(self, context, instance_id):
+        """Retrieve diagnostics for the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance["host"]
+        return rpc.call(context,
+            self.db.queue_get_for(context, FLAGS.compute_topic, host),
+            {"method": "get_diagnostics",
+             "args": {"instance_id": instance_id}})
+
+    def get_actions(self, context, instance_id):
+        """Retrieve actions for the given instance."""
+        return self.db.instance_get_actions(context, instance_id)
+
+    def suspend(self, context, instance_id):
+        """suspend the instance with instance_id"""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "suspend_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def resume(self, context, instance_id):
+        """resume the instance with instance_id"""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "resume_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def rescue(self, context, instance_id):
+        """Rescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "rescue_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def unrescue(self, context, instance_id):
+        """Unrescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unrescue_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                     "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance
+
+    def associate_floating_ip(self, context, instance_id, address):
+        instance = self.get(context, instance_id)
+        self.network_api.associate_floating_ip(context, address,
+                                               instance['fixed_ip'])
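Aside: a quick, standalone illustration (not part of the patch) of what the
device check in attach_volume above accepts. The pattern allows only a single
letter before the "d", so Xen-style names such as /dev/xvda are rejected along
with partition names; whether that is intended is not stated in the patch.

import re

DEVICE_RE = re.compile("^/dev/[a-z]d[a-z]+$")

for device in ("/dev/vdb", "/dev/sda", "/dev/xvda", "/dev/vdb1", "vdb"):
    # Matches: /dev/vdb, /dev/sda.  Rejected: /dev/xvda (two letters
    # before "d"), /dev/vdb1 (digit suffix), vdb (no /dev/ prefix).
    print("%s -> %s" % (device, bool(DEVICE_RE.match(device))))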
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index cbd912047..6eb3e3ef6 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -16,72 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Handles all requests relating to instances (guest vms).
-"""
-
-import logging
-
-from nova import db
-from nova import flags
-from nova import quota
-from nova import rpc
-from nova.db import base
-
-FLAGS = flags.FLAGS
-
-
-class API(base.Base):
-    """API for interacting with the network manager."""
-
-    def allocate_floating_ip(self, context):
-        if quota.allowed_floating_ips(context, 1) < 1:
-            logging.warn(_("Quota exceeeded for %s, tried to allocate "
-                           "address"),
-                         context.project_id)
-            raise quota.QuotaError(_("Address quota exceeded. You cannot "
-                                     "allocate any more addresses"))
-        # NOTE(vish): We don't know which network host should get the ip
-        #             when we allocate, so just send it to any one. This
-        #             will probably need to move into a network supervisor
-        #             at some point.
-        return rpc.call(context,
-                        FLAGS.network_topic,
-                        {"method": "allocate_floating_ip",
-                         "args": {"project_id": context.project_id}})
-
-    def release_floating_ip(self, context, address):
-        floating_ip = self.db.floating_ip_get_by_address(context, address)
-        # NOTE(vish): We don't know which network host should get the ip
-        #             when we deallocate, so just send it to any one. This
-        #             will probably need to move into a network supervisor
-        #             at some point.
-        rpc.cast(context,
-                 FLAGS.network_topic,
-                 {"method": "deallocate_floating_ip",
-                  "args": {"floating_address": floating_ip['address']}})
-
-    def associate_floating_ip(self, context, floating_ip, fixed_ip):
-        if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
-            fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
-        floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
-        # NOTE(vish): Perhaps we should just pass this on to compute and
-        #             let compute communicate with network.
-        host = fixed_ip['network']['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.network_topic, host),
-                 {"method": "associate_floating_ip",
-                  "args": {"floating_address": floating_ip['address'],
-                           "fixed_address": fixed_ip['address']}})
-
-    def disassociate_floating_ip(self, context, address):
-        floating_ip = self.db.floating_ip_get_by_address(context, address)
-        if not floating_ip.get('fixed_ip'):
-            raise exception.ApiError('Address is not associated.')
-        # NOTE(vish): Get the topic from the host name of the network of
-        #             the associated fixed ip.
-        host = floating_ip['fixed_ip']['network']['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.network_topic, host),
-                 {"method": "disassociate_floating_ip",
-                  "args": {"floating_address": floating_ip['address']}})
+from nova.network.api import API
diff --git a/nova/network/api.py b/nova/network/api.py
new file mode 100644
index 000000000..cbd912047
--- /dev/null
+++ b/nova/network/api.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to the network.
+"""
+
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+class API(base.Base):
+    """API for interacting with the network manager."""
+
+    def allocate_floating_ip(self, context):
+        if quota.allowed_floating_ips(context, 1) < 1:
+            logging.warn(_("Quota exceeded for %s, tried to allocate "
+                           "address"),
+                         context.project_id)
+            raise quota.QuotaError(_("Address quota exceeded. You cannot "
+                                     "allocate any more addresses"))
+        # NOTE(vish): We don't know which network host should get the ip
+        #             when we allocate, so just send it to any one. This
+        #             will probably need to move into a network supervisor
+        #             at some point.
+        return rpc.call(context,
+                        FLAGS.network_topic,
+                        {"method": "allocate_floating_ip",
+                         "args": {"project_id": context.project_id}})
+
+    def release_floating_ip(self, context, address):
+        floating_ip = self.db.floating_ip_get_by_address(context, address)
+        # NOTE(vish): We don't know which network host should get the ip
+        #             when we deallocate, so just send it to any one. This
+        #             will probably need to move into a network supervisor
+        #             at some point.
+        rpc.cast(context,
+                 FLAGS.network_topic,
+                 {"method": "deallocate_floating_ip",
+                  "args": {"floating_address": floating_ip['address']}})
+
+    def associate_floating_ip(self, context, floating_ip, fixed_ip):
+        if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
+            fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
+        floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+        # NOTE(vish): Perhaps we should just pass this on to compute and
+        #             let compute communicate with network.
+        host = fixed_ip['network']['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.network_topic, host),
+                 {"method": "associate_floating_ip",
+                  "args": {"floating_address": floating_ip['address'],
+                           "fixed_address": fixed_ip['address']}})
+
+    def disassociate_floating_ip(self, context, address):
+        floating_ip = self.db.floating_ip_get_by_address(context, address)
+        if not floating_ip.get('fixed_ip'):
+            raise exception.ApiError('Address is not associated.')
+        # NOTE(vish): Get the topic from the host name of the network of
+        #             the associated fixed ip.
+        host = floating_ip['fixed_ip']['network']['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.network_topic, host),
+                 {"method": "disassociate_floating_ip",
+                  "args": {"floating_address": floating_ip['address']}})
diff --git a/nova/quota.py b/nova/quota.py
index 12dbfcf43..3884eb308 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -53,7 +53,7 @@ def get_quota(context, project_id):
     return rval
 
 
-def allowed_instances(context, num_instances, cores_per_instance):
+def allowed_instances(context, num_instances, instance_type):
     """Check quota and return min(num_instances, allowed_instances)"""
     project_id = context.project_id
     context = context.elevated()
@@ -62,9 +62,9 @@ def allowed_instances(context, num_instances, cores_per_instance):
     quota = get_quota(context, project_id)
     allowed_instances = quota['instances'] - used_instances
     allowed_cores = quota['cores'] - used_cores
-    num_cores = num_instances * cores_per_instance
+    num_cores = num_instances * instance_type['vcpus']
     allowed_instances = min(allowed_instances,
-                            int(allowed_cores // cores_per_instance))
+                            int(allowed_cores // instance_type['vcpus']))
     return min(num_instances, allowed_instances)
 
 
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index c15818774..b5f9f30ef 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -27,6 +27,7 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.api.ec2 import cloud
+from nova.compute import instance_types
 
 FLAGS = flags.FLAGS
 
@@ -78,14 +79,17 @@ class QuotaTestCase(test.TestCase):
 
     def test_quota_overrides(self):
         """Make sure overriding a projects quotas works"""
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+                            instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 2)
         db.quota_create(self.context, {'project_id': self.project.id,
                                        'instances': 10})
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+                            instance_types.INSTANCE_TYPES['m1.small'])
        self.assertEqual(num_instances, 4)
         db.quota_update(self.context, self.project.id, {'cores': 100})
-        num_instances = quota.allowed_instances(self.context, 100, 1)
+        num_instances = quota.allowed_instances(self.context, 100,
+                            instance_types.INSTANCE_TYPES['m1.small'])
         self.assertEqual(num_instances, 10)
         db.quota_destroy(self.context, self.project.id)
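To make the new signature concrete, here is a small self-contained sketch of
the arithmetic (hypothetical quota numbers; zero current usage assumed, while
the real function also subtracts instances and cores already in use):

def allowed_instances_sketch(quota_instances, quota_cores,
                             num_requested, vcpus):
    # Mirrors nova.quota.allowed_instances with no existing usage.
    allowed = min(quota_instances, int(quota_cores // vcpus))
    return min(num_requested, allowed)

# With a quota of 10 instances and 20 cores, a 4-vcpu type is capped by
# cores: 20 // 4 = 5 instances, even though 10 would fit by count alone.
assert allowed_instances_sketch(10, 20, 100, 4) == 5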
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index 48ecdbe68..56ef9332e 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -16,85 +16,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Handles all requests relating to volumes.
-"""
-
-import datetime
-import logging
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import quota
-from nova import rpc
-from nova.db import base
-
-FLAGS = flags.FLAGS
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
-
-
-class API(base.Base):
-    """API for interacting with the volume manager."""
-
-    def create(self, context, size, name, description):
-        if quota.allowed_volumes(context, 1, size) < 1:
-            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
-                         context.project_id, size)
-            raise quota.QuotaError("Volume quota exceeded. You cannot "
-                                   "create a volume of size %s" % size)
-
-        options = {
-            'size': size,
-            'user_id': context.user.id,
-            'project_id': context.project_id,
-            'availability_zone': FLAGS.storage_availability_zone,
-            'status': "creating",
-            'attach_status': "detached",
-            'display_name': name,
-            'display_description': description}
-
-        volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id']}})
-        return volume
-
-    def delete(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        if volume['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        now = datetime.datetime.utcnow()
-        self.db.volume_update(context, volume_id, {'status': 'deleting',
-                                                   'terminated_at': now})
-        host = volume['host']
-        rpc.cast(context,
-                 self.db.queue_get_for(context, FLAGS.volume_topic, host),
-                 {"method": "delete_volume",
-                  "args": {"volume_id": volume_id}})
-
-    def update(self, context, volume_id, fields):
-        self.db.volume_update(context, volume_id, fields)
-
-    def get(self, context, volume_id=None):
-        if volume_id is not None:
-            return self.db.volume_get(context, volume_id)
-        if context.user.is_admin():
-            return self.db.volume_get_all(context)
-        return self.db.volume_get_all_by_project(context, context.project_id)
-
-    def check_attach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        # TODO(vish): abstract status checking?
-        if volume['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        if volume['attach_status'] == "attached":
-            raise exception.ApiError(_("Volume is already attached"))
-
-    def check_detach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
-        # TODO(vish): abstract status checking?
-        if volume['status'] == "available":
-            raise exception.ApiError(_("Volume is already detached"))
+from nova.volume.api import API
+""" + +import datetime +import logging + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS +flags.DECLARE('storage_availability_zone', 'nova.volume.manager') + + +class API(base.Base): + """API for interacting with the volume manager.""" + + def create(self, context, size, name, description): + if quota.allowed_volumes(context, 1, size) < 1: + logging.warn("Quota exceeeded for %s, tried to create %sG volume", + context.project_id, size) + raise quota.QuotaError("Volume quota exceeded. You cannot " + "create a volume of size %s" % size) + + options = { + 'size': size, + 'user_id': context.user.id, + 'project_id': context.project_id, + 'availability_zone': FLAGS.storage_availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': name, + 'display_description': description} + + volume = self.db.volume_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volume", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume['id']}}) + return volume + + def delete(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + now = datetime.datetime.utcnow() + self.db.volume_update(context, volume_id, {'status': 'deleting', + 'terminated_at': now}) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume_id}}) + + def update(self, context, volume_id, fields): + self.db.volume_update(context, volume_id, fields) + + def get(self, context, volume_id=None): + if volume_id is not None: + return self.db.volume_get(context, volume_id) + if context.user.is_admin(): + return self.db.volume_get_all(context) + return self.db.volume_get_all_by_project(context, context.project_id) + + def check_attach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + if volume['attach_status'] == "attached": + raise exception.ApiError(_("Volume is already attached")) + + def check_detach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? 
From 472af7e750f369e3b999d2b1ac48f74369975ba6 Mon Sep 17 00:00:00 2001
From: Josh Kearney
Date: Tue, 4 Jan 2011 18:34:47 -0600
Subject: Recover from a lost data store connection

---
 nova/service.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)
(limited to 'nova')

diff --git a/nova/service.py b/nova/service.py
index f1f90742f..7203430c6 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -24,17 +24,21 @@ import inspect
 import logging
 import os
 import sys
+import time
 
 from eventlet import event
 from eventlet import greenthread
 from eventlet import greenpool
 
+from sqlalchemy.exc import OperationalError
+
 from nova import context
 from nova import db
 from nova import exception
 from nova import flags
 from nova import rpc
 from nova import utils
+from nova.db.sqlalchemy import models
 
 FLAGS = flags.FLAGS
 
@@ -204,6 +208,14 @@ class Service(object):
                 self.model_disconnected = True
                 logging.exception(_("model server went away"))
 
+            try:
+                models.register_models()
+            except OperationalError:
+                logging.exception(_("Data store is unreachable."
+                                    " Trying again in %d seconds.") %
+                                  FLAGS.sql_retry_interval)
+                time.sleep(FLAGS.sql_retry_interval)
+
 
 def serve(*services):
     argv = FLAGS(sys.argv)
-- cgit
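A standalone sketch of the reconnect pattern this hunk adds; the connect
callable and the retry interval are stand-ins for models.register_models and
FLAGS.sql_retry_interval, which are not defined here:

import logging
import time

from sqlalchemy.exc import OperationalError


def wait_for_data_store(connect, retry_interval=30):
    """Retry the given connect callable until it stops raising."""
    while True:
        try:
            connect()
            return
        except OperationalError:
            logging.exception("Data store is unreachable."
                              " Trying again in %d seconds.",
                              retry_interval)
            time.sleep(retry_interval)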
From def5583469bd265c9107ed54d461441bc6303151 Mon Sep 17 00:00:00 2001
From: Eric Day
Date: Wed, 5 Jan 2011 09:50:19 -0800
Subject: Split internal API get calls to get and get_all, where the former
 takes an ID and returns one resource, and the latter can optionally take a
 filter and return a list of resources.

---
 nova/api/ec2/cloud.py         |  9 +++++----
 nova/api/openstack/servers.py |  2 +-
 nova/compute/api.py           | 12 +++++++-----
 nova/volume/api.py            | 13 +++++++------
 4 files changed, 20 insertions(+), 16 deletions(-)
(limited to 'nova')

diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index b1eaafc8b..0c0027287 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -119,7 +119,8 @@ class CloudController(object):
 
     def _get_mpi_data(self, context, project_id):
         result = {}
-        for instance in self.compute_api.get(context, project_id=project_id):
+        for instance in self.compute_api.get_all(context,
+                                                 project_id=project_id):
             if instance['fixed_ip']:
                 line = '%s slots=%d' % (instance['fixed_ip']['address'],
                                         instance['vcpus'])
@@ -141,7 +142,7 @@ class CloudController(object):
 
     def get_metadata(self, address):
         ctxt = context.get_admin_context()
-        instance_ref = self.compute_api.get(ctxt, fixed_ip=address)
+        instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
         if instance_ref is None:
             return None
         mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
@@ -493,7 +494,7 @@ class CloudController(object):
             "output": base64.b64encode(output)}
 
     def describe_volumes(self, context, volume_id=None, **kwargs):
-        volumes = self.volume_api.get(context)
+        volumes = self.volume_api.get_all(context)
         # NOTE(vish): volume_id is an optional list of volume ids to filter by.
         volumes = [self._format_volume(context, v) for v in volumes
                    if volume_id is None or v['id'] in volume_id]
@@ -597,7 +598,7 @@ class CloudController(object):
 
     def _format_instances(self, context, **kwargs):
         reservations = {}
-        instances = self.compute_api.get(context, **kwargs)
+        instances = self.compute_api.get_all(context, **kwargs)
         for instance in instances:
             if not context.user.is_admin():
                 if instance['image_id'] == FLAGS.vpn_image_id:
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 6be24629a..ce64ac7ad 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -96,7 +96,7 @@ class Controller(wsgi.Controller):
 
         entity_maker - either _translate_detail_keys or _translate_keys
         """
-        instance_list = self.compute_api.get(req.environ['nova.context'])
+        instance_list = self.compute_api.get_all(req.environ['nova.context'])
         limited_list = common.limited(instance_list, req)
         res = [entity_maker(inst)['server'] for inst in limited_list]
         return dict(servers=res)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 74d030c4d..64d47b1ce 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -245,13 +245,15 @@ class API(base.Base):
         else:
             self.db.instance_destroy(context, instance_id)
 
-    def get(self, context, instance_id=None, project_id=None,
-            reservation_id=None, fixed_ip=None):
-        """Get one or more instances, possibly filtered by one of the
+    def get(self, context, instance_id):
+        """Get a single instance with the given ID."""
+        return self.db.instance_get_by_id(context, instance_id)
+
+    def get_all(self, context, project_id=None, reservation_id=None,
+                fixed_ip=None):
+        """Get all instances, possibly filtered by one of the
         given parameters. If there is no filter and the context is
         an admin, it will retrieve all instances in the system."""
-        if instance_id is not None:
-            return self.db.instance_get_by_id(context, instance_id)
         if reservation_id is not None:
             return self.db.instance_get_all_by_reservation(context,
                                                            reservation_id)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 48ecdbe68..2d7fe3762 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -63,7 +63,7 @@ class API(base.Base):
         return volume
 
     def delete(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
+        volume = self.get(context, volume_id)
         if volume['status'] != "available":
             raise exception.ApiError(_("Volume status must be available"))
         now = datetime.datetime.utcnow()
@@ -78,15 +78,16 @@
     def update(self, context, volume_id, fields):
         self.db.volume_update(context, volume_id, fields)
 
-    def get(self, context, volume_id=None):
-        if volume_id is not None:
-            return self.db.volume_get(context, volume_id)
+    def get(self, context, volume_id):
+        return self.db.volume_get(context, volume_id)
+
+    def get_all(self, context):
         if context.user.is_admin():
             return self.db.volume_get_all(context)
         return self.db.volume_get_all_by_project(context, context.project_id)
 
     def check_attach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
+        volume = self.get(context, volume_id)
         # TODO(vish): abstract status checking?
         if volume['status'] != "available":
             raise exception.ApiError(_("Volume status must be available"))
@@ -94,7 +95,7 @@
             raise exception.ApiError(_("Volume is already attached"))
 
     def check_detach(self, context, volume_id):
-        volume = self.db.volume_get(context, volume_id)
+        volume = self.get(context, volume_id)
         # TODO(vish): abstract status checking?
         if volume['status'] == "available":
             raise exception.ApiError(_("Volume is already detached"))
-- cgit
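A toy stand-in (not Nova code) showing the shape of the split interface:
get() always returns a single resource and get_all() always returns a list,
which is the contract the commit message describes.

class FakeComputeAPI(object):
    """Minimal sketch of the get/get_all contract."""

    def __init__(self, instances):
        self._instances = instances  # dict mapping id -> instance dict

    def get(self, context, instance_id):
        # One resource or a KeyError -- never a list.
        return self._instances[instance_id]

    def get_all(self, context, project_id=None):
        # Always a list, optionally filtered.
        return [i for i in self._instances.values()
                if project_id is None or i['project_id'] == project_id]


api = FakeComputeAPI({'i-1': {'project_id': 'p1'},
                      'i-2': {'project_id': 'p2'}})
assert api.get(None, 'i-1')['project_id'] == 'p1'
assert len(api.get_all(None, project_id='p2')) == 1
assert len(api.get_all(None)) == 2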
From 11d5e914044583882384ffd462991ef4f678b28e Mon Sep 17 00:00:00 2001
From: Josh Kearney
Date: Wed, 5 Jan 2011 16:04:16 -0600
Subject: Updated register_models() docstring

---
 nova/db/sqlalchemy/models.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'nova')

diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 0c9c387fc..6367d1ff8 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -545,7 +545,8 @@ def register_models():
     """Register Models and create metadata.
 
     Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
-    it will never need to be called explicitly elsewhere.
+    it will never need to be called explicitly elsewhere unless the
+    connection is lost and needs to be reestablished.
     """
     from sqlalchemy import create_engine
     models = (Service, Instance, InstanceActions,
-- cgit
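Taken together with the service.py change above, the reconnect path amounts to
calling register_models() again. A hedged sketch of that usage (it assumes a
configured Nova install; the module path is as in the tree, and register_models
takes no arguments per the hunk above):

from sqlalchemy.exc import OperationalError

from nova.db.sqlalchemy import models

try:
    # Re-registering the models recreates the engine and metadata,
    # which re-establishes the database connection.
    models.register_models()
except OperationalError:
    pass  # still down; the service loop will sleep and retry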