41 files changed, 1083 insertions, 155 deletions
diff --git a/.bzrignore b/.bzrignore
index 82db46fa2..d81a7d829 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -6,6 +6,7 @@
 keys
 networks
 nova.sqlite
 CA/cacert.pem
+CA/crl.pem
 CA/index.txt*
 CA/openssl.cnf
 CA/serial*
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -27,5 +27,6 @@
 <vishvananda@gmail.com> <root@ubuntu>
 <sleepsonthefloor@gmail.com> <root@tonbuntu>
 <rlane@wikimedia.org> <laner@controller>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
 <corywright@gmail.com> <cory.wright@rackspace.com>
 <ant@openstack.org> <amesserl@rackspace.com>
diff --git a/Authors b/Authors
--- a/Authors
+++ b/Authors
@@ -27,6 +27,7 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
+Rick Harris <rconradharris@gmail.com>
 Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
 Salvatore Orlando <salvatore.orlando@eu.citrix.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index 599e02a7e..3416c1a52 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -53,6 +53,7 @@ CLI interface for nova management.
 """
 
+import datetime
 import gettext
 import logging
 import os
@@ -452,6 +453,52 @@ class NetworkCommands(object):
                       int(network_size), int(vlan_start), int(vpn_start))
 
 
+class ServiceCommands(object):
+    """Enable and disable running services"""
+
+    def list(self, host=None, service=None):
+        """Show a list of all running services. Filter by host & service name.
+        args: [host] [service]"""
+        ctxt = context.get_admin_context()
+        now = datetime.datetime.utcnow()
+        services = db.service_get_all(ctxt)
+        if host:
+            services = [s for s in services if s['host'] == host]
+        if service:
+            services = [s for s in services if s['binary'] == service]
+        for svc in services:
+            delta = now - (svc['updated_at'] or svc['created_at'])
+            alive = (delta.seconds <= 15)
+            art = (alive and ":-)") or "XXX"
+            active = 'enabled'
+            if svc['disabled']:
+                active = 'disabled'
+            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
+                                              active, art,
+                                              svc['updated_at'])
+
+    def enable(self, host, service):
+        """Enable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': False})
+
+    def disable(self, host, service):
+        """Disable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': True})
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -459,7 +506,8 @@ CATEGORIES = [
     ('shell', ShellCommands),
     ('vpn', VpnCommands),
     ('floating', FloatingIpCommands),
-    ('network', NetworkCommands)]
+    ('network', NetworkCommands),
+    ('service', ServiceCommands)]
 
 
 def lazy_match(name, key_value_tuples):
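The liveness check in ServiceCommands.list above compares `delta.seconds` against a 15-second threshold. Worth noting: `timedelta.seconds` holds only the sub-day remainder of an interval, so a very stale heartbeat can still yield a small `.seconds` value. A minimal standalone sketch of the distinction (the helper and timestamps are hypothetical, not part of this change):

    import datetime

    def service_is_alive(updated_at, now, grace=datetime.timedelta(seconds=15)):
        # Comparing whole timedelta objects accounts for the days
        # component as well as the seconds remainder.
        return (now - updated_at) <= grace

    now = datetime.datetime(2010, 12, 1, 12, 0, 0)
    # A heartbeat 10 seconds old is alive.
    assert service_is_alive(now - datetime.timedelta(seconds=10), now)
    # A heartbeat 1 day and 10 seconds old is not, even though
    # (now - then).seconds is also 10.
    then = now - datetime.timedelta(days=1, seconds=10)
    assert (now - then).seconds == 10
    assert not service_is_alive(then, now)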
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index fcb76c5e5..40490e71e 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20
 #. These need to be defined in the nova.conf configuration file::
 
-    --sql_connection=mysql://root:nova@$CC_ADDR/nova  # location of nova sql db
-    --s3_host=$CC_ADDR          # This is where nova is hosting the objectstore service, which
-                                # will contain the VM images and buckets
-    --rabbit_host=$CC_ADDR      # This is where the rabbit AMQP messaging service is hosted
-    --cc_host=$CC_ADDR          # This is where the the nova-api service lives
-    --verbose                   # Optional but very helpful during initial setup
-    --ec2_url=http://$CC_ADDR:8773/services/Cloud
-    --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
-
-    --fixed_range=<network/prefix>  # ip network to use for VM guests, ex 192.168.2.64/26
-    --network_size=<# of addrs>     # number of ip addrs to use for VM guests, ex 64
+    --sql_connection=mysql://root:nova@$CC_ADDR/nova  # location of nova sql db
+    --s3_host=$CC_ADDR          # This is where Nova is hosting the objectstore service, which
+                                # will contain the VM images and buckets
+    --rabbit_host=$CC_ADDR      # This is where the rabbit AMQP messaging service is hosted
+    --cc_host=$CC_ADDR          # This is where the nova-api service lives
+    --verbose                   # Optional but very helpful during initial setup
+    --ec2_url=http://$CC_ADDR:8773/services/Cloud
+    --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
+    --fixed_range=<network/prefix>  # ip network to use for VM guests, ex 192.168.2.64/26
+    --network_size=<# of addrs>     # number of ip addrs to use for VM guests, ex 64
 
 #. Create a nova group::
 
-    sudo addgroup nova
+    sudo addgroup nova
 
 The Nova config file should have its owner set to root:nova, and mode set to 0640, since it contains your MySQL server's root password.
diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst
index 6484613df..4baa46e20 100644
--- a/doc/source/devref/addmethod.openstackapi.rst
+++ b/doc/source/devref/addmethod.openstackapi.rst
@@ -24,7 +24,7 @@ Routing
 
 To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information.
 
-URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ .
+URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` .
 
 See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two:
   - mapper.connect() lets you map a single URL to a single action on a controller.
@@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju
 Controllers and actions
 -----------------------
 
-Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller.
+Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller.
 
-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.
 
 Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
 
@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML
 
 If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. <servers> list contains <server> tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. <server id="4"/> instead of <server><id>4</id></server>).
 
-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.
 
 Faults
 ------
diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst
index 423284a55..ae0bac49d 100644
--- a/doc/source/devref/rabbit.rst
+++ b/doc/source/devref/rabbit.rst
@@ -71,8 +71,8 @@ RPC Casts
 
 The diagram below shows the message flow during an rpc.cast operation:
 
-  1. a Topic Publisher is instantiated to send the message request to the queuing system.
-  2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
+  1. A Topic Publisher is instantiated to send the message request to the queuing system.
+  2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
 
 .. image:: /images/rabbit/flow2.png
    :width: 60%
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index 18368546b..fb3969a43 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo
 
 To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
 
-  .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
+  .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
 
 Concept: Storage
 ----------------
@@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
 Flat DHCP Mode
 ~~~~~~~~~~~~~~
 
-This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
+This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration: it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
 
 VLAN DHCP Mode
 ~~~~~~~~~~~~~~
 
-This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically.
-More information on cloudpipe can be found :ref:`here <cloudpipe>`.
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
 
 The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds)
@@ -154,16 +154,16 @@ Concept: nova-manage
 --------------------
 
 The nova-manage command is used to perform many essential functions for
-administration and ongoing maintenance of nova, such as user creation,
+administration and ongoing maintenance of Nova, such as user creation,
 vpn management, and much more.
 
-See doc:`nova.manage` in the Administration Guide for more details.
+See :doc:`nova.manage` in the Administration Guide for more details.
 
 Concept: Flags
 --------------
 
-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
 
 
 Concept: Plugins
@@ -181,7 +181,7 @@ Concept: Plugins
 Concept: IPC/RPC
 ----------------
 
-Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
+Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
 
 Concept: Fakes
 --------------
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index ae2b64d8a..fa5d96738 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see
 * HOST_IP
   * Default: address of first interface from the ifconfig command
   * Values: 127.0.0.1, or any other valid address
-
-TEST
-~~~~
-
-**Default**: 0
-**Values**: 1, run tests after checkout and initial setup
-
-USE_MYSQL
-~~~~~~~~~
-
-**Default**: 0, use sqlite3
-**Values**: 1, use mysql instead of sqlite3
-
-MYSQL_PASS
-~~~~~~~~~~
-
-Only useful if $USE_MYSQL=1.
-
-**Default**: nova
-**Values**: value of root password for mysql
-
-USE_LDAP
-~~~~~~~~
-
-**Default**: 0, use :mod:`nova.auth.dbdriver`
-**Values**: 1, use :mod:`nova.auth.ldapdriver`
-
-LIBVIRT_TYPE
-~~~~~~~~~~~~
-
-**Default**: qemu
-**Values**: uml, kvm
+* TEST
+  * Default: 0
+  * Values: 1, run tests after checkout and initial setup
+* USE_MYSQL
+  * Default: 0, use sqlite3
+  * Values: 1, use mysql instead of sqlite3
+* MYSQL_PASS (Only useful if $USE_MYSQL=1)
+  * Default: nova
+  * Values: value of root password for mysql
+* USE_LDAP
+  * Default: 0, use :mod:`nova.auth.dbdriver`
+  * Values: 1, use :mod:`nova.auth.ldapdriver`
+* LIBVIRT_TYPE
+  * Default: qemu
+  * Values: uml, kvm
 
 Usage
 -----
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index e09261f00..9fb6307a8 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -188,9 +188,46 @@ class CloudController(object):
             return data
 
     def describe_availability_zones(self, context, **kwargs):
+        if ('zone_name' in kwargs and
+            'verbose' in kwargs['zone_name'] and
+            context.is_admin):
+            return self._describe_availability_zones_verbose(context,
+                                                             **kwargs)
+        else:
+            return self._describe_availability_zones(context, **kwargs)
+
+    def _describe_availability_zones(self, context, **kwargs):
         return {'availabilityZoneInfo': [{'zoneName': 'nova',
                                           'zoneState': 'available'}]}
 
+    def _describe_availability_zones_verbose(self, context, **kwargs):
+        rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
+                                        'zoneState': 'available'}]}
+
+        services = db.service_get_all(context)
+        now = db.get_time()
+        hosts = []
+        for host in [service['host'] for service in services]:
+            if not host in hosts:
+                hosts.append(host)
+        for host in hosts:
+            rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
+                                               'zoneState': ''})
+            hsvcs = [service for service in services \
+                     if service['host'] == host]
+            for svc in hsvcs:
+                delta = now - (svc['updated_at'] or svc['created_at'])
+                alive = (delta.seconds <= FLAGS.service_down_time)
+                art = (alive and ":-)") or "XXX"
+                active = 'enabled'
+                if svc['disabled']:
+                    active = 'disabled'
+                rv['availabilityZoneInfo'].append({
+                        'zoneName': '| |- %s' % svc['binary'],
+                        'zoneState': '%s %s %s' % (active, art,
+                                                   svc['updated_at'])})
+        return rv
+
     def describe_regions(self, context, region_name=None, **kwargs):
         if FLAGS.region_list:
             regions = []
@@ -765,6 +802,8 @@ class CloudController(object):
             key_name=kwargs.get('key_name'),
             user_data=kwargs.get('user_data'),
             security_group=kwargs.get('security_group'),
+            availability_zone=kwargs.get('placement', {}).get(
+                                  'AvailabilityZone'),
             generate_hostname=internal_id_to_ec2_id)
         return self._format_run_instances(context,
                                           instances[0]['reservation_id'])
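For reference, the verbose zone listing above renders services as a small ASCII tree inside the standard EC2 response structure. A hypothetical example of what _describe_availability_zones_verbose builds (hosts, states, and timestamps invented for illustration):

    example_response = {'availabilityZoneInfo': [
        {'zoneName': 'nova', 'zoneState': 'available'},
        # One '|-' entry per host, with zoneState left blank.
        {'zoneName': '|- host1', 'zoneState': ''},
        # One '| |-' entry per service on that host: enabled/disabled,
        # the :-)/XXX liveness art, and the last heartbeat time.
        {'zoneName': '| |- nova-compute',
         'zoneState': 'enabled :-) 2010-12-01 12:00:00'},
        {'zoneName': '| |- nova-volume',
         'zoneState': 'disabled XXX 2010-12-01 11:58:02'},
    ]}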
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index dc7794876..a1430caed 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -91,6 +91,8 @@ class APIRouter(wsgi.Router):
             logging.debug("Including admin operations in API.")
             server_members['pause'] = 'POST'
             server_members['unpause'] = 'POST'
+            server_members["diagnostics"] = "GET"
+            server_members["actions"] = "GET"
             server_members['suspend'] = 'POST'
             server_members['resume'] = 'POST'
 
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 12df208a3..00e817c8d 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -55,7 +55,8 @@ class AuthMiddleware(wsgi.Middleware):
         if not user:
             return faults.Fault(webob.exc.HTTPUnauthorized())
 
-        req.environ['nova.context'] = context.RequestContext(user, user)
+        project = self.auth.get_project(FLAGS.default_project)
+        req.environ['nova.context'] = context.RequestContext(user, project)
         return self.application
 
     def has_authentication(self, req):
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index ba35fbc78..867ee5a7e 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -25,7 +25,7 @@ import nova.image.service
 
 from nova.api.openstack import common
 from nova.api.openstack import faults
-
+from nova.compute import api as compute_api
 
 FLAGS = flags.FLAGS
 
@@ -127,9 +127,11 @@ class Controller(wsgi.Controller):
             raise faults.Fault(exc.HTTPNotFound())
 
     def create(self, req):
-        # Only public images are supported for now, so a request to
-        # make a backup of a server cannot be supproted.
-        raise faults.Fault(exc.HTTPNotFound())
+        context = req.environ['nova.context']
+        env = self._deserialize(req.body, req)
+        instance_id = env["image"]["serverId"]
+        name = env["image"]["name"]
+        return compute_api.ComputeAPI().snapshot(context, instance_id, name)
 
     def update(self, req, id):
         # Users may not modify public images, and that's all that
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 845183258..c5cbe21ef 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -218,3 +218,13 @@ class Controller(wsgi.Controller):
             logging.error(_("compute.api::resume %s"), readable)
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
+
+    def diagnostics(self, req, id):
+        """Permit Admins to retrieve server diagnostics."""
+        ctxt = req.environ["nova.context"]
+        return self.compute_api.get_diagnostics(ctxt, id)
+
+    def actions(self, req, id):
+        """Permit Admins to retrieve server actions."""
+        ctxt = req.environ["nova.context"]
+        return self.compute_api.get_actions(ctxt, id)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a47703461..07c69bd31 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -74,6 +74,7 @@ class ComputeAPI(base.Base):
                        max_count=1, kernel_id=None, ramdisk_id=None,
                        display_name='', description='', key_name=None,
                        key_data=None, security_group='default',
+                       availability_zone=None,
                        user_data=None,
                        generate_hostname=generate_default_hostname):
         """Create the number of instances requested if quota and
@@ -141,7 +142,8 @@ class ComputeAPI(base.Base):
             'display_description': description,
             'user_data': user_data or '',
             'key_name': key_name,
-            'key_data': key_data}
+            'key_data': key_data,
+            'availability_zone': availability_zone}
 
         elevated = context.elevated()
         instances = []
@@ -257,6 +259,15 @@ class ComputeAPI(base.Base):
     def get_instance(self, context, instance_id):
         return self.db.instance_get_by_internal_id(context, instance_id)
 
+    def snapshot(self, context, instance_id, name):
+        """Snapshot the given instance."""
+        instance = self.db.instance_get_by_internal_id(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "snapshot_instance",
+                  "args": {"instance_id": instance['id'], "name": name}})
+
     def reboot(self, context, instance_id):
         """Reboot the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -284,6 +295,20 @@ class ComputeAPI(base.Base):
                  {"method": "unpause_instance",
                   "args": {"instance_id": instance['id']}})
 
+    def get_diagnostics(self, context, instance_id):
+        """Retrieve diagnostics for the given instance."""
+        instance = self.db.instance_get_by_internal_id(context, instance_id)
+        host = instance["host"]
+        return rpc.call(context,
+            self.db.queue_get_for(context, FLAGS.compute_topic, host),
+            {"method": "get_diagnostics",
+             "args": {"instance_id": instance["id"]}})
+
+    def get_actions(self, context, instance_id):
+        """Retrieve actions for the given instance."""
+        instance = self.db.instance_get_by_internal_id(context, instance_id)
+        return self.db.instance_get_actions(context, instance["id"])
+
     def suspend(self, context, instance_id):
         """suspend the instance with instance_id"""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 70b175e7c..c9aff75ac 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -225,6 +225,27 @@ class ComputeManager(manager.Manager):
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
+    def snapshot_instance(self, context, instance_id, name):
+        """Snapshot an instance on this server."""
+        context = context.elevated()
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        #NOTE(sirp): update_state currently only refreshes the state field
+        # if we add is_snapshotting, we will need this refreshed too,
+        # potentially?
+        self._update_state(context, instance_id)
+
+        logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+        if instance_ref['state'] != power_state.RUNNING:
+            logging.warn(_('trying to snapshot a non-running '
+                           'instance: %s (state: %s expected: %s)'),
+                         instance_ref['internal_id'],
+                         instance_ref['state'],
+                         power_state.RUNNING)
+
+        self.driver.snapshot(instance_ref, name)
+
+    @exception.wrap_exception
     def rescue_instance(self, context, instance_id):
         """Rescue an instance on this server."""
         context = context.elevated()
@@ -297,6 +318,15 @@ class ComputeManager(manager.Manager):
                                             result))
 
     @exception.wrap_exception
+    def get_diagnostics(self, context, instance_id):
+        """Retrieve diagnostics for an instance on this server."""
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        if instance_ref["state"] == power_state.RUNNING:
+            logging.debug(_("instance %s: retrieving diagnostics"),
+                          instance_ref["internal_id"])
+            return self.driver.get_diagnostics(instance_ref)
+
     def suspend_instance(self, context, instance_id):
         """suspend the instance with instance_id"""
         context = context.elevated()
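The two new ComputeAPI entry points above use the two messaging primitives that appear throughout this change: snapshot() fires a one-way rpc.cast and returns immediately, while get_diagnostics() uses a blocking rpc.call so the worker's return value can be handed straight back to the API layer. A sketch of the pattern, assuming this tree's nova.rpc and an admin context (the topic string and ids are hypothetical):

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()
    topic = 'compute.host1'  # what db.queue_get_for(...) resolves to

    # Fire-and-forget: the compute worker picks this up asynchronously.
    rpc.cast(ctxt, topic, {'method': 'snapshot_instance',
                           'args': {'instance_id': 1, 'name': 'snap-1'}})

    # Blocking round-trip: call() waits for the worker's return value.
    diags = rpc.call(ctxt, topic, {'method': 'get_diagnostics',
                                   'args': {'instance_id': 1}})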
diff --git a/nova/db/api.py b/nova/db/api.py
index fde3f0852..bab698d9d 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`.
 
 :sql_connection:  string specifying the sqlalchemy connection to use, like:
                   `sqlite:///var/lib/nova/nova.sqlite`.
+
+:enable_new_services:  when adding a new service to the database, is it in the
+                       pool of available hardware (Default: True)
 """
 
 from nova import exception
@@ -37,6 +40,8 @@ from nova import utils
 
 FLAGS = flags.FLAGS
 flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db')
+flags.DEFINE_boolean('enable_new_services', True,
+                     'Services to be added to the available pool on create')
 
 
 IMPL = utils.LazyPluggable(FLAGS['db_backend'],
@@ -383,6 +388,11 @@ def instance_action_create(context, values):
     return IMPL.instance_action_create(context, values)
 
 
+def instance_get_actions(context, instance_id):
+    """Get instance actions by instance id."""
+    return IMPL.instance_get_actions(context, instance_id)
+
+
 ###################
 
 
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7e945e4cb..72369d187 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -236,6 +236,8 @@ def service_get_by_args(context, host, binary):
 def service_create(context, values):
     service_ref = models.Service()
     service_ref.update(values)
+    if not FLAGS.enable_new_services:
+        service_ref.disabled = True
     service_ref.save()
     return service_ref
 
@@ -856,6 +858,18 @@ def instance_action_create(context, values):
     return action_ref
 
 
+@require_admin_context
+def instance_get_actions(context, instance_id):
+    """Return the actions associated to the given instance id"""
+    session = get_session()
+    actions = {}
+    for action in session.query(models.InstanceActions).\
+                          filter_by(instance_id=instance_id).\
+                          all():
+        actions[action.action] = action.error
+    return actions
+
+
 ###################
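The new instance_get_actions query flattens the instance_actions rows into a plain dict keyed by action name, with the recorded error as the value. A hypothetical result (action names and error text invented for illustration):

    actions = {'reboot': None,         # recorded action with no error
               'rescue': 'timed out'}  # recorded action with its error text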
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 693db8d23..0c9c387fc 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -22,7 +22,7 @@ SQLAlchemy models for nova data.
 import datetime
 
 from sqlalchemy.orm import relationship, backref, object_mapper
-from sqlalchemy import Column, Integer, Float, String, schema
+from sqlalchemy import Column, Integer, String, schema
 from sqlalchemy import ForeignKey, DateTime, Boolean, Text
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.ext.declarative import declarative_base
@@ -220,6 +220,8 @@ class Instance(BASE, NovaBase):
     launched_at = Column(DateTime)
     terminated_at = Column(DateTime)
 
+    availability_zone = Column(String(255))
+
     # User editable field for display in user-facing UIs
     display_name = Column(String(255))
     display_description = Column(String(255))
@@ -236,21 +238,6 @@ class Instance(BASE, NovaBase):
     #                             'shutdown', 'shutoff', 'crashed'])
 
 
-class InstanceDiagnostics(BASE, NovaBase):
-    """Represents a guest VM's diagnostics"""
-    __tablename__ = "instance_diagnostics"
-    id = Column(Integer, primary_key=True)
-    instance_id = Column(Integer, ForeignKey('instances.id'))
-
-    memory_available = Column(Float)
-    memory_free = Column(Float)
-    cpu_load = Column(Float)
-    disk_read = Column(Float)
-    disk_write = Column(Float)
-    net_tx = Column(Float)
-    net_rx = Column(Float)
-
-
 class InstanceActions(BASE, NovaBase):
     """Represents a guest VM's actions and results"""
     __tablename__ = "instance_actions"
@@ -452,7 +439,7 @@ class AuthToken(BASE, NovaBase):
     """
     __tablename__ = 'auth_tokens'
     token_hash = Column(String(255), primary_key=True)
-    user_id = Column(Integer)
+    user_id = Column(String(255))
     server_manageent_url = Column(String(255))
     storage_url = Column(String(255))
     cdn_management_url = Column(String(255))
@@ -561,7 +548,7 @@ def register_models():
     it will never need to be called explicitly elsewhere.
     """
     from sqlalchemy import create_engine
-    models = (Service, Instance, InstanceDiagnostics, InstanceActions,
+    models = (Service, Instance, InstanceActions,
               Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
               Network, SecurityGroup, SecurityGroupIngressRule,
               SecurityGroupInstanceAssociation, AuthToken, User,
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index e0d84c107..c3876c02a 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False):
     global _MAKER
     if not _MAKER:
         if not _ENGINE:
-            _ENGINE = create_engine(FLAGS.sql_connection, echo=False)
+            _ENGINE = create_engine(FLAGS.sql_connection,
+                                    pool_recycle=FLAGS.sql_idle_timeout,
+                                    echo=False)
         _MAKER = (sessionmaker(bind=_ENGINE,
                                autocommit=autocommit,
                                expire_on_commit=expire_on_commit))
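The pool_recycle change above addresses MySQL's habit of closing connections that sit idle longer than its wait_timeout (8 hours by default): SQLAlchemy transparently discards and replaces any pooled connection older than the given number of seconds, instead of surfacing "MySQL server has gone away" errors. A minimal standalone sketch (the connection URL is a placeholder):

    from sqlalchemy import create_engine

    # Recycle pooled connections after an hour, comfortably inside
    # MySQL's default 8-hour wait_timeout.
    engine = create_engine('mysql://root:nova@localhost/nova',
                           pool_recycle=3600,
                           echo=False)

Note that the new sql_idle_timeout flag below is declared with DEFINE_string even though it is consumed as a number.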
diff --git a/nova/flags.py b/nova/flags.py
index 76a98d35a..e872ba217 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -212,6 +212,8 @@ DEFINE_list('region_list',
 DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
 DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
 DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
+DEFINE_integer('glance_port', 9292, 'glance port')
+DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
 DEFINE_integer('s3_port', 3333, 's3 port')
 DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
 DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
@@ -239,6 +241,7 @@ DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
 DEFINE_integer('cc_port', 8773, 'cloud controller port')
 DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
 
+DEFINE_string('default_project', 'openstack', 'default project for openstack')
 DEFINE_string('default_image', 'ami-11111',
               'default image to use, testing only')
 DEFINE_string('default_instance_type', 'm1.small',
@@ -260,6 +263,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
 DEFINE_string('sql_connection',
               'sqlite:///$state_path/nova.sqlite',
               'connection string for sql database')
+DEFINE_string('sql_idle_timeout',
+              '3600',
+              'timeout for idle sql database connections')
 DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
               'Manager for compute')
diff --git a/nova/image/glance.py b/nova/image/glance.py
index cb3936df1..cc3192e7c 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -24,6 +24,7 @@ import urlparse
 
 import webob.exc
 
+from nova.compute import api as compute_api
 from nova import utils
 from nova import flags
 from nova import exception
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 08d7033f5..66e46c1b9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
     pass
 
 
+class WillNotSchedule(exception.Error):
+    """The specified host is not up or doesn't exist."""
+    pass
+
+
 class Scheduler(object):
     """The base class that all Scheduler classes should inherit from."""
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index f9171ab35..47baf0d73 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest running instances."""
         instance_ref = db.instance_get(context, instance_id)
+        if instance_ref['availability_zone'] and context.is_admin:
+            zone, _x, host = instance_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-compute')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s is not alive" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.instance_update(context, instance_id, {'host': host,
+                                                      'scheduled_at': now})
+            return host
         results = db.service_get_all_compute_sorted(context)
         for result in results:
             (service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
         volume_ref = db.volume_get(context, volume_id)
+        if (':' in volume_ref['availability_zone']) and context.is_admin:
+            zone, _x, host = volume_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-volume')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s not available" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.volume_update(context, volume_id, {'host': host,
+                                                  'scheduled_at': now})
+            return host
         results = db.service_get_all_volume_sorted(context)
         for result in results:
             (service, volume_gigabytes) = result
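Both scheduler paths above key off the same 'zone:host' convention: an admin can pin an instance or volume to a specific host by appending it after a colon in the availability zone. A small self-contained sketch of the string handling involved (names hypothetical):

    availability_zone = 'nova:host1'
    zone, _sep, host = availability_zone.partition(':')
    assert (zone, host) == ('nova', 'host1')

    # Without a colon, partition() leaves host empty; note the volume
    # path above guards on ':' in the string before taking the
    # pinned-host branch.
    zone, _sep, host = 'nova'.partition(':')
    assert (zone, host) == ('nova', '')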
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 79663e43a..961431154 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -110,6 +110,12 @@ def stub_out_networking(stubs):
     stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
 
 
+def stub_out_compute_api_snapshot(stubs):
+    def snapshot(self, context, instance_id, name):
+        return 123
+    stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot)
+
+
 def stub_out_glance(stubs, initial_fixtures=[]):
 
     class FakeParallaxClient:
@@ -213,6 +219,9 @@ class FakeAuthManager(object):
                 return v
         return None
 
+    def get_project(self, pid):
+        return None
+
     def get_user_from_access_key(self, key):
         return FakeAuthManager.auth_data.get(key, None)
 
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 1b4031217..0f274bd15 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -50,7 +50,7 @@ class BaseImageServiceTests(object):
                    'updated': None,
                    'created': None,
                    'status': None,
-                   'serverId': None,
+                   'instance_id': None,
                    'progress': None}
 
         num_images = len(self.service.index(self.context))
@@ -67,7 +67,7 @@ class BaseImageServiceTests(object):
                    'updated': None,
                    'created': None,
                    'status': None,
-                   'serverId': None,
+                   'instance_id': None,
                    'progress': None}
 
         num_images = len(self.service.index(self.context))
@@ -87,7 +87,7 @@ class BaseImageServiceTests(object):
                    'updated': None,
                    'created': None,
                    'status': None,
-                   'serverId': None,
+                   'instance_id': None,
                    'progress': None}
 
         id = self.service.create(self.context, fixture)
@@ -105,13 +105,13 @@ class BaseImageServiceTests(object):
                     'updated': None,
                     'created': None,
                     'status': None,
-                    'serverId': None,
+                    'instance_id': None,
                     'progress': None},
                    {'name': 'test image 2',
                     'updated': None,
                     'created': None,
                     'status': None,
-                    'serverId': None,
+                    'instance_id': None,
                     'progress': None}]
 
         num_images = len(self.service.index(self.context))
@@ -155,6 +155,7 @@ class GlanceImageServiceTest(unittest.TestCase,
     def setUp(self):
         self.stubs = stubout.StubOutForTesting()
         fakes.stub_out_glance(self.stubs)
+        fakes.stub_out_compute_api_snapshot(self.stubs)
         service_class = 'nova.image.glance.GlanceImageService'
         self.service = utils.import_object(service_class)
         self.context = context.RequestContext(None, None)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 5d23db588..70ff714e6 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -95,6 +95,10 @@ class ServersTest(unittest.TestCase):
                        fake_compute_api)
         self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
                        fake_compute_api)
+        self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics",
+                       fake_compute_api)
+        self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions",
+                       fake_compute_api)
         self.allow_admin = FLAGS.allow_admin_api
 
     def tearDown(self):
@@ -274,6 +278,18 @@ class ServersTest(unittest.TestCase):
         res = req.get_response(nova.api.API('os'))
         self.assertEqual(res.status_int, 202)
 
+    def test_server_diagnostics(self):
+        req = webob.Request.blank("/v1.0/servers/1/diagnostics")
+        req.method = "GET"
+        res = req.get_response(nova.api.API("os"))
+        self.assertEqual(res.status_int, 404)
+
+    def test_server_actions(self):
+        req = webob.Request.blank("/v1.0/servers/1/actions")
+        req.method = "GET"
+        res = req.get_response(nova.api.API("os"))
+        self.assertEqual(res.status_int, 404)
+
     def test_server_reboot(self):
         body = dict(server=dict(
             name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index bcb8a1526..1fb9143f1 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -151,6 +151,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.reboot_instance(self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)
 
+    def test_snapshot(self):
+        """Ensure instance can be snapshotted"""
+        instance_id = self._create_instance()
+        name = "myfakesnapshot"
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.snapshot_instance(self.context, instance_id, name)
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_console_output(self):
         """Make sure we can get console output from instance"""
         instance_id = self._create_instance()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 91517cc5d..a9937d797 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -19,6 +19,8 @@ Tests For Scheduler
 """
 
+import datetime
+
 from nova import context
 from nova import db
 from nova import flags
@@ -33,6 +35,7 @@ from nova.scheduler import driver
 
 FLAGS = flags.FLAGS
 flags.DECLARE('max_cores', 'nova.scheduler.simple')
+flags.DECLARE('stub_network', 'nova.compute.manager')
 
 
 class TestDriver(driver.Scheduler):
@@ -94,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)
 
-    def _create_instance(self):
+    def _create_instance(self, **kwargs):
         """Create a test instance"""
         inst = {}
         inst['image_id'] = 'ami-test'
@@ -105,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         inst['vcpus'] = 1
+        inst['availability_zone'] = kwargs.get('availability_zone', None)
         return db.instance_create(self.context, inst)['id']
 
     def _create_volume(self):
@@ -113,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
         vol['image_id'] = 'ami-test'
         vol['reservation_id'] = 'r-fakeres'
         vol['size'] = 1
+        vol['availability_zone'] = 'test'
         return db.volume_create(self.context, vol)['id']
 
-    def test_hosts_are_up(self):
+    def test_doesnt_report_disabled_hosts_as_up(self):
+        """Ensures driver doesn't find hosts before they are enabled"""
+        # NOTE(vish): constructing service without create method
+        #             because we are going to use it without queue
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        db.service_update(self.context, s2['id'], {'disabled': True})
+        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+        self.assertEqual(0, len(hosts))
+        compute1.kill()
+        compute2.kill()
+
+    def test_reports_enabled_hosts_as_up(self):
         """Ensures driver can find the hosts that are up"""
         # NOTE(vish): constructing service without create method
         #             because we are going to use it without queue
@@ -130,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
                                    FLAGS.compute_manager)
         compute2.start()
         hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
-        self.assertEqual(len(hosts), 2)
+        self.assertEqual(2, len(hosts))
         compute1.kill()
         compute2.kill()
 
@@ -157,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
         compute1.kill()
         compute2.kill()
 
+    def test_specific_host_gets_instance(self):
+        """Ensures if you set availability_zone it launches on that zone"""
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        instance_id1 = self._create_instance()
+        compute1.run_instance(self.context, instance_id1)
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        compute1.terminate_instance(self.context, instance_id1)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+        compute2.kill()
+
+    def test_wont_sechedule_if_specified_host_is_down(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        now = datetime.datetime.utcnow()
+        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+        past = now - delta
+        db.service_update(self.context, s1['id'], {'updated_at': past})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        self.assertRaises(driver.WillNotSchedule,
+                          self.scheduler.driver.schedule_run_instance,
+                          self.context,
+                          instance_id2)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
+    def test_will_schedule_on_disabled_host_if_specified(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
     def test_too_many_cores(self):
         """Ensures we don't go over max cores"""
         compute1 = service.Service('host1',
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index b30838ad7..9f1a181a0 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue
 
 import mox
 
+from nova import context
+from nova import db
 from nova import exception
 from nova import flags
 from nova import rpc
@@ -72,6 +74,30 @@ class ServiceManagerTestCase(test.TestCase):
         self.assertEqual(serv.test_method(), 'service')
 
 
+class ServiceFlagsTestCase(test.TestCase):
+    def test_service_enabled_on_create_based_on_flag(self):
+        self.flags(enable_new_services=True)
+        host = 'foo'
+        binary = 'nova-fake'
+        app = service.Service.create(host=host, binary=binary)
+        app.start()
+        app.stop()
+        ref = db.service_get(context.get_admin_context(), app.service_id)
+        db.service_destroy(context.get_admin_context(), app.service_id)
+        self.assert_(not ref['disabled'])
+
+    def test_service_disabled_on_create_based_on_flag(self):
+        self.flags(enable_new_services=False)
+        host = 'foo'
+        binary = 'nova-fake'
+        app = service.Service.create(host=host, binary=binary)
+        app.start()
+        app.stop()
+        ref = db.service_get(context.get_admin_context(), app.service_id)
+        db.service_destroy(context.get_admin_context(), app.service_id)
+        self.assert_(ref['disabled'])
+
+
 class ServiceTestCase(test.TestCase):
     """Test cases for Services"""
 
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index ed2e4ffde..c95a53af3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -29,9 +29,9 @@ from nova.auth import manager
 from nova.compute import instance_types
 from nova.compute import power_state
 from nova.virt import xenapi_conn
-from nova.virt.xenapi import fake
+from nova.virt.xenapi import fake as xenapi_fake
 from nova.virt.xenapi import volume_utils
-from nova.tests.db import fakes
+from nova.tests.db import fakes as db_fakes
 from nova.tests.xenapi import stubs
 
 FLAGS = flags.FLAGS
@@ -47,9 +47,9 @@ class XenAPIVolumeTestCase(test.TestCase):
         FLAGS.target_host = '127.0.0.1'
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
-        fakes.stub_out_db_instance_api(self.stubs)
+        db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.stub_out_get_target(self.stubs)
-        fake.reset()
+        xenapi_fake.reset()
         self.values = {'name': 1, 'id': 1,
                        'project_id': 'fake',
                        'user_id': 'fake',
@@ -83,7 +83,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         label = 'SR-%s' % vol['ec2_id']
         description = 'Test-SR'
         sr_ref = helper.create_iscsi_storage(session, info, label, description)
-        srs = fake.get_all('SR')
+        srs = xenapi_fake.get_all('SR')
         self.assertEqual(sr_ref, srs[0])
         db.volume_destroy(context.get_admin_context(), vol['id'])
 
@@ -107,17 +107,17 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        fake.create_vm(instance.name, 'Running')
+        xenapi_fake.create_vm(instance.name, 'Running')
         result = conn.attach_volume(instance.name, volume['ec2_id'],
                                     '/dev/sdc')
 
         def check():
             # check that the VM has a VBD attached to it
             # Get XenAPI reference for the VM
-            vms = fake.get_all('VM')
+            vms = xenapi_fake.get_all('VM')
             # Get XenAPI record for VBD
-            vbds = fake.get_all('VBD')
-            vbd = fake.get_record('VBD', vbds[0])
+            vbds = xenapi_fake.get_all('VBD')
+            vbd = xenapi_fake.get_record('VBD', vbds[0])
             vm_ref = vbd['VM']
             self.assertEqual(vm_ref, vms[0])
 
@@ -130,7 +130,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        fake.create_vm(instance.name, 'Running')
+        xenapi_fake.create_vm(instance.name, 'Running')
         self.assertRaises(Exception,
                           conn.attach_volume,
                           instance.name,
@@ -156,41 +156,70 @@ class XenAPIVMTestCase(test.TestCase):
         self.stubs = stubout.StubOutForTesting()
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
-        fake.reset()
-        fakes.stub_out_db_instance_api(self.stubs)
-        fake.create_network('fake', FLAGS.flat_network_bridge)
+        xenapi_fake.reset()
+        db_fakes.stub_out_db_instance_api(self.stubs)
+        xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        self.conn = xenapi_conn.get_connection(False)
 
     def test_list_instances_0(self):
-        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-        conn = xenapi_conn.get_connection(False)
-        instances = conn.list_instances()
+        instances = self.conn.list_instances()
         self.assertEquals(instances, [])
 
+    def test_get_diagnostics(self):
+        instance = self._create_instance()
+        self.conn.get_diagnostics(instance)
+
+    def test_instance_snapshot(self):
+        stubs.stubout_instance_snapshot(self.stubs)
+        instance = self._create_instance()
+
+        name = "MySnapshot"
+        template_vm_ref = self.conn.snapshot(instance, name)
+
+        def ensure_vm_was_torn_down():
+            vm_labels = []
+            for vm_ref in xenapi_fake.get_all('VM'):
+                vm_rec = xenapi_fake.get_record('VM', vm_ref)
+                if not vm_rec["is_control_domain"]:
+                    vm_labels.append(vm_rec["name_label"])
+
+            self.assertEquals(vm_labels, [1])
+
+        def ensure_vbd_was_torn_down():
+            vbd_labels = []
+            for vbd_ref in xenapi_fake.get_all('VBD'):
+                vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+                vbd_labels.append(vbd_rec["vm_name_label"])
+
+            self.assertEquals(vbd_labels, [1])
+
+        def ensure_vdi_was_torn_down():
+            for vdi_ref in xenapi_fake.get_all('VDI'):
+                vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+                name_label = vdi_rec["name_label"]
+                self.assert_(not name_label.endswith('snapshot'))
+
+        def check():
+            ensure_vm_was_torn_down()
+            ensure_vbd_was_torn_down()
+            ensure_vdi_was_torn_down()
+
+        check()
+
     def test_spawn(self):
-        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-        values = {'name': 1, 'id': 1,
-                  'project_id': self.project.id,
-                  'user_id': self.user.id,
-                  'image_id': 1,
-                  'kernel_id': 2,
-                  'ramdisk_id': 3,
-                  'instance_type': 'm1.large',
-                  'mac_address': 'aa:bb:cc:dd:ee:ff',
-                  }
-        conn = xenapi_conn.get_connection(False)
-        instance = db.instance_create(values)
-        conn.spawn(instance)
+        instance = self._create_instance()
 
         def check():
-            instances = conn.list_instances()
+            instances = self.conn.list_instances()
             self.assertEquals(instances, [1])
 
             # Get Nova record for VM
-            vm_info = conn.get_info(1)
+            vm_info = self.conn.get_info(1)
 
             # Get XenAPI record for VM
-            vms = fake.get_all('VM')
-            vm = fake.get_record('VM', vms[0])
+            vms = xenapi_fake.get_all('VM')
+            vm = xenapi_fake.get_record('VM', vms[0])
 
             # Check that m1.large above turned into the right thing.
             instance_type = instance_types.INSTANCE_TYPES['m1.large']
@@ -218,3 +247,18 @@ class XenAPIVMTestCase(test.TestCase):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         self.stubs.UnsetAll()
+
+    def _create_instance(self):
+        """Creates and spawns a test instance"""
+        values = {'name': 1, 'id': 1,
+                  'project_id': self.project.id,
+                  'user_id': self.user.id,
+                  'image_id': 1,
+                  'kernel_id': 2,
+                  'ramdisk_id': 3,
+                  'instance_type': 'm1.large',
+                  'mac_address': 'aa:bb:cc:dd:ee:ff'
+                  }
+        instance = db.instance_create(values)
+        self.conn.spawn(instance)
+        return instance
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a7e592fee..55f751f11 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -19,6 +19,54 @@ from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
 from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
+
+
+def stubout_instance_snapshot(stubs):
+    @classmethod
+    def fake_fetch_image(cls, session, instance_id, image, user, project,
+                         type):
+        # Stubout wait_for_task
+        def fake_wait_for_task(self, id, task):
+            class FakeEvent:
+
+                def send(self, value):
+                    self.rv = value
+
+                def wait(self):
+                    return self.rv
+
+            done = FakeEvent()
+            self._poll_task(id, task, done)
+            rv = done.wait()
+            return rv
+
+        stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
+                  fake_wait_for_task)
+
+        from nova.virt.xenapi.fake import create_vdi
+        name_label = "instance-%s" % instance_id
+        #TODO: create fake SR record
+        sr_ref = "fakesr"
+        vdi_ref = create_vdi(name_label=name_label, read_only=False,
+                             sr_ref=sr_ref, sharable=False)
+        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+        vdi_uuid = vdi_rec['uuid']
+        return vdi_uuid
+
+    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+
+    def fake_parse_xmlrpc_value(val):
+        return val
+
+    stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
+
+    def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+                                   original_parent_uuid):
+        #TODO(sirp): Should we actually fake out the data here
+        return "fakeparent"
+
+    stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
 
 
 def stubout_session(stubs, cls):
@@ -63,6 +111,24 @@ class FakeSessionForVMTests(fake.SessionBase):
         vm['is_a_template'] = False
         vm['is_control_domain'] = False
 
+    def VM_snapshot(self, session_ref, vm_ref, label):
+        status = "Running"
+        template_vm_ref = fake.create_vm(label, status, is_a_template=True,
+                                         is_control_domain=False)
+
+        sr_ref = "fakesr"
+        template_vdi_ref = fake.create_vdi(label, read_only=True,
+                                           sr_ref=sr_ref, sharable=False)
+
+        template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
+        return template_vm_ref
+
+    def VDI_destroy(self, session_ref, vdi_ref):
+        fake.destroy_vdi(vdi_ref)
+
+    def VM_destroy(self, session_ref, vm_ref):
+        fake.destroy_vm(vm_ref)
+
 
 class FakeSessionForVolumeTests(fake.SessionBase):
     """ Stubs out a XenAPISession for Volume tests """
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 706888b0d..32541f5b4 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -112,6 +112,20 @@ class FakeConnection(object):
         self.instances[instance.name] = fake_instance
         fake_instance._state = power_state.RUNNING
 
+    def snapshot(self, instance, name):
+        """
+        Snapshots the specified instance.
+
+        The given parameter is an instance of nova.compute.service.Instance,
+        and so the instance is being specified as instance.name.
+
+        The second parameter is the name of the snapshot.
+
+        The work will be done asynchronously. This function returns a
+        Deferred that allows the caller to detect when it is complete.
+        """
+        pass
+
     def reboot(self, instance):
         """
         Reboot the specified instance.
@@ -202,6 +216,9 @@ class FakeConnection(object):
                 'num_cpu': 2,
                 'cpu_time': 0}
 
+    def get_diagnostics(self, instance_name):
+        pass
+
     def list_disks(self, instance_name):
         """
         Return the IDs of all the virtual disks attached to the specified
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 39215c4e1..00edfbdc8 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -260,6 +260,13 @@ class LibvirtConnection(object):
         virt_dom.detachDevice(xml)
 
     @exception.wrap_exception
+    def snapshot(self, instance, name):
+        """ Create snapshot from a running VM instance """
+        raise NotImplementedError(
+            _("Instance snapshotting is not supported for libvirt "
+              "at this time"))
+
+    @exception.wrap_exception
     def reboot(self, instance):
         self.destroy(instance, False)
         xml = self.to_xml(instance)
@@ -580,6 +587,9 @@ class LibvirtConnection(object):
                 'num_cpu': num_cpu,
                 'cpu_time': cpu_time}
 
+    def get_diagnostics(self, instance_name):
+        raise exception.APIError("diagnostics are not supported for libvirt")
+
     def get_disks(self, instance_name):
         """
         Note that this function takes an instance name, not an Instance, so
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1eaf31c25..aa4026f97 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -55,6 +55,8 @@ import datetime
 import logging
 import uuid
 
+from pprint import pformat
+
 from nova import exception
 
 
@@ -64,6 +66,10 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
 
 _db_content = {}
 
 
+def log_db_contents(msg=None):
+    logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+
+
 def reset():
     for c in _CLASSES:
         _db_content[c] = {}
@@ -93,6 +99,24 @@ def create_vm(name_label, status,
         })
 
 
+def destroy_vm(vm_ref):
+    vm_rec = _db_content['VM'][vm_ref]
+
+    vbd_refs = vm_rec['VBDs']
+    for vbd_ref in vbd_refs:
+        destroy_vbd(vbd_ref)
+
+    del _db_content['VM'][vm_ref]
+
+
+def destroy_vbd(vbd_ref):
+    del _db_content['VBD'][vbd_ref]
+
+
+def destroy_vdi(vdi_ref):
+    del _db_content['VDI'][vdi_ref]
+
+
 def create_vdi(name_label, read_only, sr_ref, sharable):
     return _create_object('VDI', {
         'name_label': name_label,
@@ -109,6 +133,23 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
         })
 
 
+def create_vbd(vm_ref, vdi_ref):
+    vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+    vbd_ref = _create_object('VBD', vbd_rec)
+    after_VBD_create(vbd_ref, vbd_rec)
+    return vbd_ref
+
+
+def after_VBD_create(vbd_ref, vbd_rec):
+    """Create backref from VM to VBD when VBD is created"""
+    vm_ref = vbd_rec['VM']
+    vm_rec = _db_content['VM'][vm_ref]
+    vm_rec['VBDs'] = [vbd_ref]
+
+    vm_name_label = _db_content['VM'][vm_ref]['name_label']
+    vbd_rec['vm_name_label'] = vm_name_label
+
+
 def create_pbd(config, sr_ref, attached):
     return _create_object('PBD', {
         'device-config': config,
@@ -277,11 +318,12 @@ class SessionBase(object):
             self._check_arg_count(params, 2)
             return get_record(cls, params[1])
 
-        if (func == 'get_by_name_label' or
-            func == 'get_by_uuid'):
+        if func in ('get_by_name_label', 'get_by_uuid'):
             self._check_arg_count(params, 2)
+            return_singleton = (func == 'get_by_uuid')
             return self._get_by_field(
-                _db_content[cls], func[len('get_by_'):], params[1])
+                _db_content[cls], func[len('get_by_'):], params[1],
+                return_singleton=return_singleton)
 
         if len(params) == 2:
             field = func[len('get_'):]
@@ -324,6 +366,13 @@ class SessionBase(object):
         (cls, _) = name.split('.')
         ref = is_sr_create and \
             _create_sr(cls, params) or _create_object(cls, params[1])
+
+        # Call hook to provide any fixups needed (ex. creating backrefs)
+        try:
+            globals()["after_%s_create" % cls](ref, params[1])
+        except KeyError:
+            pass
+
         obj = get_record(cls, ref)
 
         # Add RO fields
@@ -359,11 +408,18 @@ class SessionBase(object):
             raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
                            expected, actual])
 
-    def _get_by_field(self, recs, k, v):
+    def _get_by_field(self, recs, k, v, return_singleton):
         result = []
        for ref, rec in recs.iteritems():
            if rec.get(k) == v:
                result.append(ref)
+
+        if return_singleton:
+            try:
+                return result[0]
+            except IndexError:
+                return None
+
         return result
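The fake session's create path above dispatches optional fixup hooks by name, looking up a module-level after_<class>_create function via globals() and silently skipping classes that define none. A self-contained sketch of the same pattern (names illustrative, not from this tree):

    def after_VBD_create(ref, rec):
        # Example fixup: stamp a backref-derived field on the record.
        rec['vm_name_label'] = 'instance-1'

    def run_create_hook(cls, ref, rec):
        try:
            globals()['after_%s_create' % cls](ref, rec)
        except KeyError:
            # No hook defined for this class; nothing to do.
            pass

    rec = {'VM': 'vm-ref-1'}
    run_create_hook('VBD', 'vbd-ref-1', rec)
    assert rec['vm_name_label'] == 'instance-1'
    run_create_hook('VDI', 'vdi-ref-1', {})  # no after_VDI_create: skipped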
""" import logging +import pickle import urllib from xml.dom import minidom +from eventlet import event from nova import exception from nova import flags +from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -204,7 +207,54 @@ class VMHelper(HelperBase): return vif_ref @classmethod - def fetch_image(cls, session, image, user, project, type): + def create_snapshot(cls, session, instance_id, vm_ref, label): + """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD + """ + #TODO(sirp): Add quiesce and VSS locking support when Windows support + # is added + logging.debug(_("Snapshotting VM %s with label '%s'..."), + vm_ref, label) + + vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_uuid = vm_vdi_rec["uuid"] + sr_ref = vm_vdi_rec["SR"] + + original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) + + task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) + template_vm_ref = session.wait_for_task(instance_id, task) + template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] + template_vdi_uuid = template_vdi_rec["uuid"] + + logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, + vm_ref) + + parent_uuid = wait_for_vhd_coalesce( + session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) + + #TODO(sirp): we need to assert only one parent, not parents two deep + return template_vm_ref, [template_vdi_uuid, parent_uuid] + + @classmethod + def upload_image(cls, session, instance_id, vdi_uuids, image_name): + """ Requests that the Glance plugin bundle the specified VDIs and + push them into Glance using the specified human-friendly name. + """ + logging.debug(_("Asking xapi to upload %s as '%s'"), + vdi_uuids, image_name) + + params = {'vdi_uuids': vdi_uuids, + 'image_name': image_name, + 'glance_host': FLAGS.glance_host, + 'glance_port': FLAGS.glance_port} + + kwargs = {'params': pickle.dumps(params)} + task = session.async_call_plugin('glance', 'put_vdis', kwargs) + session.wait_for_task(instance_id, task) + + @classmethod + def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance """ @@ -223,9 +273,7 @@ class VMHelper(HelperBase): if type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - #FIXME(armando): find a solution to missing instance_id - #with Josh Kearney - uuid = session.wait_for_task(0, task) + uuid = session.wait_for_task(instance_id, task) return uuid @classmethod @@ -299,6 +347,10 @@ class VMHelper(HelperBase): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] + except (cls.XenAPI.Failure, KeyError) as e: + return {"Unable to retrieve diagnostics": e} + + try: diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: @@ -325,3 +377,87 @@ def get_rrd(host, uuid): return xml.read() except IOError: return None + + +#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to +# use that implmenetation +def get_vhd_parent(session, vdi_rec): + """ + Returns the VHD parent of the given VDI record, as a (ref, rec) pair. + Returns None if we're at the root of the tree. 
+
+
+def get_vhd_parent_uuid(session, vdi_ref):
+    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+    ret = get_vhd_parent(session, vdi_rec)
+    if ret:
+        parent_ref, parent_rec = ret
+        return parent_rec["uuid"]
+    else:
+        return None
+
+
+def scan_sr(session, instance_id, sr_ref):
+    logging.debug(_("Re-scanning SR %s"), sr_ref)
+    task = session.call_xenapi('Async.SR.scan', sr_ref)
+    session.wait_for_task(instance_id, task)
+
+
+def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+                          original_parent_uuid):
+    """ Spin until the parent VHD is coalesced into its parent VHD
+
+    Before coalesce:
+        * original_parent_vhd
+            * parent_vhd
+                snapshot
+
+    After coalesce:
+        * parent_vhd
+            snapshot
+    """
+    #TODO(sirp): we need to timeout this request after a while
+
+    def _poll_vhds():
+        scan_sr(session, instance_id, sr_ref)
+        parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
+        if original_parent_uuid and (parent_uuid != original_parent_uuid):
+            logging.debug(
+                _("Parent %s doesn't match original parent %s, "
+                  "waiting for coalesce..."),
+                parent_uuid, original_parent_uuid)
+        else:
+            done.send(parent_uuid)
+
+    done = event.Event()
+    loop = utils.LoopingCall(_poll_vhds)
+    loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
+    parent_uuid = done.wait()
+    loop.stop()
+    return parent_uuid
+
+
+def get_vdi_for_vm_safely(session, vm_ref):
+    vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+    if vdi_refs is None:
+        raise Exception(_("No VDIs found for VM %s") % vm_ref)
+    else:
+        num_vdis = len(vdi_refs)
+        if num_vdis != 1:
+            raise Exception(_("Unexpected number of VDIs (%s) found for "
+                              "VM %s") % (num_vdis, vm_ref))
+
+    vdi_ref = vdi_refs[0]
+    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+    return vdi_ref, vdi_rec
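wait_for_vhd_coalesce above drives the poll with a LoopingCall and an eventlet Event. Stripped of that machinery, the control flow reduces to a plain poll loop (a sketch; get_parent stands in for the SR rescan plus parent lookup)::

    import time

    def wait_until_coalesced(get_parent, original_parent, interval=5.0):
        # Rescan and re-read the parent until the chain settles back onto
        # the original parent (or there was no parent to begin with).
        while True:
            parent = get_parent()
            if not (original_parent and parent != original_parent):
                return parent
            time.sleep(interval)

    # Example: the chain settles after two polls.
    chain = iter(['intermediate-1', 'intermediate-2', 'p0'])
    assert wait_until_coalesced(lambda: next(chain), 'p0', interval=0) == 'p0'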
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index ba502ffa2..76f31635a 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -70,7 +70,7 @@ class VMOps(object):
             disk_image_type = ImageType.DISK
         else:
             disk_image_type = ImageType.DISK_RAW
-        vdi_uuid = VMHelper.fetch_image(self._session,
+        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
             instance.image_id, user, project, disk_image_type)
         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
         #Have a look at the VDI and see if it has a PV kernel
@@ -79,11 +79,11 @@ class VMOps(object):
             pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
         kernel = None
         if instance.kernel_id:
-            kernel = VMHelper.fetch_image(self._session,
+            kernel = VMHelper.fetch_image(self._session, instance.id,
                 instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
         ramdisk = None
         if instance.ramdisk_id:
-            ramdisk = VMHelper.fetch_image(self._session,
+            ramdisk = VMHelper.fetch_image(self._session, instance.id,
                 instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
         vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel)
@@ -120,6 +120,52 @@ class VMOps(object):
             timer.f = _wait_for_boot
         return timer.start(interval=0.5, now=True)
 
+    def snapshot(self, instance, name):
+        """ Create snapshot from a running VM instance
+
+        :param instance: instance to be snapshotted
+        :param name: name/label to be given to the snapshot
+
+        Steps involved in a XenServer snapshot:
+
+        1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
+           creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+           Snapshot VHD
+
+        2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
+           a 'base-copy' VDI. The base_copy is immutable and may be chained
+           with other base_copies. If chained, the base_copies coalesce
+           together, so we must wait for this coalescing to occur to get a
+           stable representation of the data on disk.
+
+        3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
+           that will bundle the VHDs together and then push the bundle into
+           Glance.
+        """
+
+        #TODO(sirp): Add quiesce and VSS locking support when Windows support
+        # is added
+
+        logging.debug(_("Starting snapshot for VM %s"), instance)
+        vm_ref = VMHelper.lookup(self._session, instance.name)
+
+        label = "%s-snapshot" % instance.name
+        try:
+            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
+                self._session, instance.id, vm_ref, label)
+        except self.XenAPI.Failure, exc:
+            logging.error(_("Unable to snapshot %s: %s"), vm_ref, exc)
+            return
+
+        try:
+            # call plugin to ship snapshot off to glance
+            VMHelper.upload_image(
+                self._session, instance.id, template_vdi_uuids, name)
+        finally:
+            self._destroy(instance, template_vm_ref, shutdown=False)
+
+        logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+
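The try/finally above is what keeps a failed upload from leaking template VMs: the cleanup runs whether or not the Glance push succeeds. The contract in miniature (create, upload, and destroy are stand-in callables)::

    def snapshot_and_upload(create, upload, destroy):
        template = create()
        try:
            upload(template)       # may raise
        finally:
            destroy(template)      # runs on success and on failure

    def failing_upload(template):
        raise IOError('upload failed')    # simulated Glance outage

    destroyed = []
    try:
        snapshot_and_upload(lambda: 'template-vm', failing_upload,
                            destroyed.append)
    except IOError:
        pass
    assert destroyed == ['template-vm']   # cleaned up despite the error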
     def reboot(self, instance):
         """Reboot VM instance"""
         instance_name = instance.name
@@ -133,31 +179,36 @@ class VMOps(object):
     def destroy(self, instance):
         """Destroy VM instance"""
         vm = VMHelper.lookup(self._session, instance.name)
+        return self._destroy(instance, vm, shutdown=True)
+
+    def _destroy(self, instance, vm, shutdown=True):
+        """ Destroy VM instance """
         if vm is None:
             # Don't complain, just return. This lets us clean up instances
             # that have already disappeared from the underlying platform.
             return
         # Get the VDIs related to the VM
         vdis = VMHelper.lookup_vm_vdis(self._session, vm)
-        try:
-            task = self._session.call_xenapi('Async.VM.hard_shutdown',
-                                             vm)
-            self._session.wait_for_task(instance.id, task)
-        except XenAPI.Failure, exc:
-            logging.warn(exc)
+        if shutdown:
+            try:
+                task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+                self._session.wait_for_task(instance.id, task)
+            except self.XenAPI.Failure, exc:
+                logging.warn(exc)
+
         # Disk clean-up
         if vdis:
             for vdi in vdis:
                 try:
                     task = self._session.call_xenapi('Async.VDI.destroy', vdi)
                     self._session.wait_for_task(instance.id, task)
-                except XenAPI.Failure, exc:
+                except self.XenAPI.Failure, exc:
                     logging.warn(exc)
         # VM Destroy
         try:
             task = self._session.call_xenapi('Async.VM.destroy', vm)
             self._session.wait_for_task(instance.id, task)
-        except XenAPI.Failure, exc:
+        except self.XenAPI.Failure, exc:
             logging.warn(exc)
 
     def _wait_with_callback(self, instance_id, task, callback):
@@ -217,11 +268,12 @@ class VMOps(object):
         rec = self._session.get_xenapi().VM.get_record(vm)
         return VMHelper.compile_info(rec)
 
-    def get_diagnostics(self, instance_id):
+    def get_diagnostics(self, instance):
         """Return data about VM diagnostics"""
-        vm = VMHelper.lookup(self._session, instance_id)
+        vm = VMHelper.lookup(self._session, instance.name)
         if vm is None:
-            raise exception.NotFound(_("Instance not found %s") % instance_id)
+            raise exception.NotFound(_("Instance not found %s") %
+                                     instance.name)
         rec = self._session.get_xenapi().VM.get_record(vm)
         return VMHelper.compile_diagnostics(self._session, rec)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 7f03d6c2b..f17c8f39d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -83,6 +83,10 @@ flags.DEFINE_float('xenapi_task_poll_interval',
                    'The interval used for polling of remote tasks '
                    '(Async.VM.start, etc). Used only if '
                    'connection_type=xenapi.')
+flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
+                   5.0,
+                   'The interval used for polling of coalescing VHDs. '
+                   'Used only if connection_type=xenapi.')
 flags.DEFINE_string('target_host',
                     None,
                     'iSCSI Target Host')
@@ -131,6 +135,10 @@ class XenAPIConnection(object):
         """Create VM instance"""
         self._vmops.spawn(instance)
 
+    def snapshot(self, instance, name):
+        """ Create snapshot from a running VM instance """
+        self._vmops.snapshot(instance, name)
+
     def reboot(self, instance):
         """Reboot VM instance"""
         self._vmops.reboot(instance)
@@ -159,9 +167,9 @@ class XenAPIConnection(object):
         """Return data about VM instance"""
         return self._vmops.get_info(instance_id)
 
-    def get_diagnostics(self, instance_id):
+    def get_diagnostics(self, instance):
         """Return data about VM diagnostics"""
-        return self._vmops.get_diagnostics(instance_id)
+        return self._vmops.get_diagnostics(instance)
 
     def get_console_output(self, instance):
         """Return snapshot of console"""
@@ -233,8 +241,8 @@ class XenAPISession(object):
         name = self._session.xenapi.task.get_name_label(task)
         status = self._session.xenapi.task.get_status(task)
         action = dict(
-            id=int(id),
-            action=name,
+            instance_id=int(id),
+            action=name[0:255],  # Ensure action is never > 255 characters
             error=None)
         if status == "pending":
             return
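The action dict built in wait_for_task apparently feeds a 255-character database column, hence the defensive slice above. Slicing never raises, whatever the input length::

    def clip(s, limit=255):
        # Defensive truncation for a fixed-width DB column.
        return s[0:limit]

    assert clip('x' * 300) == 'x' * 255
    assert clip('short') == 'short'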
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
new file mode 100644
index 000000000..5e648b970
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for putting images into glance
+#
+
+import base64
+import errno
+import hmac
+import httplib
+import os
+import os.path
+import pickle
+import sha
+import subprocess
+import time
+import urlparse
+
+import XenAPIPlugin
+
+#FIXME(sirp): should this use pluginlib from 5.6?
+from pluginlib_nova import *
+configure_logging('glance')
+
+CHUNK_SIZE = 8192
+FILE_SR_PATH = '/var/run/sr-mount'
+
+
+def put_vdis(session, args):
+    params = pickle.loads(exists(args, 'params'))
+    vdi_uuids = params["vdi_uuids"]
+    image_name = params["image_name"]
+    glance_host = params["glance_host"]
+    glance_port = params["glance_port"]
+
+    sr_path = get_sr_path(session)
+    #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
+    tmp_file = "%s.tar.gz" % os.path.join('/tmp', image_name)
+    tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
+    paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
+    tar_cmd.extend(paths)
+    logging.debug("Bundling image with cmd: %s", tar_cmd)
+    subprocess.call(tar_cmd)
+    logging.debug("Writing to temp file %s", tmp_file)
+    put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port)
+    return ""  # FIXME(sirp): return anything useful here?
+
+
+def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port):
+    size = os.path.getsize(tmp_file)
+    basename = os.path.basename(tmp_file)
+
+    bundle = open(tmp_file, 'r')
+    try:
+        headers = {
+            'x-image-meta-store': 'file',
+            'x-image-meta-is_public': 'True',
+            'x-image-meta-type': 'raw',
+            'x-image-meta-name': image_name,
+            'x-image-meta-size': size,
+            'content-length': size,
+            'content-type': 'application/octet-stream',
+        }
+        conn = httplib.HTTPConnection(glance_host, glance_port)
+        #NOTE(sirp): httplib under python2.4 won't accept a file-like object
+        # to request
+        conn.putrequest('POST', '/images')
+
+        for header, value in headers.iteritems():
+            conn.putheader(header, value)
+        conn.endheaders()
+
+        chunk = bundle.read(CHUNK_SIZE)
+        while chunk:
+            conn.send(chunk)
+            chunk = bundle.read(CHUNK_SIZE)
+
+        res = conn.getresponse()
+        #FIXME(sirp): should this be 201 Created?
+        if res.status != httplib.OK:
+            raise Exception("Unexpected response from Glance %i" % res.status)
+    finally:
+        bundle.close()
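One way to settle that FIXME: a successful POST may well come back as 201 Created rather than 200 OK, so a tolerant check accepts both (a sketch, not verified against the Glance API of this era)::

    import httplib

    def check_glance_response(status):
        # Accept 200 OK and 201 Created; anything else is an error.
        if status not in (httplib.OK, httplib.CREATED):
            raise Exception("Unexpected response from Glance %i" % status)

    check_glance_response(httplib.OK)
    check_glance_response(httplib.CREATED)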
+
+def get_sr_path(session):
+    sr_ref = find_sr(session)
+
+    if sr_ref is None:
+        raise Exception('Cannot find SR to read VDI from')
+
+    sr_rec = session.xenapi.SR.get_record(sr_ref)
+    sr_uuid = sr_rec["uuid"]
+    sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
+    return sr_path
+
+
+#TODO(sirp): both objectstore and glance need this; should it be refactored
+# into a common lib?
+def find_sr(session):
+    host = get_this_host(session)
+    srs = session.xenapi.SR.get_all()
+    for sr in srs:
+        sr_rec = session.xenapi.SR.get_record(sr)
+        if not ('i18n-key' in sr_rec['other_config'] and
+                sr_rec['other_config']['i18n-key'] == 'local-storage'):
+            continue
+        for pbd in sr_rec['PBDs']:
+            pbd_rec = session.xenapi.PBD.get_record(pbd)
+            if pbd_rec['host'] == host:
+                return sr
+    return None
+
+
+if __name__ == '__main__':
+    XenAPIPlugin.dispatch({'put_vdis': put_vdis})
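find_sr above identifies the host's local-storage SR by an other_config tag plus a PBD attached to the current host. The filtering predicate in isolation (record layout taken from the plugin code)::

    def is_local_storage(sr_rec):
        # True only for SRs tagged as local storage in other_config.
        other_config = sr_rec.get('other_config', {})
        return other_config.get('i18n-key') == 'local-storage'

    assert is_local_storage({'other_config': {'i18n-key': 'local-storage'}})
    assert not is_local_storage({'other_config': {}})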
diff --git a/run_tests.py b/run_tests.py
new file mode 100644
index 000000000..5b8617f63
--- /dev/null
+++ b/run_tests.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import sys
+import unittest
+
+from nose import config
+from nose import core
+from nose import result
+
+
+class NovaTestResult(result.TextTestResult):
+    def __init__(self, *args, **kw):
+        result.TextTestResult.__init__(self, *args, **kw)
+        self._last_case = None
+
+    def getDescription(self, test):
+        return str(test)
+
+    def startTest(self, test):
+        unittest.TestResult.startTest(self, test)
+        current_case = test.test.__class__.__name__
+
+        if self.showAll:
+            if current_case != self._last_case:
+                self.stream.writeln(current_case)
+                self._last_case = current_case
+
+            self.stream.write(
+                '    %s' % str(test.test._testMethodName).ljust(60))
+            self.stream.flush()
+
+
+class NovaTestRunner(core.TextTestRunner):
+    def _makeResult(self):
+        return NovaTestResult(self.stream,
+                              self.descriptions,
+                              self.verbosity,
+                              self.config)
+
+
+if __name__ == '__main__':
+    c = config.Config(stream=sys.stdout,
+                      env=os.environ,
+                      verbosity=3)
+
+    runner = NovaTestRunner(stream=c.stream,
+                            verbosity=c.verbosity,
+                            config=c)
+    sys.exit(not core.run(config=c, testRunner=runner))
diff --git a/run_tests.sh b/run_tests.sh
index ffb0b6295..fe703fece 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -32,16 +32,17 @@ never_venv=0
 force=0
 noseargs=
 
+
 for arg in "$@"; do
   process_option $arg
 done
 
-NOSETESTS="nosetests -v $noseargs"
+NOSETESTS="python run_tests.py $noseargs"
 
 if [ $never_venv -eq 1 ]; then
   # Just run the test suites in current environment
   rm -f nova.sqlite
-  $NOSETESTS
+  $NOSETESTS 2> run_tests.err.log
   exit
 fi
@@ -53,7 +54,7 @@ fi
 
 if [ -e ${venv} ]; then
   ${with_venv} rm -f nova.sqlite
-  ${with_venv} $NOSETESTS
+  ${with_venv} $NOSETESTS 2> run_tests.err.log
 else
   if [ $always_venv -eq 1 ]; then
     # Automatically install the virtualenv
@@ -66,10 +67,10 @@ else
       python tools/install_venv.py
     else
       rm -f nova.sqlite
-      $NOSETESTS
+      $NOSETESTS 2> run_tests.err.log
       exit
     fi
   fi
   ${with_venv} rm -f nova.sqlite
-  ${with_venv} $NOSETESTS
+  ${with_venv} $NOSETESTS 2> run_tests.err.log
 fi
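The final line of run_tests.py leans on a small idiom worth spelling out: core.run() returns True when the suite passes, and sys.exit() treats a falsy argument as exit status 0. In miniature::

    suite_passed = True                  # stand-in for core.run(...)
    assert int(not suite_passed) == 0    # passing suite -> exit status 0

    suite_passed = False
    assert int(not suite_passed) == 1    # failing suite -> exit status 1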