| field | value | timestamp |
|---|---|---|
| author | Justin Santa Barbara <justinsb@justinsb-desktop> | 2010-10-14 12:59:36 -0700 |
| committer | Justin Santa Barbara <justinsb@justinsb-desktop> | 2010-10-14 12:59:36 -0700 |
| commit | d8643f1e15f241db96893d1ea41083a2bee65dbd (patch) | |
| tree | 12e9e85733306f97b12b99339edbe49ef4031418 /nova/compute | |
| parent | 759bab6059ef2e4c463a73e12fe85fe4b147eba7 (diff) | |
| parent | 3363b133a927509432cb42d77abf18d3d5248abf (diff) | |
Merged with trunk, fixed broken stuff
Diffstat (limited to 'nova/compute')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/instance_types.py | 14 |
| -rw-r--r-- | nova/compute/manager.py | 180 |
| -rw-r--r-- | nova/compute/model.py | 314 |
| -rw-r--r-- | nova/compute/service.py | 367 |
4 files changed, 187 insertions, 688 deletions
```diff
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 439be3c7d..0102bae54 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,10 +21,10 @@
 The built-in instance properties.
 """
 
-INSTANCE_TYPES = {}
-INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0}
-INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10}
-INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10}
-INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10}
+INSTANCE_TYPES = {
+    'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+    'm1.small': dict(memory_mb=1024, vcpus=1, local_gb=10, flavorid=2),
+    'm1.medium': dict(memory_mb=2048, vcpus=2, local_gb=10, flavorid=3),
+    'm1.large': dict(memory_mb=4096, vcpus=4, local_gb=10, flavorid=4),
+    'm1.xlarge': dict(memory_mb=8192, vcpus=4, local_gb=10, flavorid=5),
+    'c1.medium': dict(memory_mb=2048, vcpus=4, local_gb=10, flavorid=6)}
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
new file mode 100644
index 000000000..94c95038f
--- /dev/null
+++ b/nova/compute/manager.py
@@ -0,0 +1,180 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Handles all code relating to instances (guest vms)
+"""
+
+import base64
+import datetime
+import logging
+import os
+
+from twisted.internet import defer
+
+from nova import exception
+from nova import flags
+from nova import manager
+from nova import utils
+from nova.compute import power_state
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('instances_path', utils.abspath('../instances'),
+                    'where instances are stored on disk')
+flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
+                    'Driver to use for volume creation')
+
+
+class ComputeManager(manager.Manager):
+    """
+    Manages the running instances.
+    """
+    def __init__(self, compute_driver=None, *args, **kwargs):
+        """Load configuration options and connect to the hypervisor."""
+        # TODO(vish): sync driver creation logic with the rest of the system
+        if not compute_driver:
+            compute_driver = FLAGS.compute_driver
+        self.driver = utils.import_object(compute_driver)
+        self.network_manager = utils.import_object(FLAGS.network_manager)
+        self.volume_manager = utils.import_object(FLAGS.volume_manager)
+        super(ComputeManager, self).__init__(*args, **kwargs)
+
+    def _update_state(self, context, instance_id):
+        """Update the state of an instance from the driver info"""
+        # FIXME(ja): include other fields from state?
+        instance_ref = self.db.instance_get(context, instance_id)
+        state = self.driver.get_info(instance_ref.name)['state']
+        self.db.instance_set_state(context, instance_id, state)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def refresh_security_group(self, context, security_group_id, **_kwargs):
+        yield self.driver.refresh_security_group(security_group_id)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def run_instance(self, context, instance_id, **_kwargs):
+        """Launch a new instance with specified options."""
+        instance_ref = self.db.instance_get(context, instance_id)
+        if instance_ref['name'] in self.driver.list_instances():
+            raise exception.Error("Instance has already been created")
+        logging.debug("instance %s: starting...", instance_id)
+        project_id = instance_ref['project_id']
+        self.network_manager.setup_compute_network(context, instance_id)
+        self.db.instance_update(context,
+                                instance_id,
+                                {'host': self.host})
+
+        # TODO(vish) check to make sure the availability zone matches
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'spawning')
+
+        try:
+            yield self.driver.spawn(instance_ref)
+            now = datetime.datetime.utcnow()
+            self.db.instance_update(context,
+                                    instance_id,
+                                    {'launched_at': now})
+        except Exception:  # pylint: disable-msg=W0702
+            logging.exception("instance %s: Failed to spawn",
+                              instance_ref['name'])
+            self.db.instance_set_state(context,
+                                       instance_id,
+                                       power_state.SHUTDOWN)
+
+        self._update_state(context, instance_id)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def terminate_instance(self, context, instance_id):
+        """Terminate an instance on this machine."""
+        logging.debug("instance %s: terminating", instance_id)
+
+        instance_ref = self.db.instance_get(context, instance_id)
+        if instance_ref['state'] == power_state.SHUTOFF:
+            self.db.instance_destroy(context, instance_id)
+            raise exception.Error('trying to destroy already destroyed'
+                                  ' instance: %s' % instance_id)
+
+        yield self.driver.destroy(instance_ref)
+
+        # TODO(ja): should we keep it in a terminated state for a bit?
+        self.db.instance_destroy(context, instance_id)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def reboot_instance(self, context, instance_id):
+        """Reboot an instance on this server."""
+        self._update_state(context, instance_id)
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        if instance_ref['state'] != power_state.RUNNING:
+            raise exception.Error(
+                'trying to reboot a non-running'
+                'instance: %s (state: %s excepted: %s)' %
+                (instance_ref['internal_id'],
+                 instance_ref['state'],
+                 power_state.RUNNING))
+
+        logging.debug('instance %s: rebooting', instance_ref['name'])
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'rebooting')
+        yield self.driver.reboot(instance_ref)
+        self._update_state(context, instance_id)
+
+    @exception.wrap_exception
+    def get_console_output(self, context, instance_id):
+        """Send the console output for an instance."""
+        logging.debug("instance %s: getting console output", instance_id)
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        return self.driver.get_console_output(instance_ref)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def attach_volume(self, context, instance_id, volume_id, mountpoint):
+        """Attach a volume to an instance."""
+        logging.debug("instance %s: attaching volume %s to %s", instance_id,
+                      volume_id, mountpoint)
+        instance_ref = self.db.instance_get(context, instance_id)
+        dev_path = yield self.volume_manager.setup_compute_volume(context,
+                                                                  volume_id)
+        yield self.driver.attach_volume(instance_ref['ec2_id'],
+                                        dev_path,
+                                        mountpoint)
+        self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+        defer.returnValue(True)
+
+    @defer.inlineCallbacks
+    @exception.wrap_exception
+    def detach_volume(self, context, instance_id, volume_id):
+        """Detach a volume from an instance."""
+        logging.debug("instance %s: detaching volume %s",
+                      instance_id,
+                      volume_id)
+        instance_ref = self.db.instance_get(context, instance_id)
+        volume_ref = self.db.volume_get(context, volume_id)
+        yield self.driver.detach_volume(instance_ref['ec2_id'],
+                                        volume_ref['mountpoint'])
+        self.db.volume_detached(context, volume_id)
+        defer.returnValue(True)
diff --git a/nova/compute/model.py b/nova/compute/model.py
deleted file mode 100644
index 84432b55f..000000000
--- a/nova/compute/model.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Datastore Model objects for Compute Instances, with
-InstanceDirectory manager.
-
-# Create a new instance?
->>> InstDir = InstanceDirectory()
->>> inst = InstDir.new()
->>> inst.destroy()
-True
->>> inst = InstDir['i-123']
->>> inst['ip'] = "192.168.0.3"
->>> inst['project_id'] = "projectA"
->>> inst.save()
-True
-
->>> InstDir['i-123']
-<Instance:i-123>
->>> InstDir.all.next()
-<Instance:i-123>
-
->>> inst.destroy()
-True
-"""
-
-import datetime
-import uuid
-
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import utils
-
-
-FLAGS = flags.FLAGS
-
-
-# TODO(todd): Implement this at the class level for Instance
-class InstanceDirectory(object):
-    """an api for interacting with the global state of instances"""
-
-    def get(self, instance_id):
-        """returns an instance object for a given id"""
-        return Instance(instance_id)
-
-    def __getitem__(self, item):
-        return self.get(item)
-
-    @datastore.absorb_connection_error
-    def by_project(self, project):
-        """returns a list of instance objects for a project"""
-        for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
-            yield Instance(instance_id)
-
-    @datastore.absorb_connection_error
-    def by_node(self, node):
-        """returns a list of instances for a node"""
-        for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node):
-            yield Instance(instance_id)
-
-    def by_ip(self, ip):
-        """returns an instance object that is using the IP"""
-        # NOTE(vish): The ip association should be just a single value, but
-        #             to maintain consistency it is using the standard
-        #             association and the ugly method for retrieving
-        #             the first item in the set below.
-        result = datastore.Redis.instance().smembers('ip:%s:instances' % ip)
-        if not result:
-            return None
-        return Instance(list(result)[0])
-
-    def by_volume(self, volume_id):
-        """returns the instance a volume is attached to"""
-        pass
-
-    @datastore.absorb_connection_error
-    def exists(self, instance_id):
-        return datastore.Redis.instance().sismember('instances', instance_id)
-
-    @property
-    @datastore.absorb_connection_error
-    def all(self):
-        """returns a list of all instances"""
-        for instance_id in datastore.Redis.instance().smembers('instances'):
-            yield Instance(instance_id)
-
-    def new(self):
-        """returns an empty Instance object, with ID"""
-        instance_id = utils.generate_uid('i')
-        return self.get(instance_id)
-
-
-class Instance(datastore.BasicModel):
-    """Wrapper around stored properties of an instance"""
-
-    def __init__(self, instance_id):
-        """loads an instance from the datastore if exists"""
-        # set instance data before super call since it uses default_state
-        self.instance_id = instance_id
-        super(Instance, self).__init__()
-
-    def default_state(self):
-        return {'state': 0,
-                'state_description': 'pending',
-                'instance_id': self.instance_id,
-                'node_name': 'unassigned',
-                'project_id': 'unassigned',
-                'user_id': 'unassigned',
-                'private_dns_name': 'unassigned'}
-
-    @property
-    def identifier(self):
-        return self.instance_id
-
-    @property
-    def project(self):
-        if self.state.get('project_id', None):
-            return self.state['project_id']
-        return self.state.get('owner_id', 'unassigned')
-
-    @property
-    def volumes(self):
-        """returns a list of attached volumes"""
-        pass
-
-    @property
-    def reservation(self):
-        """Returns a reservation object"""
-        pass
-
-    def save(self):
-        """Call into superclass to save object, then save associations"""
-        # NOTE(todd): doesn't track migration between projects/nodes,
-        #             it just adds the first one
-        is_new = self.is_new_record()
-        node_set = (self.state['node_name'] != 'unassigned' and
-                    self.initial_state.get('node_name', 'unassigned')
-                    == 'unassigned')
-        success = super(Instance, self).save()
-        if success and is_new:
-            self.associate_with("project", self.project)
-            self.associate_with("ip", self.state['private_dns_name'])
-        if success and node_set:
-            self.associate_with("node", self.state['node_name'])
-        return True
-
-    def destroy(self):
-        """Destroy associations, then destroy the object"""
-        self.unassociate_with("project", self.project)
-        self.unassociate_with("node", self.state['node_name'])
-        self.unassociate_with("ip", self.state['private_dns_name'])
-        return super(Instance, self).destroy()
-
-
-class Host(datastore.BasicModel):
-    """A Host is the machine where a Daemon is running."""
-
-    def __init__(self, hostname):
-        """loads an instance from the datastore if exists"""
-        # set instance data before super call since it uses default_state
-        self.hostname = hostname
-        super(Host, self).__init__()
-
-    def default_state(self):
-        return {"hostname": self.hostname}
-
-    @property
-    def identifier(self):
-        return self.hostname
-
-
-class Daemon(datastore.BasicModel):
-    """A Daemon is a job (compute, api, network, ...) that runs on a host."""
-
-    def __init__(self, host_or_combined, binpath=None):
-        """loads an instance from the datastore if exists"""
-        # set instance data before super call since it uses default_state
-        # since loading from datastore expects a combined key that
-        # is equivilent to identifier, we need to expect that, while
-        # maintaining meaningful semantics (2 arguments) when creating
-        # from within other code like the bin/nova-* scripts
-        if binpath:
-            self.hostname = host_or_combined
-            self.binary = binpath
-        else:
-            self.hostname, self.binary = host_or_combined.split(":")
-        super(Daemon, self).__init__()
-
-    def default_state(self):
-        return {"hostname": self.hostname,
-                "binary": self.binary,
-                "updated_at": utils.isotime()
-                }
-
-    @property
-    def identifier(self):
-        return "%s:%s" % (self.hostname, self.binary)
-
-    def save(self):
-        """Call into superclass to save object, then save associations"""
-        # NOTE(todd): this makes no attempt to destroy itsself,
-        #             so after termination a record w/ old timestmap remains
-        success = super(Daemon, self).save()
-        if success:
-            self.associate_with("host", self.hostname)
-        return True
-
-    def destroy(self):
-        """Destroy associations, then destroy the object"""
-        self.unassociate_with("host", self.hostname)
-        return super(Daemon, self).destroy()
-
-    def heartbeat(self):
-        self['updated_at'] = utils.isotime()
-        return self.save()
-
-    @classmethod
-    def by_host(cls, hostname):
-        for x in cls.associated_to("host", hostname):
-            yield x
-
-
-class SessionToken(datastore.BasicModel):
-    """This is a short-lived auth token that is passed through web requests"""
-
-    def __init__(self, session_token):
-        self.token = session_token
-        self.default_ttl = FLAGS.auth_token_ttl
-        super(SessionToken, self).__init__()
-
-    @property
-    def identifier(self):
-        return self.token
-
-    def default_state(self):
-        now = datetime.datetime.utcnow()
-        diff = datetime.timedelta(seconds=self.default_ttl)
-        expires = now + diff
-        return {'user': None, 'session_type': None, 'token': self.token,
-                'expiry': expires.strftime(utils.TIME_FORMAT)}
-
-    def save(self):
-        """Call into superclass to save object, then save associations"""
-        if not self['user']:
-            raise exception.Invalid("SessionToken requires a User association")
-        success = super(SessionToken, self).save()
-        if success:
-            self.associate_with("user", self['user'])
-        return True
-
-    @classmethod
-    def lookup(cls, key):
-        token = super(SessionToken, cls).lookup(key)
-        if token:
-            expires_at = utils.parse_isotime(token['expiry'])
-            if datetime.datetime.utcnow() >= expires_at:
-                token.destroy()
-                return None
-        return token
-
-    @classmethod
-    def generate(cls, userid, session_type=None):
-        """make a new token for the given user"""
-        token = str(uuid.uuid4())
-        while cls.lookup(token):
-            token = str(uuid.uuid4())
-        instance = cls(token)
-        instance['user'] = userid
-        instance['session_type'] = session_type
-        instance.save()
-        return instance
-
-    def update_expiry(self, **kwargs):
-        """updates the expirty attribute, but doesn't save"""
-        if not kwargs:
-            kwargs['seconds'] = self.default_ttl
-        time = datetime.datetime.utcnow()
-        diff = datetime.timedelta(**kwargs)
-        expires = time + diff
-        self['expiry'] = expires.strftime(utils.TIME_FORMAT)
-
-    def is_expired(self):
-        now = datetime.datetime.utcnow()
-        expires = utils.parse_isotime(self['expiry'])
-        return expires <= now
-
-    def ttl(self):
-        """number of seconds remaining before expiration"""
-        now = datetime.datetime.utcnow()
-        expires = utils.parse_isotime(self['expiry'])
-        delta = expires - now
-        return (delta.seconds + (delta.days * 24 * 3600))
-
-
-if __name__ == "__main__":
-    import doctest
-    doctest.testmod()
diff --git a/nova/compute/service.py b/nova/compute/service.py
deleted file mode 100644
index e59f3fb34..000000000
--- a/nova/compute/service.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Compute Service:
-
-    Runs on each compute host, managing the
-    hypervisor using the virt module.
-
-"""
-
-import base64
-import json
-import logging
-import os
-import sys
-
-from twisted.internet import defer
-from twisted.internet import task
-
-from nova import exception
-from nova import flags
-from nova import process
-from nova import service
-from nova import utils
-from nova.compute import disk
-from nova.compute import model
-from nova.compute import power_state
-from nova.compute.instance_types import INSTANCE_TYPES
-from nova.network import service as network_service
-from nova.objectstore import image  # for image_path flag
-from nova.virt import connection as virt_connection
-from nova.volume import service as volume_service
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('instances_path', utils.abspath('../instances'),
-                    'where instances are stored on disk')
-
-
-class ComputeService(service.Service):
-    """
-    Manages the running instances.
-    """
-    def __init__(self):
-        """ load configuration options for this node and connect to the hypervisor"""
-        super(ComputeService, self).__init__()
-        self._instances = {}
-        self._conn = virt_connection.get_connection()
-        self.instdir = model.InstanceDirectory()
-        # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe
-
-    def noop(self):
-        """ simple test of an AMQP message call """
-        return defer.succeed('PONG')
-
-    def get_instance(self, instance_id):
-        # inst = self.instdir.get(instance_id)
-        # return inst
-        if self.instdir.exists(instance_id):
-            return Instance.fromName(self._conn, instance_id)
-        return None
-
-    @exception.wrap_exception
-    def adopt_instances(self):
-        """ if there are instances already running, adopt them """
-        return defer.succeed(0)
-        instance_names = self._conn.list_instances()
-        for name in instance_names:
-            try:
-                new_inst = Instance.fromName(self._conn, name)
-                new_inst.update_state()
-            except:
-                pass
-        return defer.succeed(len(self._instances))
-
-    @exception.wrap_exception
-    def describe_instances(self):
-        retval = {}
-        for inst in self.instdir.by_node(FLAGS.node_name):
-            retval[inst['instance_id']] = (
-                Instance.fromName(self._conn, inst['instance_id']))
-        return retval
-
-    @defer.inlineCallbacks
-    def report_state(self, nodename, daemon):
-        # TODO(termie): make this pattern be more elegant. -todd
-        try:
-            record = model.Daemon(nodename, daemon)
-            record.heartbeat()
-            if getattr(self, "model_disconnected", False):
-                self.model_disconnected = False
-                logging.error("Recovered model server connection!")
-
-        except model.ConnectionError, ex:
-            if not getattr(self, "model_disconnected", False):
-                self.model_disconnected = True
-                logging.exception("model server went away")
-        yield
-
-    @exception.wrap_exception
-    def run_instance(self, instance_id, **_kwargs):
-        """ launch a new instance with specified options """
-        logging.debug("Starting instance %s..." % (instance_id))
-        inst = self.instdir.get(instance_id)
-        # TODO: Get the real security group of launch in here
-        security_group = "default"
-        # NOTE(vish): passing network type allows us to express the
-        #             network without making a call to network to find
-        #             out which type of network to setup
-        network_service.setup_compute_network(
-            inst.get('network_type', 'vlan'),
-            inst['user_id'],
-            inst['project_id'],
-            security_group)
-
-        inst['node_name'] = FLAGS.node_name
-        inst.save()
-        # TODO(vish) check to make sure the availability zone matches
-        new_inst = Instance(self._conn, name=instance_id, data=inst)
-        logging.info("Instances current state is %s", new_inst.state)
-        if new_inst.is_running():
-            raise exception.Error("Instance is already running")
-        new_inst.spawn()
-
-    @exception.wrap_exception
-    def terminate_instance(self, instance_id):
-        """ terminate an instance on this machine """
-        logging.debug("Got told to terminate instance %s" % instance_id)
-        instance = self.get_instance(instance_id)
-        # inst = self.instdir.get(instance_id)
-        if not instance:
-            raise exception.Error(
-                'trying to terminate unknown instance: %s' % instance_id)
-        d = instance.destroy()
-        # d.addCallback(lambda x: inst.destroy())
-        return d
-
-    @exception.wrap_exception
-    def reboot_instance(self, instance_id):
-        """ reboot an instance on this server
-        KVM doesn't support reboot, so we terminate and restart """
-        instance = self.get_instance(instance_id)
-        if not instance:
-            raise exception.Error(
-                'trying to reboot unknown instance: %s' % instance_id)
-        return instance.reboot()
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def get_console_output(self, instance_id):
-        """ send the console output for an instance """
-        logging.debug("Getting console output for %s" % (instance_id))
-        inst = self.instdir.get(instance_id)
-        instance = self.get_instance(instance_id)
-        if not instance:
-            raise exception.Error(
-                'trying to get console log for unknown: %s' % instance_id)
-        rv = yield instance.console_output()
-        # TODO(termie): this stuff belongs in the API layer, no need to
-        #               munge the data we send to ourselves
-        output = {"InstanceId" : instance_id,
-                  "Timestamp" : "2",
-                  "output" : base64.b64encode(rv)}
-        defer.returnValue(output)
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def attach_volume(self, instance_id = None,
-                      volume_id = None, mountpoint = None):
-        volume = volume_service.get_volume(volume_id)
-        yield self._init_aoe()
-        yield process.simple_execute(
-            "sudo virsh attach-disk %s /dev/etherd/%s %s" %
-            (instance_id,
-             volume['aoe_device'],
-             mountpoint.rpartition('/dev/')[2]))
-        volume.finish_attach()
-        defer.returnValue(True)
-
-    @defer.inlineCallbacks
-    def _init_aoe(self):
-        yield process.simple_execute("sudo aoe-discover")
-        yield process.simple_execute("sudo aoe-stat")
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def detach_volume(self, instance_id, volume_id):
-        """ detach a volume from an instance """
-        # despite the documentation, virsh detach-disk just wants the device
-        # name without the leading /dev/
-        volume = volume_service.get_volume(volume_id)
-        target = volume['mountpoint'].rpartition('/dev/')[2]
-        yield process.simple_execute(
-            "sudo virsh detach-disk %s %s " % (instance_id, target))
-        volume.finish_detach()
-        defer.returnValue(True)
-
-
-class Group(object):
-    def __init__(self, group_id):
-        self.group_id = group_id
-
-
-class ProductCode(object):
-    def __init__(self, product_code):
-        self.product_code = product_code
-
-
-class Instance(object):
-
-    NOSTATE = 0x00
-    RUNNING = 0x01
-    BLOCKED = 0x02
-    PAUSED = 0x03
-    SHUTDOWN = 0x04
-    SHUTOFF = 0x05
-    CRASHED = 0x06
-
-    def __init__(self, conn, name, data):
-        """ spawn an instance with a given name """
-        self._conn = conn
-        # TODO(vish): this can be removed after data has been updated
-        # data doesn't seem to have a working iterator so in doesn't work
-        if data.get('owner_id', None) is not None:
-            data['user_id'] = data['owner_id']
-            data['project_id'] = data['owner_id']
-        self.datamodel = data
-
-        size = data.get('instance_type', FLAGS.default_instance_type)
-        if size not in INSTANCE_TYPES:
-            raise exception.Error('invalid instance type: %s' % size)
-
-        self.datamodel.update(INSTANCE_TYPES[size])
-
-        self.datamodel['name'] = name
-        self.datamodel['instance_id'] = name
-        self.datamodel['basepath'] = data.get(
-            'basepath', os.path.abspath(
-                os.path.join(FLAGS.instances_path, self.name)))
-        self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024
-        self.datamodel.setdefault('image_id', FLAGS.default_image)
-        self.datamodel.setdefault('kernel_id', FLAGS.default_kernel)
-        self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk)
-        self.datamodel.setdefault('project_id', self.datamodel['user_id'])
-        self.datamodel.setdefault('bridge_name', None)
-        #self.datamodel.setdefault('key_data', None)
-        #self.datamodel.setdefault('key_name', None)
-        #self.datamodel.setdefault('addressing_type', None)
-
-        # TODO(joshua) - The ugly non-flat ones
-        self.datamodel['groups'] = data.get('security_group', 'default')
-        # TODO(joshua): Support product codes somehow
-        self.datamodel.setdefault('product_codes', None)
-
-        self.datamodel.save()
-        logging.debug("Finished init of Instance with id of %s" % name)
-
-    @classmethod
-    def fromName(cls, conn, name):
-        """ use the saved data for reloading the instance """
-        instdir = model.InstanceDirectory()
-        instance = instdir.get(name)
-        return cls(conn=conn, name=name, data=instance)
-
-    def set_state(self, state_code, state_description=None):
-        self.datamodel['state'] = state_code
-        if not state_description:
-            state_description = power_state.name(state_code)
-        self.datamodel['state_description'] = state_description
-        self.datamodel.save()
-
-    @property
-    def state(self):
-        # it is a string in datamodel
-        return int(self.datamodel['state'])
-
-    @property
-    def name(self):
-        return self.datamodel['name']
-
-    def is_pending(self):
-        return (self.state == power_state.NOSTATE or self.state == 'pending')
-
-    def is_destroyed(self):
-        return self.state == power_state.SHUTOFF
-
-    def is_running(self):
-        logging.debug("Instance state is: %s" % self.state)
-        return (self.state == power_state.RUNNING or self.state == 'running')
-
-    def describe(self):
-        return self.datamodel
-
-    def info(self):
-        result = self._conn.get_info(self.name)
-        result['node_name'] = FLAGS.node_name
-        return result
-
-    def update_state(self):
-        self.datamodel.update(self.info())
-        self.set_state(self.state)
-        self.datamodel.save()  # Extra, but harmless
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def destroy(self):
-        if self.is_destroyed():
-            self.datamodel.destroy()
-            raise exception.Error('trying to destroy already destroyed'
-                                  ' instance: %s' % self.name)
-
-        self.set_state(power_state.NOSTATE, 'shutting_down')
-        yield self._conn.destroy(self)
-        self.datamodel.destroy()
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def reboot(self):
-        if not self.is_running():
-            raise exception.Error(
-                'trying to reboot a non-running'
-                'instance: %s (state: %s)' % (self.name, self.state))
-
-        logging.debug('rebooting instance %s' % self.name)
-        self.set_state(power_state.NOSTATE, 'rebooting')
-        yield self._conn.reboot(self)
-        self.update_state()
-
-    @defer.inlineCallbacks
-    @exception.wrap_exception
-    def spawn(self):
-        self.set_state(power_state.NOSTATE, 'spawning')
-        logging.debug("Starting spawn in Instance")
-        try:
-            yield self._conn.spawn(self)
-        except Exception, ex:
-            logging.debug(ex)
-            self.set_state(power_state.SHUTDOWN)
-        self.update_state()
-
-    @exception.wrap_exception
-    def console_output(self):
-        # FIXME: Abstract this for Xen
-        if FLAGS.connection_type == 'libvirt':
-            fname = os.path.abspath(
-                os.path.join(self.datamodel['basepath'], 'console.log'))
-            with open(fname, 'r') as f:
-                console = f.read()
-        else:
-            console = 'FAKE CONSOLE OUTPUT'
-        return defer.succeed(console)
```
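The diff above keys each `INSTANCE_TYPES` entry by name and adds an integer `flavorid` field while replacing the Redis-backed `ComputeService` with the driver-based `ComputeManager`. As a rough illustration of how the reshaped mapping can be consumed — this is a minimal sketch, not part of the commit, and the `find_by_flavor_id` helper is hypothetical — assuming the nova package from this tree is importable:

```python
# Sketch only (not from this commit): name-keyed lookup plus a reverse
# lookup over the new flavorid field added in instance_types.py.
from nova.compute.instance_types import INSTANCE_TYPES


def find_by_flavor_id(flavor_id):
    """Hypothetical helper: find the instance type whose flavorid matches."""
    for name, spec in INSTANCE_TYPES.items():
        if spec['flavorid'] == flavor_id:
            return name, spec
    raise KeyError('no instance type with flavorid %s' % flavor_id)


# Forward lookup by name, as the old ComputeService did via
# self.datamodel.update(INSTANCE_TYPES[size]):
mem = INSTANCE_TYPES['m1.small']['memory_mb']   # 1024

# Reverse lookup by the new flavorid:
name, spec = find_by_flavor_id(2)               # ('m1.small', {...})
```

Keeping the dict keyed by name preserves the existing `INSTANCE_TYPES[size]` lookups, while `flavorid` gives each type a stable integer identifier that can be mapped back to a name when needed.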
