Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py             305
-rw-r--r--  nova/compute/disk.py             72
-rw-r--r--  nova/compute/instance_types.py   20
-rw-r--r--  nova/compute/manager.py         100
4 files changed, 382 insertions(+), 115 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
new file mode 100644
index 000000000..8e0efa4cc
--- /dev/null
+++ b/nova/compute/api.py
@@ -0,0 +1,305 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all API requests relating to instances (guest vms).
+"""
+
+import datetime
+import logging
+import time
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova import utils
+from nova.compute import instance_types
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+def generate_default_hostname(internal_id):
+ """Default function to generate a hostname given an instance reference."""
+ return str(internal_id)
+
+
+class ComputeAPI(base.Base):
+ """API for interacting with the compute manager."""
+
+ def __init__(self, network_manager=None, image_service=None, **kwargs):
+ if not network_manager:
+ network_manager = utils.import_object(FLAGS.network_manager)
+ self.network_manager = network_manager
+ if not image_service:
+ image_service = utils.import_object(FLAGS.image_service)
+ self.image_service = image_service
+ super(ComputeAPI, self).__init__(**kwargs)
+
+ def create_instances(self, context, instance_type, image_id, min_count=1,
+ max_count=1, kernel_id=None, ramdisk_id=None,
+ display_name='', description='', key_name=None,
+ key_data=None, security_group='default',
+ generate_hostname=generate_default_hostname):
+ """Create the number of instances requested if quote and
+ other arguments check out ok."""
+
+ num_instances = quota.allowed_instances(context, max_count,
+ instance_type)
+ if num_instances < min_count:
+ logging.warn("Quota exceeeded for %s, tried to run %s instances",
+ context.project_id, min_count)
+ raise quota.QuotaError("Instance quota exceeded. You can only "
+ "run %s more instances of this type." %
+ num_instances, "InstanceLimitExceeded")
+
+ is_vpn = image_id == FLAGS.vpn_image_id
+ if not is_vpn:
+ image = self.image_service.show(context, image_id)
+ if kernel_id is None:
+ kernel_id = image.get('kernelId', FLAGS.default_kernel)
+ if ramdisk_id is None:
+ ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
+
+ # Make sure we have access to kernel and ramdisk
+ self.image_service.show(context, kernel_id)
+ self.image_service.show(context, ramdisk_id)
+
+ if security_group is None:
+ security_group = ['default']
+ if not isinstance(security_group, list):
+ security_group = [security_group]
+
+ security_groups = []
+ self.ensure_default_security_group(context)
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
+ if key_data is None and key_name:
+ key_pair = db.key_pair_get(context, context.user_id, key_name)
+ key_data = key_pair['public_key']
+
+ type_data = instance_types.INSTANCE_TYPES[instance_type]
+ base_options = {
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': image_id,
+ 'kernel_id': kernel_id,
+ 'ramdisk_id': ramdisk_id,
+ 'state_description': 'scheduling',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': instance_type,
+ 'memory_mb': type_data['memory_mb'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ 'display_name': display_name,
+ 'display_description': description,
+ 'key_name': key_name,
+ 'key_data': key_data}
+
+ elevated = context.elevated()
+ instances = []
+ logging.debug("Going to run %s instances...", num_instances)
+ for num in range(num_instances):
+ instance = dict(mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ instance = self.db.instance_create(context, instance)
+ instance_id = instance['id']
+ internal_id = instance['internal_id']
+
+ elevated = context.elevated()
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ instance_id,
+ security_group_id)
+
+ # Set sane defaults if not specified
+ updates = dict(hostname=generate_hostname(internal_id))
+ if 'display_name' not in instance:
+ updates['display_name'] = "Server %s" % internal_id
+
+ instance = self.update_instance(context, instance_id, **updates)
+ instances.append(instance)
+
+ # TODO(vish): This probably should be done in the scheduler
+ # or in compute as a call. The network should be
+ # allocated after the host is assigned and setup
+ # can happen at the same time.
+ address = self.network_manager.allocate_fixed_ip(context,
+ instance_id,
+ is_vpn)
+ rpc.cast(elevated,
+ self._get_network_topic(context),
+ {"method": "setup_fixed_ip",
+ "args": {"address": address}})
+
+ logging.debug("Casting to scheduler for %s/%s's instance %s",
+ context.project_id, context.user_id, instance_id)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id}})
+
+ return instances
+
+ def ensure_default_security_group(self, context):
+ """ Create security group for the security context if it
+ does not already exist
+
+ :param context: the security context
+
+ """
+ try:
+ db.security_group_get_by_name(context, context.project_id,
+ 'default')
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ db.security_group_create(context, values)
+
+ def update_instance(self, context, instance_id, **kwargs):
+ """Updates the instance in the datastore.
+
+ :param context: The security context
+ :param instance_id: ID of the instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :retval The updated instance
+
+ """
+ return self.db.instance_update(context, instance_id, kwargs)
+
+ def delete_instance(self, context, instance_id):
+ logging.debug("Going to try and terminate %d" % instance_id)
+ try:
+ instance = self.db.instance_get_by_internal_id(context,
+ instance_id)
+ except exception.NotFound as e:
+ logging.warning("Instance %d was not found during terminate",
+ instance_id)
+ raise e
+
+ if (instance['state_description'] == 'terminating'):
+ logging.warning("Instance %d is already being terminated",
+ instance_id)
+ return
+
+ self.update_instance(context,
+ instance['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=datetime.datetime.utcnow())
+
+ # FIXME(ja): where should network deallocate occur?
+ address = self.db.instance_get_floating_address(context,
+ instance['id'])
+ if address:
+ logging.debug("Disassociating address %s" % address)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(context,
+ self._get_network_topic(context),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": address}})
+
+ address = self.db.instance_get_fixed_address(context, instance['id'])
+ if address:
+ logging.debug("Deallocating address %s" % address)
+ # NOTE(vish): Currently, nothing needs to be done on the
+ # network node until release. If this changes,
+ # we will need to cast here.
+ self.network_manager.deallocate_fixed_ip(context.elevated(),
+ address)
+
+ host = instance['host']
+ if host:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "terminate_instance",
+ "args": {"instance_id": instance['id']}})
+ else:
+ self.db.instance_destroy(context, instance['id'])
+
+ def get_instances(self, context, project_id=None):
+ """Get all instances, possibly filtered by project ID or
+ user ID. If there is no filter and the context is an admin,
+ it will retrieve all instances in the system."""
+ if project_id or not context.is_admin:
+ if not context.project:
+ return self.db.instance_get_all_by_user(context,
+ context.user_id)
+ if project_id is None:
+ project_id = context.project_id
+ return self.db.instance_get_all_by_project(context, project_id)
+ return self.db.instance_get_all(context)
+
+ def get_instance(self, context, instance_id):
+ return self.db.instance_get_by_internal_id(context, instance_id)
+
+ def reboot(self, context, instance_id):
+ """Reboot the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "reboot_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def rescue(self, context, instance_id):
+ """Rescue the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "rescue_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def unrescue(self, context, instance_id):
+ """Unrescue the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unrescue_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ network_ref = self.network_manager.get_network(context)
+ host = network_ref['host']
+ if not host:
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return self.db.queue_get_for(context, FLAGS.network_topic, host)
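
For orientation, a minimal usage sketch of the new ComputeAPI (not part of the commit). It assumes configured FLAGS, a populated image service, and a nova RequestContext; get_admin_context and the literal image id below are illustrative stand-ins.

from nova import context
from nova.compute import api as compute_api

ctxt = context.get_admin_context()  # stand-in for a real request context
compute = compute_api.ComputeAPI()

# Quota is checked up-front; a quota.QuotaError propagates if fewer than
# min_count instances are allowed for this project.
instances = compute.create_instances(ctxt,
                                     instance_type='m1.small',
                                     image_id='ami-12345',  # illustrative id
                                     min_count=1,
                                     max_count=2,
                                     display_name='test-server')

# Each record is created in the datastore, then cast to the scheduler;
# the actual boot happens asynchronously on a compute host.
for inst in instances:
    print inst['internal_id'], inst['hostname']

# delete_instance marks the record 'terminating' and casts to the compute
# host; it returns before the guest is actually destroyed.
compute.delete_instance(ctxt, instances[0]['internal_id'])
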
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 4338d39f0..675cd0259 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -26,8 +26,6 @@ import logging
import os
import tempfile
-from twisted.internet import defer
-
from nova import exception
from nova import flags
@@ -39,7 +37,6 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
-@defer.inlineCallbacks
def partition(infile, outfile, local_bytes=0, resize=True,
local_type='ext2', execute=None):
"""
@@ -64,10 +61,10 @@ def partition(infile, outfile, local_bytes=0, resize=True,
file_size = os.path.getsize(infile)
if resize and file_size < FLAGS.minimum_root_size:
last_sector = FLAGS.minimum_root_size / sector_size - 1
- yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (infile, last_sector, sector_size))
- yield execute('e2fsck -fp %s' % infile, check_exit_code=False)
- yield execute('resize2fs %s' % infile)
+ execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (infile, last_sector, sector_size))
+ execute('e2fsck -fp %s' % infile, check_exit_code=False)
+ execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
logging.warn("Input partition size not evenly divisible by"
@@ -86,30 +83,29 @@ def partition(infile, outfile, local_bytes=0, resize=True,
last_sector = local_last # e
# create an empty file
- yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, mbr_last, sector_size))
+ execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, mbr_last, sector_size))
# make mbr partition
- yield execute('parted --script %s mklabel msdos' % outfile)
+ execute('parted --script %s mklabel msdos' % outfile)
# append primary file
- yield execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
- % (infile, outfile, FLAGS.block_size))
+ execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
+ % (infile, outfile, FLAGS.block_size))
# make primary partition
- yield execute('parted --script %s mkpart primary %ds %ds'
- % (outfile, primary_first, primary_last))
+ execute('parted --script %s mkpart primary %ds %ds'
+ % (outfile, primary_first, primary_last))
if local_bytes > 0:
# make the file bigger
- yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, last_sector, sector_size))
+ execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
# make and format local partition
- yield execute('parted --script %s mkpartfs primary %s %ds %ds'
- % (outfile, local_type, local_first, local_last))
+ execute('parted --script %s mkpartfs primary %s %ds %ds'
+ % (outfile, local_type, local_first, local_last))
-@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@@ -119,26 +115,26 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
If partition is not specified it mounts the image as a single partition.
"""
- out, err = yield execute('sudo losetup -f --show %s' % image)
+ out, err = execute('sudo losetup -f --show %s' % image)
if err:
raise exception.Error('Could not attach image to loopback: %s' % err)
device = out.strip()
try:
if not partition is None:
# create partition
- out, err = yield execute('sudo kpartx -a %s' % device)
+ out, err = execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error('Failed to load partition: %s' % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
- out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+ out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
- out, err = yield execute(
+ out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error('Failed to mount filesystem: %s' % err)
@@ -146,24 +142,23 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
try:
if key:
# inject key file
- yield _inject_key_into_fs(key, tmpdir, execute=execute)
+ _inject_key_into_fs(key, tmpdir, execute=execute)
if net:
- yield _inject_net_into_fs(net, tmpdir, execute=execute)
+ _inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
- yield execute('sudo umount %s' % mapped_device)
+ execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
- yield execute('rmdir %s' % tmpdir)
+ execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
- yield execute('sudo kpartx -d %s' % device)
+ execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
- yield execute('sudo losetup -d %s' % device)
+ execute('sudo losetup -d %s' % device)
-@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.
@@ -171,22 +166,21 @@ def _inject_key_into_fs(key, fs, execute=None):
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
- yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
- yield execute('sudo chown root %s' % sshdir)
- yield execute('sudo chmod 700 %s' % sshdir)
+ execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
+ execute('sudo chown root %s' % sshdir)
+ execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+ execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
-@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
- yield execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
- yield execute('sudo chown root:root %s' % netdir)
- yield execute('sudo chmod 755 %s' % netdir)
+ execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
+ execute('sudo chown root:root %s' % netdir)
+ execute('sudo chmod 755 %s' % netdir)
netfile = os.path.join(netdir, 'interfaces')
- yield execute('sudo tee %s' % netfile, net)
+ execute('sudo tee %s' % netfile, net)
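
The disk helpers above now rely on a synchronous execute callable returning (stdout, stderr), in place of the removed Twisted deferreds. A minimal sketch of that contract, matching the optional process input and check_exit_code usage seen in the calls above (illustrative only, not the committed nova.utils implementation):

import subprocess

def execute(cmd, process_input=None, check_exit_code=True):
    """Run cmd in a shell, blocking until done; return (stdout, stderr)."""
    proc = subprocess.Popen(cmd, shell=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate(process_input)
    if check_exit_code and proc.returncode != 0:
        raise RuntimeError('%r failed with exit code %d'
                           % (cmd, proc.returncode))
    return out, err

# e.g. disk.partition('root.img', 'disk.img', execute=execute)
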
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 67ee8f8a8..6e47170bd 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,9 +21,29 @@
The built-in instance properties.
"""
+from nova import flags
+from nova import exception
+
+FLAGS = flags.FLAGS
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+
+def get_by_type(instance_type):
+ """Build instance data structure and save it to the data store."""
+ if instance_type is None:
+ return FLAGS.default_instance_type
+ if instance_type not in INSTANCE_TYPES:
+ raise exception.ApiError("Unknown instance type: %s" % instance_type)
+ return instance_type
+
+
+def get_by_flavor_id(flavor_id):
+ for instance_type, details in INSTANCE_TYPES.iteritems():
+ if details['flavorid'] == flavor_id:
+ return instance_type
+ return FLAGS.default_instance_type
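
The two lookups are near-inverses over INSTANCE_TYPES, with different fallback behavior: an unknown type name raises ApiError, while an unknown flavor id silently falls back to the default. An illustrative session (assuming FLAGS.default_instance_type is, say, 'm1.small'):

from nova.compute import instance_types

instance_types.get_by_type('m1.large')   # -> 'm1.large'
instance_types.get_by_type(None)         # -> 'm1.small' (the default)
instance_types.get_by_flavor_id(3)       # -> 'm1.medium'
instance_types.get_by_flavor_id(99)      # -> 'm1.small' (fallback, no error)
instance_types.get_by_type('m1.huge')    # raises exception.ApiError
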
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 50a9d316b..7eb60e262 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
-responding to calls to check it state, attaching persistent as well as
-termination.
+responding to calls to check its state, attaching persistent storage, and
+terminating it.
**Related Flags**
@@ -37,23 +37,21 @@ termination.
import datetime
import logging
-from twisted.internet import defer
-
from nova import exception
from nova import flags
from nova import manager
from nova import utils
from nova.compute import power_state
-
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
- 'Driver to use for volume creation')
+ 'Driver to use for controlling virtualization')
class ComputeManager(manager.Manager):
+
"""Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs):
@@ -78,54 +76,11 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
- @defer.inlineCallbacks
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
"""This call passes stright through to the virtualization driver."""
- yield self.driver.refresh_security_group(security_group_id)
-
- def create_instance(self, context, security_groups=None, **kwargs):
- """Creates the instance in the datastore and returns the
- new instance as a mapping
-
- :param context: The security context
- :param security_groups: list of security group ids to
- attach to the instance
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- created
-
- :retval Returns a mapping of the instance information
- that has just been created
-
- """
- instance_ref = self.db.instance_create(context, kwargs)
- inst_id = instance_ref['id']
-
- elevated = context.elevated()
- if not security_groups:
- security_groups = []
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- inst_id,
- security_group_id)
- return instance_ref
-
- def update_instance(self, context, instance_id, **kwargs):
- """Updates the instance in the datastore.
-
- :param context: The security context
- :param instance_id: ID of the instance to update
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- updated
-
- :retval None
-
- """
- self.db.instance_update(context, instance_id, kwargs)
-
- @defer.inlineCallbacks
+ self.driver.refresh_security_group(security_group_id)
+
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
"""Launch a new instance with specified options."""
@@ -134,7 +89,6 @@ class ComputeManager(manager.Manager):
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
- project_id = instance_ref['project_id']
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@@ -147,7 +101,7 @@ class ComputeManager(manager.Manager):
'spawning')
try:
- yield self.driver.spawn(instance_ref)
+ self.driver.spawn(instance_ref)
now = datetime.datetime.utcnow()
self.db.instance_update(context,
instance_id,
@@ -161,7 +115,6 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
- @defer.inlineCallbacks
@exception.wrap_exception
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
@@ -176,13 +129,11 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id)
-
- yield self.driver.destroy(instance_ref)
+ self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
- @defer.inlineCallbacks
@exception.wrap_exception
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
@@ -202,10 +153,9 @@ class ComputeManager(manager.Manager):
instance_id,
power_state.NOSTATE,
'rebooting')
- yield self.driver.reboot(instance_ref)
+ self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
- @defer.inlineCallbacks
@exception.wrap_exception
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
@@ -218,10 +168,9 @@ class ComputeManager(manager.Manager):
instance_id,
power_state.NOSTATE,
'rescuing')
- yield self.driver.rescue(instance_ref)
+ self.driver.rescue(instance_ref)
self._update_state(context, instance_id)
- @defer.inlineCallbacks
@exception.wrap_exception
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
@@ -234,7 +183,7 @@ class ComputeManager(manager.Manager):
instance_id,
power_state.NOSTATE,
'unrescuing')
- yield self.driver.unrescue(instance_ref)
+ self.driver.unrescue(instance_ref)
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -246,7 +195,6 @@ class ComputeManager(manager.Manager):
return self.driver.get_console_output(instance_ref)
- @defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -254,12 +202,12 @@ class ComputeManager(manager.Manager):
logging.debug("instance %s: attaching volume %s to %s", instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
- dev_path = yield self.volume_manager.setup_compute_volume(context,
- volume_id)
+ dev_path = self.volume_manager.setup_compute_volume(context,
+ volume_id)
try:
- yield self.driver.attach_volume(instance_ref['name'],
- dev_path,
- mountpoint)
+ self.driver.attach_volume(instance_ref['name'],
+ dev_path,
+ mountpoint)
self.db.volume_attached(context,
volume_id,
instance_id,
@@ -270,12 +218,12 @@ class ComputeManager(manager.Manager):
# exception below.
logging.exception("instance %s: attach failed %s, removing",
instance_id, mountpoint)
- yield self.volume_manager.remove_compute_volume(context,
- volume_id)
+ self.volume_manager.remove_compute_volume(context,
+ volume_id)
raise exc
- defer.returnValue(True)
- @defer.inlineCallbacks
+ return True
+
@exception.wrap_exception
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
@@ -289,8 +237,8 @@ class ComputeManager(manager.Manager):
logging.warn("Detaching volume from unknown instance %s",
instance_ref['name'])
else:
- yield self.driver.detach_volume(instance_ref['name'],
- volume_ref['mountpoint'])
- yield self.volume_manager.remove_compute_volume(context, volume_id)
+ self.driver.detach_volume(instance_ref['name'],
+ volume_ref['mountpoint'])
+ self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
- defer.returnValue(True)
+ return True
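
With the deferreds gone, every ComputeManager method is a plain blocking callable: RPC consumers simply invoke it, and the driver call completes before the method returns. That also simplifies direct invocation, e.g. in tests. A hedged sketch (the ids and admin context are illustrative; matching db records are assumed):

from nova import context
from nova.compute import manager as compute_manager

ctxt = context.get_admin_context()
mgr = compute_manager.ComputeManager()  # driver loaded from FLAGS.compute_driver

# Previously these returned Twisted Deferreds; now they block until the
# virt driver finishes, and attach_volume/detach_volume return True directly.
mgr.run_instance(ctxt, instance_id=42)
mgr.attach_volume(ctxt, instance_id=42, volume_id=7, mountpoint='/dev/vdb')
mgr.detach_volume(ctxt, instance_id=42, volume_id=7)
mgr.terminate_instance(ctxt, instance_id=42)
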