author     Vishvananda Ishaya <vishvananda@gmail.com>  2010-12-22 20:54:58 +0000
committer  Vishvananda Ishaya <vishvananda@gmail.com>  2010-12-22 20:54:58 +0000
commit     9d4a60d6bd7621b44a1ccd4a48741f32e620f342 (patch)
tree       0904abbefd68ef786407cea494b29a064af6de12 /nova/compute
parent     db938f975da64540ebb942e9dfd640db4dd7f939 (diff)
parent     ff1575e782fb08fb3923a09cb1a18d703b33be4a (diff)
fixed conflicts
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py             43
-rw-r--r--  nova/compute/disk.py            38
-rw-r--r--  nova/compute/instance_types.py   3
-rw-r--r--  nova/compute/manager.py         77
-rw-r--r--  nova/compute/monitor.py         12
5 files changed, 128 insertions, 45 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 1dbe6e02d..9c4065ed1 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -90,14 +90,19 @@ class ComputeAPI(base.Base):
is_vpn = image_id == FLAGS.vpn_image_id
if not is_vpn:
image = self.image_service.show(context, image_id)
+
+ # If kernel_id/ramdisk_id isn't explicitly set in API call
+ # we take the defaults from the image's metadata
if kernel_id is None:
- kernel_id = image.get('kernelId', FLAGS.default_kernel)
+ kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
- ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
+ ramdisk_id = image.get('ramdiskId', None)
# Make sure we have access to kernel and ramdisk
- self.image_service.show(context, kernel_id)
- self.image_service.show(context, ramdisk_id)
+ if kernel_id:
+ self.image_service.show(context, kernel_id)
+ if ramdisk_id:
+ self.image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
@@ -120,8 +125,8 @@ class ComputeAPI(base.Base):
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
- 'kernel_id': kernel_id,
- 'ramdisk_id': ramdisk_id,
+ 'kernel_id': kernel_id or '',
+ 'ramdisk_id': ramdisk_id or '',
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
@@ -137,7 +142,7 @@ class ComputeAPI(base.Base):
elevated = context.elevated()
instances = []
- logging.debug("Going to run %s instances...", num_instances)
+ logging.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@@ -162,7 +167,7 @@ class ComputeAPI(base.Base):
instance = self.update_instance(context, instance_id, **updates)
instances.append(instance)
- logging.debug("Casting to scheduler for %s/%s's instance %s",
+ logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@@ -209,12 +214,12 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found during terminate",
+ logging.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning("Instance %d is already being terminated",
+ logging.warning(_("Instance %d is already being terminated"),
instance_id)
return
@@ -258,6 +263,24 @@ class ComputeAPI(base.Base):
{"method": "reboot_instance",
"args": {"instance_id": instance['id']}})
+ def pause(self, context, instance_id):
+ """Pause the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "pause_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def unpause(self, context, instance_id):
+ """Unpause the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unpause_instance",
+ "args": {"instance_id": instance['id']}})
+
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
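Note: the pause and unpause calls added to ComputeAPI follow the same pattern as reboot and rescue: look up the instance record, read its host, and rpc.cast the request to that host's compute queue. A minimal caller sketch, assuming a ComputeAPI instance can be constructed with defaults and a request context is already available (the helper name below is illustrative, not part of this commit):

    from nova.compute.api import ComputeAPI

    compute_api = ComputeAPI()

    def pause_then_unpause(context, internal_id):
        # Each call casts to the owning compute host's queue; both return
        # immediately and the compute manager updates state asynchronously.
        compute_api.pause(context, internal_id)
        compute_api.unpause(context, internal_id)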
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 675cd0259..814a258cd 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -67,12 +67,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn("Input partition size not evenly divisible by"
- " sector size: %d / %d", file_size, sector_size)
+ logging.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d", local_bytes, sector_size)
+ logging.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
@@ -106,6 +106,13 @@ def partition(infile, outfile, local_bytes=0, resize=True,
% (outfile, local_type, local_first, local_last))
+def extend(image, size, execute):
+ file_size = os.path.getsize(image)
+ if file_size >= size:
+ return
+ return execute('truncate -s %d %s' % (size, image))
+
+
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@@ -115,20 +122,30 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
If partition is not specified it mounts the image as a single partition.
"""
- out, err = execute('sudo losetup -f --show %s' % image)
+ out, err = execute('sudo losetup --find --show %s' % image)
if err:
- raise exception.Error('Could not attach image to loopback: %s' % err)
+ raise exception.Error(_('Could not attach image to loopback: %s')
+ % err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = execute('sudo kpartx -a %s' % device)
if err:
- raise exception.Error('Failed to load partition: %s' % err)
+ raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
+
+ # We can only loopback mount raw images. If the device isn't there,
+ # it's normally because it's a .vmdk or a .vdi etc
+ if not os.path.exists(mapped_device):
+ raise exception.Error('Mapped device was not found (we can'
+ ' only inject raw disk images): %s' %
+ mapped_device)
+
+ # Configure ext2fs so that it doesn't auto-check every N boots
out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
@@ -137,7 +154,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
- raise exception.Error('Failed to mount filesystem: %s' % err)
+ raise exception.Error(_('Failed to mount filesystem: %s')
+ % err)
try:
if key:
@@ -156,7 +174,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
- execute('sudo losetup -d %s' % device)
+ execute('sudo losetup --detach %s' % device)
def _inject_key_into_fs(key, fs, execute=None):
@@ -165,7 +183,7 @@ def _inject_key_into_fs(key, fs, execute=None):
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
- sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
+ sshdir = os.path.join(fs, 'root', '.ssh')
execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
execute('sudo chown root %s' % sshdir)
execute('sudo chmod 700 %s' % sshdir)
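Note: the new extend() helper grows a raw image file to at least size bytes with truncate, and returns early when the file is already large enough. A rough usage sketch, assuming nova.utils.execute as the command runner (the wrapper function name is hypothetical):

    from nova import utils
    from nova.compute import disk

    def grow_root_image(image_path, target_bytes):
        # No-op when the file is already target_bytes or larger; otherwise
        # 'truncate' extends the sparse file without writing data blocks.
        disk.extend(image_path, target_bytes, execute=utils.execute)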
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 6e47170bd..196d6a8df 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -38,7 +38,8 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s" % instance_type)
+ raise exception.ApiError(_("Unknown instance type: %s")
+                          % instance_type)
return instance_type
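Note: get_by_type() returns FLAGS.default_instance_type when no type is given and now raises a translatable ApiError for unknown names. A short usage sketch ('m1.large' is just an example; valid names come from INSTANCE_TYPES):

    from nova.compute import instance_types

    # Returns the name unchanged when it is known; passing None yields the
    # default type; an unknown name raises exception.ApiError.
    flavor = instance_types.get_by_type('m1.large')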
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b3e97011f..923378352 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -104,8 +104,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
- raise exception.Error("Instance has already been created")
- logging.debug("instance %s: starting...", instance_id)
+ raise exception.Error(_("Instance has already been created"))
+ logging.debug(_("instance %s: starting..."), instance_id)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@@ -143,7 +143,7 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception("instance %s: Failed to spawn",
+ logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
@@ -162,7 +162,7 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
- logging.debug("Disassociating address %s" % address)
+ logging.debug(_("Disassociating address %s") % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
@@ -174,22 +174,22 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
- logging.debug("Deallocating address %s" % address)
+ logging.debug(_("Deallocating address %s") % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- logging.debug("instance %s: terminating", instance_id)
+ logging.debug(_("instance %s: terminating"), instance_id)
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
- raise exception.Error('trying to destroy already destroyed'
- ' instance: %s' % instance_id)
+ raise exception.Error(_('trying to destroy already destroyed'
+ ' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
@@ -203,13 +203,13 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)',
+ logging.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
- logging.debug('instance %s: rebooting', instance_ref['name'])
+ logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -223,7 +223,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: rescuing',
+ logging.debug(_('instance %s: rescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -238,7 +238,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: unrescuing',
+ logging.debug(_('instance %s: unrescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -247,11 +247,52 @@ class ComputeManager(manager.Manager):
self.driver.unrescue(instance_ref)
self._update_state(context, instance_id)
+ @staticmethod
+ def _update_state_callback(self, context, instance_id, result):
+ """Update instance state when async task completes."""
+ self._update_state(context, instance_id)
+
+ @exception.wrap_exception
+ def pause_instance(self, context, instance_id):
+ """Pause an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: pausing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'pausing')
+ self.driver.pause(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
+ @exception.wrap_exception
+ def unpause_instance(self, context, instance_id):
+ """Unpause a paused instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: unpausing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unpausing')
+ self.driver.unpause(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug("instance %s: getting console output", instance_id)
+ logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_console_output(instance_ref)
@@ -260,7 +301,7 @@ class ComputeManager(manager.Manager):
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug("instance %s: attaching volume %s to %s", instance_id,
+ logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = self.volume_manager.setup_compute_volume(context,
@@ -277,7 +318,7 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# exception below.
- logging.exception("instance %s: attach failed %s, removing",
+ logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
self.volume_manager.remove_compute_volume(context,
volume_id)
@@ -289,13 +330,13 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug("instance %s: detaching volume %s",
+ logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn("Detaching volume from unknown instance %s",
+ logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
else:
self.driver.detach_volume(instance_ref['name'],
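Note: pause_instance and unpause_instance hand the driver a completion callback (the lambda wrapping _update_state_callback), so the instance's power state is re-read once the asynchronous hypervisor operation finishes. A hypothetical driver sketch showing the contract the manager relies on (this is not the real libvirt or XenAPI implementation):

    class FakePauseDriver(object):
        """Illustrative virt driver exposing the pause/unpause callback API."""

        def pause(self, instance_ref, callback):
            # ... ask the hypervisor to pause instance_ref here ...
            callback(None)  # triggers the manager's state refresh

        def unpause(self, instance_ref, callback):
            # ... ask the hypervisor to resume instance_ref here ...
            callback(None)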
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 22653113a..60c347a5e 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -255,7 +255,7 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug('updating %s...', self.instance_id)
+ logging.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
@@ -285,7 +285,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception('unexpected error during update')
+ logging.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -351,7 +351,7 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error('Cannot get blockstats for "%s" on "%s"',
+ logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise
@@ -373,7 +373,7 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error('Cannot get ifstats for "%s" on "%s"',
+ logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise
@@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception('unexpected exception getting connection')
+ logging.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service):
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug('Found instance: %s', domain_id)
+ logging.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]