From 2337fab0979b72bbc7e7730e94518a0e835a2751 Mon Sep 17 00:00:00 2001
From: Andy Smith
Date: Mon, 25 Oct 2010 03:45:19 +0900
Subject: Part way through porting the codebase off of Twisted. This provides
 a very basic eventlet-based service replacement for the twistd-based
 services and a replacement for task.LoopingCall. Also adds nova-combined
 with the goal of running a single service when doing local testing and dev.

---
 nova/compute/disk.py    | 66 ++++++++++++++++++++++---------------------
 nova/compute/manager.py | 28 ++++++++-------------
 2 files changed, 40 insertions(+), 54 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e362b4507..ad4c2c092 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -25,8 +25,6 @@ import logging
 import os
 import tempfile
 
-from twisted.internet import defer
-
 from nova import exception
 from nova import flags
 
@@ -38,7 +36,6 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
                      'block_size to use for dd')
 
 
-@defer.inlineCallbacks
 def partition(infile, outfile, local_bytes=0, resize=True,
               local_type='ext2', execute=None):
     """Takes a single partition represented by infile and writes a bootable
@@ -60,10 +57,10 @@ def partition(infile, outfile, local_bytes=0, resize=True,
     file_size = os.path.getsize(infile)
     if resize and file_size < FLAGS.minimum_root_size:
         last_sector = FLAGS.minimum_root_size / sector_size - 1
-        yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
-                      % (infile, last_sector, sector_size))
-        yield execute('e2fsck -fp %s' % infile, check_exit_code=False)
-        yield execute('resize2fs %s' % infile)
+        execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+                % (infile, last_sector, sector_size))
+        execute('e2fsck -fp %s' % infile, check_exit_code=False)
+        execute('resize2fs %s' % infile)
         file_size = FLAGS.minimum_root_size
     elif file_size % sector_size != 0:
         logging.warn("Input partition size not evenly divisible by"
@@ -82,30 +79,29 @@ def partition(infile, outfile, local_bytes=0, resize=True,
         last_sector = local_last  # e
 
     # create an empty file
-    yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
-                  % (outfile, mbr_last, sector_size))
+    execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+            % (outfile, mbr_last, sector_size))
 
     # make mbr partition
-    yield execute('parted --script %s mklabel msdos' % outfile)
+    execute('parted --script %s mklabel msdos' % outfile)
 
     # append primary file
-    yield execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
-                  % (infile, outfile, FLAGS.block_size))
+    execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
+            % (infile, outfile, FLAGS.block_size))
 
     # make primary partition
-    yield execute('parted --script %s mkpart primary %ds %ds'
-                  % (outfile, primary_first, primary_last))
+    execute('parted --script %s mkpart primary %ds %ds'
+            % (outfile, primary_first, primary_last))
 
     if local_bytes > 0:
         # make the file bigger
-        yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
-                      % (outfile, last_sector, sector_size))
+        execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+                % (outfile, last_sector, sector_size))
         # make and format local partition
-        yield execute('parted --script %s mkpartfs primary %s %ds %ds'
-                      % (outfile, local_type, local_first, local_last))
+        execute('parted --script %s mkpartfs primary %s %ds %ds'
+                % (outfile, local_type, local_first, local_last))
 
 
-@defer.inlineCallbacks
 def inject_data(image, key=None, net=None, partition=None, execute=None):
     """Injects a ssh key and optionally net data into a disk image.
@@ -115,26 +111,26 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): If partition is not specified it mounts the image as a single partition. """ - out, err = yield execute('sudo losetup -f --show %s' % image) + out, err = execute('sudo losetup -f --show %s' % image) if err: raise exception.Error('Could not attach image to loopback: %s' % err) device = out.strip() try: if not partition is None: # create partition - out, err = yield execute('sudo kpartx -a %s' % device) + out, err = execute('sudo kpartx -a %s' % device) if err: raise exception.Error('Failed to load partition: %s' % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], partition) else: mapped_device = device - out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) tmpdir = tempfile.mkdtemp() try: # mount loopback to dir - out, err = yield execute( + out, err = execute( 'sudo mount %s %s' % (mapped_device, tmpdir)) if err: raise exception.Error('Failed to mount filesystem: %s' % err) @@ -142,35 +138,33 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): try: if key: # inject key file - yield _inject_key_into_fs(key, tmpdir, execute=execute) + _inject_key_into_fs(key, tmpdir, execute=execute) if net: - yield _inject_net_into_fs(net, tmpdir, execute=execute) + _inject_net_into_fs(net, tmpdir, execute=execute) finally: # unmount device - yield execute('sudo umount %s' % mapped_device) + execute('sudo umount %s' % mapped_device) finally: # remove temporary directory - yield execute('rmdir %s' % tmpdir) + execute('rmdir %s' % tmpdir) if not partition is None: # remove partitions - yield execute('sudo kpartx -d %s' % device) + execute('sudo kpartx -d %s' % device) finally: # remove loopback - yield execute('sudo losetup -d %s' % device) + execute('sudo losetup -d %s' % device) -@defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') - yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - yield execute('sudo chown root %s' % sshdir) - yield execute('sudo chmod 700 %s' % sshdir) + execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter + execute('sudo chown root %s' % sshdir) + execute('sudo chmod 700 %s' % sshdir) keyfile = os.path.join(sshdir, 'authorized_keys') - yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') -@defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(os.path.join(os.path.join( fs, 'etc'), 'network'), 'interfaces') - yield execute('sudo tee %s' % netfile, net) + execute('sudo tee %s' % netfile, net) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 523bb8893..a105a1dd0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -25,8 +25,6 @@ import datetime import logging import os -from twisted.internet import defer - from nova import exception from nova import flags from nova import manager @@ -54,7 +52,7 @@ class ComputeManager(manager.Manager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(*args, **kwargs) - + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info""" # FIXME(ja): include other fields from state? 
@@ -62,12 +60,10 @@ class ComputeManager(manager.Manager):
         state = self.driver.get_info(instance_ref.name)['state']
         self.db.instance_set_state(context, instance_id, state)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def refresh_security_group(self, context, security_group_id, **_kwargs):
-        yield self.driver.refresh_security_group(security_group_id)
+        self.driver.refresh_security_group(security_group_id)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def run_instance(self, context, instance_id, **_kwargs):
         """Launch a new instance with specified options."""
@@ -89,7 +85,7 @@ class ComputeManager(manager.Manager):
                                    'spawning')
 
         try:
-            yield self.driver.spawn(instance_ref)
+            self.driver.spawn(instance_ref)
             now = datetime.datetime.utcnow()
             self.db.instance_update(context,
                                     instance_id,
@@ -103,7 +99,6 @@ class ComputeManager(manager.Manager):
 
         self._update_state(context, instance_id)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this machine."""
@@ -116,12 +111,11 @@ class ComputeManager(manager.Manager):
             raise exception.Error('trying to destroy already destroyed'
                                   ' instance: %s' % instance_id)
 
-        yield self.driver.destroy(instance_ref)
+        self.driver.destroy(instance_ref)
 
         # TODO(ja): should we keep it in a terminated state for a bit?
         self.db.instance_destroy(context, instance_id)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def reboot_instance(self, context, instance_id):
         """Reboot an instance on this server."""
@@ -142,7 +136,7 @@ class ComputeManager(manager.Manager):
                                   instance_id,
                                   power_state.NOSTATE,
                                   'rebooting')
-        yield self.driver.reboot(instance_ref)
+        self.driver.reboot(instance_ref)
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
@@ -154,7 +148,6 @@ class ComputeManager(manager.Manager):
 
         return self.driver.get_console_output(instance_ref)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
@@ -164,13 +157,12 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
         dev_path = yield self.volume_manager.setup_compute_volume(context,
                                                                   volume_id)
-        yield self.driver.attach_volume(instance_ref['ec2_id'],
-                                        dev_path,
-                                        mountpoint)
+        self.driver.attach_volume(instance_ref['ec2_id'],
+                                  dev_path,
+                                  mountpoint)
         self.db.volume_attached(context, volume_id, instance_id, mountpoint)
         defer.returnValue(True)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def detach_volume(self, context, instance_id, volume_id):
         """Detach a volume from an instance."""
@@ -180,7 +172,7 @@ class ComputeManager(manager.Manager):
                       volume_id)
         instance_ref = self.db.instance_get(context, instance_id)
         volume_ref = self.db.volume_get(context, volume_id)
-        yield self.driver.detach_volume(instance_ref['ec2_id'],
-                                        volume_ref['mountpoint'])
+        self.driver.detach_volume(instance_ref['ec2_id'],
+                                  volume_ref['mountpoint'])
        self.db.volume_detached(context, volume_id)
        defer.returnValue(True)
-- cgit

From 1188dd95fbfef144ca71a3c9df2f7dbdb665c97f Mon Sep 17 00:00:00 2001
From: Eric Day
Date: Wed, 24 Nov 2010 14:52:10 -0800
Subject: Consolidated the start instance logic in the two API classes into a
 single method. This also cleans up a number of small discrepancies between
 the two.
--- nova/compute/instance_types.py | 20 +++++++ nova/compute/manager.py | 130 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 67ee8f8a8..a2679e0fc 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -21,9 +21,29 @@ The built-in instance properties. """ +from nova import flags + +FLAGS = flags.FLAGS INSTANCE_TYPES = { 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + + +def get_by_type(instance_type): + """Build instance data structure and save it to the data store.""" + if instance_type is None: + return FLAGS.default_instance_type + if instance_type not in INSTANCE_TYPES: + raise exception.ApiError("Unknown instance type: %s", + instance_type) + return instance_type + + +def get_by_flavor_id(flavor_id): + for instance_type, details in INSTANCE_TYPES.iteritems(): + if details['flavorid'] == flavor_id: + return instance_type + return FLAGS.default_instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 890d79fba..cfc3d4bbd 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,13 +36,18 @@ termination. import datetime import logging +import time from twisted.internet import defer +from nova import db from nova import exception from nova import flags from nova import manager +from nova import quota +from nova import rpc from nova import utils +from nova.compute import instance_types from nova.compute import power_state @@ -53,6 +58,11 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') +def generate_default_hostname(internal_id): + """Default function to generate a hostname given an instance reference.""" + return str(internal_id) + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -84,6 +94,126 @@ class ComputeManager(manager.Manager): """This call passes stright through to the virtualization driver.""" yield self.driver.refresh_security_group(security_group_id) + # TODO(eday): network_topic arg should go away once we push network + # allocation into the scheduler or compute worker. + def create_instances(self, context, instance_type, image_service, image_id, + network_topic, min_count=1, max_count=1, + kernel_id=None, ramdisk_id=None, name='', + description='', user_data='', key_name=None, + key_data=None, security_group='default', + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quote and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." 
% + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = image_service.show(context, image_id) + if not image: + raise Exception("Image not found") + if kernel_id is None: + kernel_id = image.get('kernelId', FLAGS.default_kernel) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # Make sure we have access to kernel and ramdisk + image_service.show(context, kernel_id) + image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'server_name': name, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': name, + 'display_description': description, + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug("Going to run %s instances...", num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance_ref = self.create_instance(context, security_groups, + **instance) + instance_id = instance_ref['id'] + internal_id = instance_ref['internal_id'] + hostname = generate_hostname(internal_id) + self.update_instance(context, instance_id, hostname=hostname) + instances.append(dict(id=instance_id, internal_id=internal_id, + hostname=hostname, **instance)) + + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. 
+ address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(elevated, + network_topic, + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + logging.debug("Casting to scheduler for %s/%s's instance %s" % + (context.project_id, context.user_id, instance_id)) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + group = db.security_group_create(context, values) + def create_instance(self, context, security_groups=None, **kwargs): """Creates the instance in the datastore and returns the new instance as a mapping -- cgit From 7d771bf9c549499c0a138ea991da5df537e0dd88 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 24 Nov 2010 15:16:23 -0800 Subject: The image server should throw not found errors, don't need to check in compute manager. --- nova/compute/manager.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cfc3d4bbd..3f870f866 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -117,8 +117,6 @@ class ComputeManager(manager.Manager): is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: image = image_service.show(context, image_id) - if not image: - raise Exception("Image not found") if kernel_id is None: kernel_id = image.get('kernelId', FLAGS.default_kernel) if ramdisk_id is None: -- cgit From 6956057ac490c788cb94fbfd0af7fe6e91a7ca96 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 1 Dec 2010 09:24:39 -0800 Subject: Broke parts of compute manager out into compute.api to separate what gets run on the API side vs the worker side. --- nova/compute/api.py | 207 ++++++++++++++++++++++++++++++++++++++++++++++++ nova/compute/manager.py | 169 --------------------------------------- 2 files changed, 207 insertions(+), 169 deletions(-) create mode 100644 nova/compute/api.py (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000..e678be85d --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,207 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all API requests relating to instances (guest vms). 
+""" + +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(internal_id): + """Default function to generate a hostname given an instance reference.""" + return str(internal_id) + + +class ComputeAPI(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, **kwargs): + self.network_manager = utils.import_object(FLAGS.network_manager) + super(ComputeAPI, self).__init__(**kwargs) + + # TODO(eday): network_topic arg should go away once we push network + # allocation into the scheduler or compute worker. + def create_instances(self, context, instance_type, image_service, image_id, + network_topic, min_count=1, max_count=1, + kernel_id=None, ramdisk_id=None, name='', + description='', user_data='', key_name=None, + key_data=None, security_group='default', + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quote and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." % + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', FLAGS.default_kernel) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # Make sure we have access to kernel and ramdisk + image_service.show(context, kernel_id) + image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'server_name': name, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': name, + 'display_description': description, + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug("Going to run %s instances...", num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance_ref = self.create_instance(context, security_groups, + **instance) + instance_id = instance_ref['id'] + internal_id = instance_ref['internal_id'] + hostname = 
generate_hostname(internal_id) + self.update_instance(context, instance_id, hostname=hostname) + instances.append(dict(id=instance_id, internal_id=internal_id, + hostname=hostname, **instance)) + + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. + address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(elevated, + network_topic, + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + logging.debug("Casting to scheduler for %s/%s's instance %s" % + (context.project_id, context.user_id, instance_id)) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + group = db.security_group_create(context, values) + + def create_instance(self, context, security_groups=None, **kwargs): + """Creates the instance in the datastore and returns the + new instance as a mapping + + :param context: The security context + :param security_groups: list of security group ids to + attach to the instance + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + created + + :retval Returns a mapping of the instance information + that has just been created + + """ + instance_ref = self.db.instance_create(context, kwargs) + inst_id = instance_ref['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + inst_id, + security_group_id) + return instance_ref + + def update_instance(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. + + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + self.db.instance_update(context, instance_id, kwargs) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3f870f866..a25b8f6f3 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,7 +36,6 @@ termination. 
import datetime import logging -import time from twisted.internet import defer @@ -44,13 +43,9 @@ from nova import db from nova import exception from nova import flags from nova import manager -from nova import quota -from nova import rpc from nova import utils -from nova.compute import instance_types from nova.compute import power_state - FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', utils.abspath('../instances'), 'where instances are stored on disk') @@ -58,11 +53,6 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') -def generate_default_hostname(internal_id): - """Default function to generate a hostname given an instance reference.""" - return str(internal_id) - - class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -94,165 +84,6 @@ class ComputeManager(manager.Manager): """This call passes stright through to the virtualization driver.""" yield self.driver.refresh_security_group(security_group_id) - # TODO(eday): network_topic arg should go away once we push network - # allocation into the scheduler or compute worker. - def create_instances(self, context, instance_type, image_service, image_id, - network_topic, min_count=1, max_count=1, - kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, - key_data=None, security_group='default', - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and - other arguments check out ok.""" - - num_instances = quota.allowed_instances(context, max_count, - instance_type) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', FLAGS.default_kernel) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # Make sure we have access to kernel and ramdisk - image_service.show(context, kernel_id) - image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - type_data = instance_types.INSTANCE_TYPES[instance_type] - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'server_name': name, - 'image_id': image_id, - 'kernel_id': kernel_id, - 'ramdisk_id': ramdisk_id, - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': name, - 'display_description': description, - 'key_name': key_name, - 'key_data': key_data} - - elevated = context.elevated() - instances = [] - logging.debug("Going to run %s instances...", num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance_ref = self.create_instance(context, security_groups, - **instance) - instance_id = instance_ref['id'] - internal_id = instance_ref['internal_id'] - hostname = generate_hostname(internal_id) - self.update_instance(context, instance_id, hostname=hostname) - instances.append(dict(id=instance_id, internal_id=internal_id, - hostname=hostname, **instance)) - - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. 
- address = self.network_manager.allocate_fixed_ip(context, - instance_id, - is_vpn) - rpc.cast(elevated, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project_id, context.user_id, instance_id)) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - group = db.security_group_create(context, values) - - def create_instance(self, context, security_groups=None, **kwargs): - """Creates the instance in the datastore and returns the - new instance as a mapping - - :param context: The security context - :param security_groups: list of security group ids to - attach to the instance - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - created - - :retval Returns a mapping of the instance information - that has just been created - - """ - instance_ref = self.db.instance_create(context, kwargs) - inst_id = instance_ref['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - inst_id, - security_group_id) - return instance_ref - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. - - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - self.db.instance_update(context, instance_id, kwargs) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): -- cgit From fdf0aa30a1127eb8311a599dfdad9653ac699154 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 14:55:42 -0600 Subject: Todd points out that the API doesn't require a display_name, so let's make a default. That way the OpenStack API can rest assured that its server responses will always have a name key. --- nova/compute/manager.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 50a9d316b..0893db9fc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -99,6 +99,8 @@ class ComputeManager(manager.Manager): that has just been created """ + # Set sane defaults if not specified + kwargs.setdefault('display_name', "Server %s" % kwargs['internal_id']) instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] -- cgit From f53f5880c08994d04a552a41ce6f88dfbd867946 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 15:53:27 -0600 Subject: Oops, internal_id isn't available until after a save. This code saves twice; if I moved it into the DB layer we could do it in one save. However, we're moving to one sqlite db per compute worker, so I'd rather have two saves in order to keep the logic in the right layer. 
--- nova/compute/manager.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0893db9fc..6fc5c5186 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -99,10 +99,14 @@ class ComputeManager(manager.Manager): that has just been created """ - # Set sane defaults if not specified - kwargs.setdefault('display_name', "Server %s" % kwargs['internal_id']) instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] + # Set sane defaults if not specified + if 'display_name' not in kwargs: + display_name = "Server %s" % instance_ref['internal_id'] + instance_ref['display_name'] = display_name + self.db.instance_update(context, inst_id, + { 'display_name': display_name }) elevated = context.elevated() if not security_groups: -- cgit From 8af2b1c97903f11034a95894a23bb7e77f573aa6 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 1 Dec 2010 16:04:04 -0600 Subject: Going for a record commits per line changes ratio --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6fc5c5186..e826bdaa2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -106,7 +106,7 @@ class ComputeManager(manager.Manager): display_name = "Server %s" % instance_ref['internal_id'] instance_ref['display_name'] = display_name self.db.instance_update(context, inst_id, - { 'display_name': display_name }) + {'display_name': display_name}) elevated = context.elevated() if not security_groups: -- cgit From 3af6da1fa5a38c8238ea45a7b03a6e3fbb78fe5b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 10:08:56 -0600 Subject: Default Instance.display_name to a value even when None is explicitly passed in. --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e826bdaa2..c4a90e604 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -102,7 +102,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_create(context, kwargs) inst_id = instance_ref['id'] # Set sane defaults if not specified - if 'display_name' not in kwargs: + if kwargs.get('display_name') is None: display_name = "Server %s" % instance_ref['internal_id'] instance_ref['display_name'] = display_name self.db.instance_update(context, inst_id, -- cgit From 26571952bb8f1015b11d6b9514d232ad8a20d837 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 2 Dec 2010 10:21:43 -0800 Subject: Moved reboot/rescue methods into nova.compute.api. 
--- nova/compute/api.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 929342a1e..da01ca61a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -210,3 +210,30 @@ class ComputeAPI(base.Base): """ self.db.instance_update(context, instance_id, kwargs) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance['id']}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance['id']}}) -- cgit From 9d5e1b52f837047aac55d08a664a35be7cc5b8ef Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 2 Dec 2010 12:58:13 -0600 Subject: Correctly translate instance ids to internal_ids in some spots we neglected. And do some pylint cleanup. --- nova/compute/manager.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b5eb23b24..dd8d41129 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, -responding to calls to check it state, attaching persistent as well as -termination. +responding to calls to check its state, attaching persistent storage, and +terminating it. 
 **Related Flags**
 
@@ -39,7 +39,6 @@ import logging
 
 from twisted.internet import defer
 
-from nova import db
 from nova import exception
 from nova import flags
 from nova import manager
@@ -50,10 +49,11 @@ FLAGS = flags.FLAGS
 flags.DEFINE_string('instances_path', '$state_path/instances',
                     'where instances are stored on disk')
 flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
-                    'Driver to use for volume creation')
+                    'Driver to use for controlling virtualization')
 
 
 class ComputeManager(manager.Manager):
+
     """Manages the running instances from creation to destruction."""
 
     def __init__(self, compute_driver=None, *args, **kwargs):
@@ -93,7 +93,6 @@ class ComputeManager(manager.Manager):
         if instance_ref['name'] in self.driver.list_instances():
             raise exception.Error("Instance has already been created")
         logging.debug("instance %s: starting...", instance_id)
-        project_id = instance_ref['project_id']
 
         self.network_manager.setup_compute_network(context, instance_id)
         self.db.instance_update(context,
                                 instance_id,
@@ -135,7 +134,6 @@ class ComputeManager(manager.Manager):
             self.db.instance_destroy(context, instance_id)
             raise exception.Error('trying to destroy already destroyed'
                                   ' instance: %s' % instance_id)
-
         yield self.driver.destroy(instance_ref)
 
         # TODO(ja): should we keep it in a terminated state for a bit?
-- cgit

From 47b47bc4ae34f90a6d1c59718b5ee759fb7c7327 Mon Sep 17 00:00:00 2001
From: Eric Day
Date: Thu, 2 Dec 2010 15:26:14 -0800
Subject: Pushed terminate instance and network manager/topic methods into
 nova.compute.api.

---
 nova/compute/api.py | 82 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 9 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/api.py b/nova/compute/api.py
index da01ca61a..457d6e27f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -20,6 +20,7 @@ Handles all API requests relating to instances (guest vms).
 
 """
 
+import datetime
 import logging
 import time
 
@@ -43,17 +44,17 @@ def generate_default_hostname(internal_id):
 class ComputeAPI(base.Base):
     """API for interacting with the compute manager."""
 
-    def __init__(self, **kwargs):
-        self.network_manager = utils.import_object(FLAGS.network_manager)
+    def __init__(self, network_manager=None, **kwargs):
+        if not network_manager:
+            network_manager = utils.import_object(FLAGS.network_manager)
+        self.network_manager = network_manager
         super(ComputeAPI, self).__init__(**kwargs)
 
-    # TODO(eday): network_topic arg should go away once we push network
-    # allocation into the scheduler or compute worker.
def create_instances(self, context, instance_type, image_service, image_id, - network_topic, min_count=1, max_count=1, - kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, - key_data=None, security_group='default', + min_count=1, max_count=1, kernel_id=None, + ramdisk_id=None, name='', description='', + user_data='', key_name=None, key_data=None, + security_group='default', generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -139,7 +140,7 @@ class ComputeAPI(base.Base): instance_id, is_vpn) rpc.cast(elevated, - network_topic, + self._get_network_topic(context), {"method": "setup_fixed_ip", "args": {"address": address}}) @@ -211,6 +212,58 @@ class ComputeAPI(base.Base): """ self.db.instance_update(context, instance_id, kwargs) + def delete_instance(self, context, instance_id): + logging.debug("Going to try and terminate %d" % instance_id) + try: + instance = self.db.instance_get_by_internal_id(context, + instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found during terminate", + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning("Instance %d is already being terminated", + instance_id) + return + + self.update_instance(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + # FIXME(ja): where should network deallocate occur? + address = self.db.instance_get_floating_address(context, + instance['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(context, + self._get_network_topic(context), + {"method": "disassociate_floating_ip", + "args": {"floating_address": address}}) + + address = self.db.instance_get_fixed_address(context, instance['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. + self.network_manager.deallocate_fixed_ip(context.elevated(), + address) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance['id']}}) + else: + self.db.instance_destroy(context, instance['id']) + def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -237,3 +290,14 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) + + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + network_ref = self.network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return self.db.queue_get_for(context, FLAGS.network_topic, host) -- cgit From 4203aa1060e5a97bed86d2e201c4c2443ef7e042 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Fri, 3 Dec 2010 12:21:18 -0800 Subject: Finished cleaning up the openstack servers API, it no longer touches the database directly. 
Also cleaned up similar things in ec2 API and refactored a couple methods
in nova.compute.api to accommodate this work.

---
 nova/compute/api.py | 96 +++++++++++++++++++++++++----------------------------
 1 file changed, 45 insertions(+), 51 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 457d6e27f..995bed91b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -44,16 +44,19 @@ def generate_default_hostname(internal_id):
 class ComputeAPI(base.Base):
     """API for interacting with the compute manager."""
 
-    def __init__(self, network_manager=None, **kwargs):
+    def __init__(self, network_manager=None, image_service=None, **kwargs):
         if not network_manager:
             network_manager = utils.import_object(FLAGS.network_manager)
         self.network_manager = network_manager
+        if not image_service:
+            image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = image_service
         super(ComputeAPI, self).__init__(**kwargs)
 
-    def create_instances(self, context, instance_type, image_service, image_id,
-                         min_count=1, max_count=1, kernel_id=None,
-                         ramdisk_id=None, name='', description='',
-                         user_data='', key_name=None, key_data=None,
+    def create_instances(self, context, instance_type, image_id, min_count=1,
+                         max_count=1, kernel_id=None, ramdisk_id=None,
+                         display_name='', description='', user_data='',
+                         key_name=None, key_data=None,
                          security_group='default',
                          generate_hostname=generate_default_hostname):
         """Create the number of instances requested if quote and
@@ -70,15 +73,15 @@ class ComputeAPI(base.Base):
 
         is_vpn = image_id == FLAGS.vpn_image_id
         if not is_vpn:
-            image = image_service.show(context, image_id)
+            image = self.image_service.show(context, image_id)
             if kernel_id is None:
                 kernel_id = image.get('kernelId', FLAGS.default_kernel)
             if ramdisk_id is None:
                 ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
 
             # Make sure we have access to kernel and ramdisk
-            image_service.show(context, kernel_id)
-            image_service.show(context, ramdisk_id)
+            self.image_service.show(context, kernel_id)
+            self.image_service.show(context, ramdisk_id)
 
         if security_group is None:
             security_group = ['default']
@@ -111,7 +114,7 @@ class ComputeAPI(base.Base):
             'memory_mb': type_data['memory_mb'],
             'vcpus': type_data['vcpus'],
             'local_gb': type_data['local_gb'],
-            'display_name': name,
+            'display_name': display_name,
             'display_description': description,
             'key_name': key_name,
             'key_data': key_data}
@@ -123,14 +126,25 @@ class ComputeAPI(base.Base):
             instance = dict(mac_address=utils.generate_mac(),
                             launch_index=num,
                             **base_options)
-            instance_ref = self.create_instance(context, security_groups,
-                                                **instance)
-            instance_id = instance_ref['id']
-            internal_id = instance_ref['internal_id']
-            hostname = generate_hostname(internal_id)
-            self.update_instance(context, instance_id, hostname=hostname)
-            instances.append(dict(id=instance_id, internal_id=internal_id,
-                                  hostname=hostname, **instance))
+            instance = self.db.instance_create(context, instance)
+            instance_id = instance['id']
+            internal_id = instance['internal_id']
+
+            elevated = context.elevated()
+            if not security_groups:
+                security_groups = []
+            for security_group_id in security_groups:
+                self.db.instance_add_security_group(elevated,
+                                                    instance_id,
+                                                    security_group_id)
+
+            # Set sane defaults if not specified
+            updates = dict(hostname=generate_hostname(internal_id))
+            if 'display_name' not in instance:
+                updates['display_name'] = "Server %s" % internal_id
+
+            instance = self.update_instance(context, instance_id, **updates)
+ instances.append(instance) # TODO(vish): This probably should be done in the scheduler # or in compute as a call. The network should be @@ -165,39 +179,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} group = db.security_group_create(context, values) - def create_instance(self, context, security_groups=None, **kwargs): - """Creates the instance in the datastore and returns the - new instance as a mapping - - :param context: The security context - :param security_groups: list of security group ids to - attach to the instance - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - created - - :retval Returns a mapping of the instance information - that has just been created - - """ - instance_ref = self.db.instance_create(context, kwargs) - inst_id = instance_ref['id'] - # Set sane defaults if not specified - if kwargs.get('display_name') is None: - display_name = "Server %s" % instance_ref['internal_id'] - instance_ref['display_name'] = display_name - self.db.instance_update(context, inst_id, - {'display_name': display_name}) - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - inst_id, - security_group_id) - return instance_ref - def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. @@ -210,7 +191,7 @@ class ComputeAPI(base.Base): :retval None """ - self.db.instance_update(context, instance_id, kwargs) + return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): logging.debug("Going to try and terminate %d" % instance_id) @@ -264,6 +245,19 @@ class ComputeAPI(base.Base): else: self.db.instance_destroy(context, instance['id']) + def get_instances(self, context, project_id=None): + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, project_id) + return self.db.instance_get_all(context) + + def get_instance(self, context, instance_id): + return self.db.instance_get_by_internal_id(context, instance_id) + def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) -- cgit From 88c0e3e380d50d5794970063bbe464171089f260 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 7 Dec 2010 04:41:53 +0000 Subject: modified a few files --- nova/compute/api.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 929342a1e..6830bacb8 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -52,7 +52,7 @@ class ComputeAPI(base.Base): def create_instances(self, context, instance_type, image_service, image_id, network_topic, min_count=1, max_count=1, kernel_id=None, ramdisk_id=None, name='', - description='', user_data='', key_name=None, + description='', key_name=None, key_data=None, security_group='default', generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and @@ -143,8 +143,8 @@ class ComputeAPI(base.Base): {"method": "setup_fixed_ip", "args": {"address": address}}) - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project_id, context.user_id, instance_id)) + 
logging.debug("Casting to scheduler for %s/%s's instance %s", + context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", @@ -154,6 +154,12 @@ class ComputeAPI(base.Base): return instances def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ try: db.security_group_get_by_name(context, context.project_id, 'default') @@ -162,7 +168,7 @@ class ComputeAPI(base.Base): 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} - group = db.security_group_create(context, values) + db.security_group_create(context, values) def create_instance(self, context, security_groups=None, **kwargs): """Creates the instance in the datastore and returns the -- cgit From c1a40a8381ae3e559b3faad4a93ffec1abe8907f Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 7 Dec 2010 10:06:49 -0800 Subject: Added docstring for get_instances. --- nova/compute/api.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 995bed91b..cb23dae55 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -246,6 +246,9 @@ class ComputeAPI(base.Base): self.db.instance_destroy(context, instance['id']) def get_instances(self, context, project_id=None): + """Get all instances, possibly filtered by project ID or + user ID. If there is no filter and the context is an admin, + it will retreive all instances in the system.""" if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, -- cgit From b35b86c6f415a205b6ce49164cccb2a870c46fcb Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Dec 2010 13:43:54 +0000 Subject: fixes exception throwing with wrong instance type. --- nova/compute/instance_types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index a2679e0fc..0e9600c00 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -22,6 +22,7 @@ The built-in instance properties. """ from nova import flags +from nova.exception import ApiError FLAGS = flags.FLAGS INSTANCE_TYPES = { @@ -37,8 +38,7 @@ def get_by_type(instance_type): if instance_type is None: return FLAGS.default_instance_type if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s", - instance_type) + raise ApiError("Unknown instance type: %s" % instance_type) return instance_type -- cgit From 4c9bf2f01fb712a3af6a9876a175a7a0638bcd59 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Dec 2010 20:18:06 +0000 Subject: import module and not classe directely as per Soren recommendation. --- nova/compute/instance_types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 0e9600c00..6e47170bd 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -22,7 +22,7 @@ The built-in instance properties. 
""" from nova import flags -from nova.exception import ApiError +from nova import exception FLAGS = flags.FLAGS INSTANCE_TYPES = { @@ -38,7 +38,7 @@ def get_by_type(instance_type): if instance_type is None: return FLAGS.default_instance_type if instance_type not in INSTANCE_TYPES: - raise ApiError("Unknown instance type: %s" % instance_type) + raise exception.ApiError("Unknown instance type: %s" % instance_type) return instance_type -- cgit From a2a8406b5d793545c8ecb359e18b80bba618c509 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 14 Dec 2010 16:05:39 -0800 Subject: updates per review --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 051ce579d..f90f28b78 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -203,7 +203,7 @@ class ComputeManager(manager.Manager): volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, - volume_id) + volume_id) try: self.driver.attach_volume(instance_ref['name'], dev_path, @@ -238,7 +238,7 @@ class ComputeManager(manager.Manager): instance_ref['name']) else: self.driver.detach_volume(instance_ref['name'], - volume_ref['mountpoint']) + volume_ref['mountpoint']) self.volume_manager.remove_compute_volume(context, volume_id) self.db.volume_detached(context, volume_id) return True -- cgit From 9b049acc27d477a1ab9e13c9e064e59d8bd0a3ae Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 16 Dec 2010 10:52:30 -0800 Subject: pep8 fixes --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f90f28b78..7eb60e262 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -64,7 +64,7 @@ class ComputeManager(manager.Manager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(*args, **kwargs) - + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" # FIXME(ja): include other fields from state? -- cgit