author     Eric Day <eday@oddments.org>   2010-11-24 14:52:10 -0800
committer  Eric Day <eday@oddments.org>   2010-11-24 14:52:10 -0800
commit     1188dd95fbfef144ca71a3c9df2f7dbdb665c97f (patch)
tree       51c3fb6eb2896c1bf5be1607301cfab46cab795a /nova/compute
parent     07ee9639a105a58b8b212fff607f4e0639d411da (diff)
Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepancies between the two.
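The consolidated method lives on ComputeManager, so each API class resolves its own request parameters and then delegates. A minimal sketch of such a call site, assuming the caller already holds a request context, an image service, and a reference to the compute manager (the handler name and kwargs below are illustrative, not part of this commit):

    # Hypothetical EC2-style handler delegating to the consolidated method.
    from nova.compute import instance_types

    def run_instances(self, context, **kwargs):
        instance_type = instance_types.get_by_type(kwargs.get('instance_type'))
        return self.compute_manager.create_instances(
            context, instance_type, self.image_service,
            kwargs['image_id'], FLAGS.network_topic,
            min_count=int(kwargs.get('min_count', 1)),
            max_count=int(kwargs.get('max_count', 1)),
            key_name=kwargs.get('key_name'),
            security_group=kwargs.get('security_group', 'default'))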
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/instance_types.py |  21
-rw-r--r--  nova/compute/manager.py        | 130
2 files changed, 151 insertions(+), 0 deletions(-)
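Note that create_instances in manager.py below also makes hostname assignment pluggable through its generate_hostname keyword argument, which defaults to the new generate_default_hostname. A hypothetical override, just to show the hook's contract (a callable taking the integer internal id and returning a string; not part of this commit):

    def generate_prefixed_hostname(internal_id):
        # Hypothetical alternative scheme; the default simply
        # returns str(internal_id).
        return 'instance-%d' % internal_id

A caller would pass generate_hostname=generate_prefixed_hostname when invoking create_instances.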
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 67ee8f8a8..a2679e0fc 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,9 +21,30 @@
The built-in instance properties.
"""
+from nova import exception
+from nova import flags
+
+FLAGS = flags.FLAGS
INSTANCE_TYPES = {
    'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
    'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
    'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
    'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
    'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+
+def get_by_type(instance_type):
+    """Return a valid instance type name, defaulting when given None."""
+    if instance_type is None:
+        return FLAGS.default_instance_type
+    if instance_type not in INSTANCE_TYPES:
+        raise exception.ApiError("Unknown instance type: %s"
+                                 % instance_type)
+    return instance_type
+
+
+def get_by_flavor_id(flavor_id):
+    for instance_type, details in INSTANCE_TYPES.iteritems():
+        if details['flavorid'] == flavor_id:
+            return instance_type
+    return FLAGS.default_instance_type
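The two helpers above can be exercised directly; the expected behavior, assuming FLAGS.default_instance_type is 'm1.small' (the flag's value is deployment-specific):

    >>> from nova.compute import instance_types
    >>> instance_types.get_by_type('m1.large')
    'm1.large'
    >>> instance_types.get_by_type(None)       # None falls back to the flag
    'm1.small'
    >>> instance_types.get_by_flavor_id(3)
    'm1.medium'
    >>> instance_types.get_by_flavor_id(99)    # unknown flavor ids also default
    'm1.small'

Note the asymmetry: an unknown type name raises exception.ApiError, while an unknown flavor id silently falls back to the default.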
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 890d79fba..cfc3d4bbd 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -36,13 +36,18 @@ termination.
import datetime
import logging
+import time
from twisted.internet import defer
+from nova import db
from nova import exception
from nova import flags
from nova import manager
+from nova import quota
+from nova import rpc
from nova import utils
+from nova.compute import instance_types
from nova.compute import power_state
@@ -53,6 +58,11 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
                    'Driver to use for controlling virtualization')
+def generate_default_hostname(internal_id):
+ """Default function to generate a hostname given an instance reference."""
+ return str(internal_id)
+
+
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
@@ -84,6 +94,126 @@ class ComputeManager(manager.Manager):
"""This call passes stright through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
+    # TODO(eday): network_topic arg should go away once we push network
+    # allocation into the scheduler or compute worker.
+    def create_instances(self, context, instance_type, image_service, image_id,
+                         network_topic, min_count=1, max_count=1,
+                         kernel_id=None, ramdisk_id=None, name='',
+                         description='', user_data='', key_name=None,
+                         key_data=None, security_group='default',
+                         generate_hostname=generate_default_hostname):
+        """Create the number of instances requested if quota and
+        other arguments check out OK."""
+
+        num_instances = quota.allowed_instances(context, max_count,
+                                                instance_type)
+        if num_instances < min_count:
+            logging.warn("Quota exceeded for %s, tried to run %s instances",
+                         context.project_id, min_count)
+            raise quota.QuotaError("Instance quota exceeded. You can only "
+                                   "run %s more instances of this type." %
+                                   num_instances, "InstanceLimitExceeded")
+
+        is_vpn = image_id == FLAGS.vpn_image_id
+        if not is_vpn:
+            image = image_service.show(context, image_id)
+            if not image:
+                raise Exception("Image not found")
+            if kernel_id is None:
+                kernel_id = image.get('kernelId', FLAGS.default_kernel)
+            if ramdisk_id is None:
+                ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
+
+            # Make sure we have access to kernel and ramdisk
+            image_service.show(context, kernel_id)
+            image_service.show(context, ramdisk_id)
+
+        if security_group is None:
+            security_group = ['default']
+        if not isinstance(security_group, list):
+            security_group = [security_group]
+
+        security_groups = []
+        self.ensure_default_security_group(context)
+        for security_group_name in security_group:
+            group = db.security_group_get_by_name(context,
+                                                  context.project_id,
+                                                  security_group_name)
+            security_groups.append(group['id'])
+
+        if key_data is None and key_name:
+            key_pair = db.key_pair_get(context, context.user_id, key_name)
+            key_data = key_pair['public_key']
+
+        type_data = instance_types.INSTANCE_TYPES[instance_type]
+        base_options = {
+            'reservation_id': utils.generate_uid('r'),
+            'server_name': name,
+            'image_id': image_id,
+            'kernel_id': kernel_id,
+            'ramdisk_id': ramdisk_id,
+            'state_description': 'scheduling',
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+            'instance_type': instance_type,
+            'memory_mb': type_data['memory_mb'],
+            'vcpus': type_data['vcpus'],
+            'local_gb': type_data['local_gb'],
+            'display_name': name,
+            'display_description': description,
+            'key_name': key_name,
+            'key_data': key_data}
+
+        elevated = context.elevated()
+        instances = []
+        logging.debug("Going to run %s instances...", num_instances)
+        for num in range(num_instances):
+            instance = dict(mac_address=utils.generate_mac(),
+                            launch_index=num,
+                            **base_options)
+            instance_ref = self.create_instance(context, security_groups,
+                                                **instance)
+            instance_id = instance_ref['id']
+            internal_id = instance_ref['internal_id']
+            hostname = generate_hostname(internal_id)
+            self.update_instance(context, instance_id, hostname=hostname)
+            instances.append(dict(id=instance_id, internal_id=internal_id,
+                                  hostname=hostname, **instance))
+
+            # TODO(vish): This probably should be done in the scheduler
+            #             or in compute as a call. The network should be
+            #             allocated after the host is assigned and setup
+            #             can happen at the same time.
+            address = self.network_manager.allocate_fixed_ip(context,
+                                                             instance_id,
+                                                             is_vpn)
+            rpc.cast(elevated,
+                     network_topic,
+                     {"method": "setup_fixed_ip",
+                      "args": {"address": address}})
+
+            logging.debug("Casting to scheduler for %s/%s's instance %s",
+                          context.project_id, context.user_id, instance_id)
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "run_instance",
+                      "args": {"topic": FLAGS.compute_topic,
+                               "instance_id": instance_id}})
+
+        return instances
+
+    def ensure_default_security_group(self, context):
+        try:
+            db.security_group_get_by_name(context, context.project_id,
+                                          'default')
+        except exception.NotFound:
+            values = {'name': 'default',
+                      'description': 'default',
+                      'user_id': context.user_id,
+                      'project_id': context.project_id}
+            db.security_group_create(context, values)
+
    def create_instance(self, context, security_groups=None, **kwargs):
        """Creates the instance in the datastore and returns the
        new instance as a mapping