author     Isaku Yamahata <yamahata@valinux.co.jp>  2011-07-08 12:07:58 +0900
committer  Isaku Yamahata <yamahata@valinux.co.jp>  2011-07-08 12:07:58 +0900
commit     a02895b6bb353a468ce7c58e60bc2dbd152c5ec9 (patch)
tree       605c2efa569a42fd6f059299da1316edb597fec1 /nova/compute
parent     02c0bf3b242395e63baf582b1f9c279eef4282d6 (diff)
parent     bc8f009f8ac6393301dd857339918d40b93be63d (diff)
merge with trunk
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py      272
-rw-r--r--  nova/compute/manager.py  274
2 files changed, 362 insertions, 184 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index af487a239..5350b8f28 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -49,9 +49,27 @@ flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
-def generate_default_hostname(instance_id):
+def generate_default_hostname(instance):
"""Default function to generate a hostname given an instance reference."""
- return str(instance_id)
+ display_name = instance['display_name']
+ if display_name is None:
+ return 'server_%d' % (instance['id'],)
+ table = ''
+ deletions = ''
+ for i in xrange(256):
+ c = chr(i)
+ if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
+ table += c
+ elif c == ' ':
+ table += '_'
+ elif ('A' <= c <= 'Z'):
+ table += c.lower()
+ else:
+ table += '\0'
+ deletions += c
+ if isinstance(display_name, unicode):
+ display_name = display_name.encode('latin-1', 'ignore')
+ return display_name.translate(table, deletions)
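# Example (editorial sketch, not part of the commit): under Python 2
# string semantics, str.translate(table, deletions) first strips every
# byte listed in `deletions`, then maps the survivors through the
# 256-byte `table` built above. So, for a hypothetical instance dict:
#
#     generate_default_hostname({'id': 42,
#                                'display_name': u'My Web Server #1'})
#     # -> 'my_web_server_1'  (lowercased, spaces -> '_', '#' deleted)
#     generate_default_hostname({'id': 42, 'display_name': None})
#     # -> 'server_42'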
def _is_able_to_shutdown(instance, instance_id):
@@ -84,23 +102,6 @@ class API(base.Base):
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
- def get_network_topic(self, context, instance_id):
- """Get the network topic for an instance."""
- try:
- instance = self.get(context, instance_id)
- except exception.NotFound:
- LOG.warning(_("Instance %d was not found in get_network_topic"),
- instance_id)
- raise
-
- host = instance['host']
- if not host:
- raise exception.Error(_("Instance %d has no host") % instance_id)
- topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
- return rpc.call(context,
- topic,
- {"method": "get_network_topic", "args": {'fake': 1}})
-
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
@@ -144,7 +145,7 @@ class API(base.Base):
def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -155,6 +156,10 @@ class API(base.Base):
if not instance_type:
instance_type = instance_types.get_default_instance_type()
+ if not min_count:
+ min_count = 1
+ if not max_count:
+ max_count = min_count
num_instances = quota.allowed_instances(context, max_count,
instance_type)
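# Example (sketch, not part of the commit): with the defaulting above,
# callers may now omit either count:
#
#     min_count=None, max_count=None  ->  min_count=1, max_count=1
#     min_count=3,    max_count=None  ->  min_count=3, max_count=3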
@@ -204,18 +209,7 @@ class API(base.Base):
if ramdisk_id:
image_service.show(context, ramdisk_id)
- if security_group is None:
- security_group = ['default']
- if not type(security_group) is list:
- security_group = [security_group]
-
- security_groups = []
self.ensure_default_security_group(context)
- for security_group_name in security_group:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
@@ -308,19 +302,31 @@ class API(base.Base):
def create_db_entry_for_new_instance(self, context, image, base_options,
security_group, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
- including any related table updates (such as security
- groups, MAC address, etc). This will called by create()
- in the majority of situations, but all-at-once style
- Schedulers may initiate the call."""
- instance = dict(mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
+ including any related table updates (such as security group,
+ etc).
+
+ This will be called by create() in the majority of situations,
+ but create_all_at_once() style Schedulers may initiate the call.
+ If you are changing this method, be sure to update both
+ call paths.
+ """
+ instance = dict(launch_index=num, **base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
elevated = context.elevated()
- if not security_groups:
- security_groups = []
+ if security_group is None:
+ security_group = ['default']
+ if not isinstance(security_group, list):
+ security_group = [security_group]
+
+ security_groups = []
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
instance_id,
@@ -336,10 +342,12 @@ class API(base.Base):
block_device_mapping)
# Set sane defaults if not specified
- updates = dict(hostname=self.hostname_factory(instance_id))
+ updates = {}
if (not hasattr(instance, 'display_name') or
instance.display_name is None):
updates['display_name'] = "Server %s" % instance_id
+ instance['display_name'] = updates['display_name']
+ updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
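# Note the ordering above (illustrative, not part of the commit): the
# display_name default is written into the local instance dict before
# hostname_factory(instance) runs, so the new generate_default_hostname()
# can derive the hostname from it:
#
#     display_name 'Server 42'  ->  hostname 'server_42'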
@@ -384,12 +392,12 @@ class API(base.Base):
def create_all_at_once(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, block_device_mapping=None):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
@@ -414,7 +422,7 @@ class API(base.Base):
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -426,6 +434,9 @@ class API(base.Base):
Scheduler drivers, but may remove the effectiveness of the
more complicated drivers.
+ NOTE: If you change this method, be sure to change
+ create_all_at_once() at the same time!
+
Returns a list of instance dicts.
"""
@@ -440,7 +451,6 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
- block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
@@ -659,50 +669,60 @@ class API(base.Base):
"""
return self.get(context, instance_id)
- def get_all_across_zones(self, context, reservation_id):
- """Get all instances with this reservation_id, across
- all available Zones (if any).
- """
- context = context.elevated()
- instances = self.db.instance_get_all_by_reservation(
- context, reservation_id)
-
- children = scheduler_api.call_zone_method(context, "list",
- novaclient_collection_name="servers",
- reservation_id=reservation_id)
-
- for zone, servers in children:
- for server in servers:
- # Results are ready to send to user. No need to scrub.
- server._info['_is_precooked'] = True
- instances.append(server._info)
- return instances
-
def get_all(self, context, project_id=None, reservation_id=None,
- fixed_ip=None):
+ fixed_ip=None, recurse_zones=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
"""
- if reservation_id is not None:
- return self.get_all_across_zones(context, reservation_id)
-
- if fixed_ip is not None:
- return self.db.fixed_ip_get_instance(context, fixed_ip)
- if project_id or not context.is_admin:
+ if reservation_id is not None:
+ recurse_zones = True
+ instances = self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+ elif fixed_ip is not None:
+ try:
+ instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+ except exception.FixedIpNotFound:
+ if not recurse_zones:
+ raise
+ instances = None
+ elif project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(
+ instances = self.db.instance_get_all_by_user(
context, context.user_id)
+ else:
+ if project_id is None:
+ project_id = context.project_id
+ instances = self.db.instance_get_all_by_project(
+ context, project_id)
+ else:
+ instances = self.db.instance_get_all(context)
+
+ if instances is None:
+ instances = []
+ elif not isinstance(instances, list):
+ instances = [instances]
- if project_id is None:
- project_id = context.project_id
+ if not recurse_zones:
+ return instances
- return self.db.instance_get_all_by_project(
- context, project_id)
+ admin_context = context.elevated()
+ children = scheduler_api.call_zone_method(admin_context,
+ "list",
+ novaclient_collection_name="servers",
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=True)
- return self.db.instance_get_all(context)
+ for zone, servers in children:
+ for server in servers:
+ # Results are ready to send to user. No need to scrub.
+ server._info['_is_precooked'] = True
+ instances.append(server._info)
+ return instances
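# Example (sketch, not part of the commit; `compute_api` is a
# hypothetical instance of this API class): a reservation_id query now
# forces recurse_zones, so local rows and child-zone results come back
# as one list:
#
#     instances = compute_api.get_all(context,
#                                     reservation_id='r-abc123')
#     # local DB matches, plus each child zone's servers, the latter
#     # tagged _is_precooked=True so they are not scrubbed again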
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
@@ -736,7 +756,7 @@ class API(base.Base):
params = {}
if not host:
instance = self.get(context, instance_id)
- host = instance["host"]
+ host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
@@ -757,19 +777,60 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
+ def backup(self, context, instance_id, name, backup_type, rotation,
+ extra_properties=None):
+ """Backup the given instance
+
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ name = backup_type # daily backups are called 'daily'
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+ """
+ recv_meta = self._create_image(context, instance_id, name, 'backup',
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties)
+ return recv_meta
+
def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ :param extra_properties: dict of extra image properties to include
+
:returns: A dict containing image metadata
"""
- properties = {'instance_id': str(instance_id),
+ return self._create_image(context, instance_id, name, 'snapshot',
+ extra_properties=extra_properties)
+
+ def _create_image(self, context, instance_id, name, image_type,
+ backup_type=None, rotation=None, extra_properties=None):
+ """Create snapshot or backup for an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: string for name of the snapshot
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+
+ """
+ instance = db.api.instance_get(context, instance_id)
+ properties = {'instance_uuid': instance['uuid'],
'user_id': str(context.user_id),
- 'image_state': 'creating'}
+ 'image_state': 'creating',
+ 'image_type': image_type,
+ 'backup_type': backup_type}
properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
- params = {'image_id': recv_meta['id']}
+ params = {'image_id': recv_meta['id'], 'image_type': image_type,
+ 'backup_type': backup_type, 'rotation': rotation}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return recv_meta
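# Example (sketch, not part of the commit): both public entry points
# funnel into _create_image(), which casts snapshot_instance() to the
# compute host:
#
#     api.backup(context, instance_id, 'nightly', 'daily', rotation=7)
#     # -> keep at most 7 'daily' backups, rotating out the oldest
#     api.snapshot(context, instance_id, 'my-snapshot')
#     # -> plain snapshot; rotation stays None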
@@ -871,6 +932,23 @@ class API(base.Base):
"instance_id": instance_id,
"flavor_id": flavor_id}})
+ @scheduler_api.reroute_compute("add_fixed_ip")
+ def add_fixed_ip(self, context, instance_id, network_id):
+ """Add fixed_ip from specified network to given instance."""
+ self._cast_compute_message('add_fixed_ip_to_instance', context,
+ instance_id,
+ params={'network_id': network_id})
+
+ #TODO(tr3buchet): how to run this in the correct zone?
+ def add_network_to_project(self, context, project_id):
+ """Force adds a network to the project."""
+ # this will raise if zone doesn't know about project so the decorator
+ # can catch it and pass it down
+ self.db.project_get(context, project_id)
+
+ # didn't raise so this is the correct zone
+ self.network_api.add_network_to_project(context, project_id)
+
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
@@ -881,6 +959,11 @@ class API(base.Base):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ def set_host_enabled(self, context, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._call_compute_message("set_host_enabled", context,
+ instance_id=None, host=host, params={"enabled": enabled})
+
@scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
@@ -1013,11 +1096,34 @@ class API(base.Base):
return instance
def associate_floating_ip(self, context, instance_id, address):
- """Associate a floating ip with an instance."""
+ """Makes calls to network_api to associate_floating_ip.
+
+ :param address: is a string floating ip address
+ """
instance = self.get(context, instance_id)
+
+ # TODO(tr3buchet): currently network_info doesn't contain floating IPs
+ # in its info, if this changes, the next few lines will need to
+ # accommodate the info containing floating as well as fixed ip addresses
+ fixed_ip_addrs = []
+ for info in self.network_api.get_instance_nw_info(context,
+ instance):
+ ips = info[1]['ips']
+ fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
+
+ # TODO(tr3buchet): this will associate the floating IP with the first
+ # fixed_ip (lowest id) an instance has. This should be changed to
+ # support specifying a particular fixed_ip if multiple exist.
+ if not fixed_ip_addrs:
+ msg = _("instance |%s| has no fixed_ips. "
+ "unable to associate floating ip") % instance_id
+ raise exception.ApiError(msg)
+ if len(fixed_ip_addrs) > 1:
+ LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
+ fixed_ip_addrs[0])
self.network_api.associate_floating_ip(context,
floating_ip=address,
- fixed_ip=instance['fixed_ip'])
+ fixed_ip=fixed_ip_addrs[0])
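# Sketch (an assumption, not stated in the commit): the network_info
# iterated above is a list of (network, info) pairs where info['ips']
# is a list of dicts keyed by 'ip', e.g.:
#
#     [(network1, {'ips': [{'ip': '10.0.0.2'}, {'ip': '10.0.1.2'}]})]
#
# hence info[1]['ips'] and ip_dict['ip']; the floating IP is attached
# to the first fixed address found.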
def get_instance_metadata(self, context, instance_id):
"""Get all metadata associated with an instance."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 77ed1cf53..57beb5f72 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -46,6 +46,7 @@ from eventlet import greenthread
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import manager
from nova import network
@@ -53,6 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -129,9 +131,9 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
+ self.network_api = network.API()
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
- self.network_api = network.API()
self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -178,20 +180,6 @@ class ComputeManager(manager.SchedulerDependentManager):
FLAGS.console_topic,
FLAGS.console_host)
- def get_network_topic(self, context, **kwargs):
- """Retrieves the network host for a project on this host."""
- # TODO(vish): This method should be memoized. This will make
- # the call to get_network_host cheaper, so that
- # it can pas messages instead of checking the db
- # locally.
- if FLAGS.stub_network:
- host = FLAGS.network_host
- else:
- host = self.network_manager.get_network_host(context)
- return self.db.queue_get_for(context,
- FLAGS.network_topic,
- host)
-
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@@ -281,10 +269,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- instance_ref.injected_files = kwargs.get('injected_files', [])
- instance_ref.admin_pass = kwargs.get('admin_password', None)
- if instance_ref['name'] in self.driver.list_instances():
+ instance = self.db.instance_get(context, instance_id)
+ instance.injected_files = kwargs.get('injected_files', [])
+ instance.admin_pass = kwargs.get('admin_password', None)
+ if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
@@ -297,54 +285,45 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
+ is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
# will eventually also need to save the address here.
if not FLAGS.stub_network:
- address = rpc.call(context,
- self.get_network_topic(context),
- {"method": "allocate_fixed_ip",
- "args": {"instance_id": instance_id,
- "vpn": is_vpn}})
-
+ network_info = self.network_api.allocate_for_instance(context,
+ instance, vpn=is_vpn)
+ LOG.debug(_("instance network_info: |%s|"), network_info)
self.network_manager.setup_compute_network(context,
instance_id)
+ else:
+ # TODO(tr3buchet) not really sure how this should be handled.
+ # virt requires network_info to be passed in but stub_network
+ # is enabled. Setting to [] for now will cause virt to skip
+ # all vif creation and network injection, maybe this is correct
+ network_info = []
- block_device_mapping = self._setup_block_device_mapping(
- context,
- instance_id)
+ bd_mapping = self._setup_block_device_mapping(context, instance_id)
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
- self.driver.spawn(instance_ref,
- block_device_mapping=block_device_mapping)
+ self.driver.spawn(instance, network_info, bd_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
- if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
- public_ip = self.network_api.allocate_floating_ip(context)
-
- self.db.floating_ip_set_auto_assigned(context, public_ip)
- fixed_ip = self.db.fixed_ip_get_by_address(context, address)
- floating_ip = self.db.floating_ip_get_by_address(context,
- public_ip)
-
- self.network_api.associate_floating_ip(
- context,
- floating_ip,
- fixed_ip,
- affect_auto_assigned=True)
-
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.create',
+ notifier_api.INFO,
+ usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -367,53 +346,24 @@ class ComputeManager(manager.SchedulerDependentManager):
def _shutdown_instance(self, context, instance_id, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
LOG.audit(_("%(action_str)s instance %(instance_id)s") %
{'action_str': action_str, 'instance_id': instance_id},
context=context)
- fixed_ip = instance_ref.get('fixed_ip')
- if not FLAGS.stub_network and fixed_ip:
- floating_ips = fixed_ip.get('floating_ips') or []
- for floating_ip in floating_ips:
- address = floating_ip['address']
- LOG.debug("Disassociating address %s", address,
- context=context)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later.
- self.network_api.disassociate_floating_ip(context,
- address,
- True)
- if (FLAGS.auto_assign_floating_ip
- and floating_ip.get('auto_assigned')):
- LOG.debug(_("Deallocating floating ip %s"),
- floating_ip['address'],
- context=context)
- self.network_api.release_floating_ip(context,
- address,
- True)
-
- address = fixed_ip['address']
- if address:
- LOG.debug(_("Deallocating address %s"), address,
- context=context)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
- volumes = instance_ref.get('volumes') or []
+ if not FLAGS.stub_network:
+ self.network_api.deallocate_for_instance(context, instance)
+
+ volumes = instance.get('volumes') or []
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
- if (instance_ref['state'] == power_state.SHUTOFF and
- instance_ref['state_description'] != 'stopped'):
+ if (instance['state'] == power_state.SHUTOFF and
+ instance['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
- self.driver.destroy(instance_ref)
+ self.driver.destroy(instance)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@@ -423,9 +373,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
+ instance = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+ usage_info = utils.usage_from_instance(instance)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.delete',
+ notifier_api.INFO,
+ usage_info)
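# Example (sketch, not part of the commit): every notifier call added
# in this diff follows the same pattern -- publisher 'compute.<host>',
# an event type under 'compute.instance.*', INFO priority, and a usage
# payload from utils.usage_from_instance():
#
#     notifier_api.notify('compute.%s' % self.host,   # publisher_id
#                         'compute.instance.delete',  # event_type
#                         notifier_api.INFO,          # priority
#                         usage_info)                 # payload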
@exception.wrap_exception
@checks_instance_lock
@@ -462,6 +418,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance_ref,
+ image_ref=image_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.rebuild',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -489,8 +451,19 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
@exception.wrap_exception
- def snapshot_instance(self, context, instance_id, image_id):
- """Snapshot an instance on this host."""
+ def snapshot_instance(self, context, instance_id, image_id,
+ image_type='snapshot', backup_type=None,
+ rotation=None):
+ """Snapshot an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param image_id: glance.db.sqlalchemy.models.Image.Id
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ """
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -510,6 +483,65 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.snapshot(instance_ref, image_id)
+ if image_type == 'snapshot':
+ if rotation:
+ raise exception.ImageRotationNotAllowed()
+ elif image_type == 'backup':
+ if rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type,
+ rotation)
+ else:
+ raise exception.RotationRequiredForBackup()
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
+ def rotate_backups(self, context, instance_uuid, backup_type, rotation):
+ """Delete excess backups associated to an instance.
+
+ Instances are allowed a fixed number of backups (the rotation number);
+ this method deletes the oldest backups that exceed the rotation
+ threshold.
+
+ :param context: security context
+ :param instance_uuid: string representing uuid of instance
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ """
+ # NOTE(jk0): Eventually extract this out to the ImageService?
+ def fetch_images():
+ images = []
+ marker = None
+ while True:
+ batch = image_service.detail(context, filters=filters,
+ marker=marker, sort_key='created_at', sort_dir='desc')
+ if not batch:
+ break
+ images += batch
+ marker = batch[-1]['id']
+ return images
+
+ image_service = nova.image.get_default_image_service()
+ filters = {'property-image_type': 'backup',
+ 'property-backup_type': backup_type,
+ 'property-instance_uuid': instance_uuid}
+
+ images = fetch_images()
+ num_images = len(images)
+ LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
+ % locals()))
+ if num_images > rotation:
+ # NOTE(sirp): this deletes all backups that exceed the rotation
+ # limit
+ excess = len(images) - rotation
+ LOG.debug(_("Rotating out %d backups" % excess))
+ for i in xrange(excess):
+ image = images.pop()
+ image_id = image['id']
+ LOG.debug(_("Deleting image %d" % image_id))
+ image_service.delete(context, image_id)
+
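# Example (sketch, not part of the commit): fetch_images() sorts by
# created_at descending, so images.pop() removes the oldest first.
# With rotation=7 and 9 matching backups:
#
#     excess = 9 - 7   # 2
#     # the two oldest backups are popped and deleted; the 7 newest kept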
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -639,6 +671,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.confirm',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -686,6 +723,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.revert',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -722,6 +764,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
+ usage_info = utils.usage_from_instance(instance_ref,
+ new_instance_type=instance_type['name'],
+ new_instance_type_id=instance_type['id'])
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.prep',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -773,14 +822,28 @@ class ComputeManager(manager.SchedulerDependentManager):
# reload the updated instance ref
# FIXME(mdietz): is there reload functionality?
- instance_ref = self.db.instance_get(context, instance_id)
- self.driver.finish_resize(instance_ref, disk_info)
+ instance = self.db.instance_get(context, instance_id)
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ self.driver.finish_resize(instance, disk_info, network_info)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@exception.wrap_exception
@checks_instance_lock
+ def add_fixed_ip_to_instance(self, context, instance_id, network_id):
+ """Calls network_api to add new fixed_ip to instance
+ then injects the new network info and resets instance networking.
+
+ """
+ self.network_api.add_fixed_ip_to_instance(context, instance_id,
+ network_id)
+ self.inject_network_info(context, instance_id)
+ self.reset_network(context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
context = context.elevated()
@@ -814,6 +877,12 @@ class ComputeManager(manager.SchedulerDependentManager):
result))
@exception.wrap_exception
+ def set_host_enabled(self, context, instance_id=None, host=None,
+ enabled=None):
+ """Sets the specified host's ability to accept new instances."""
+ return self.driver.set_host_enabled(host, enabled)
+
+ @exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
@@ -882,20 +951,22 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reset_network(self, context, instance_id):
"""Reset networking on the given instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: reset network'), instance_id,
context=context)
- self.driver.reset_network(instance_ref)
+ self.driver.reset_network(instance)
@checks_instance_lock
def inject_network_info(self, context, instance_id):
"""Inject network info for the given instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
- self.driver.inject_network_info(instance_ref)
+ instance = self.db.instance_get(context, instance_id)
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ LOG.debug(_("network_info to inject: |%s|"), network_info)
+
+ self.driver.inject_network_info(instance, network_info)
@exception.wrap_exception
def get_console_output(self, context, instance_id):
@@ -1089,16 +1160,16 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
- ec2_id = instance_ref['hostname']
+ hostname = instance_ref['hostname']
# Getting fixed ips
- fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
- if not fixed_ip:
- raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
+ fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
+ if not fixed_ips:
+ raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
- LOG.info(_("%s has no volume."), ec2_id)
+ LOG.info(_("%s has no volume."), hostname)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
@@ -1121,7 +1192,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise
else:
LOG.warn(_("setup_compute_network() failed %(cnt)d."
- "Retry up to %(max_retry)d for %(ec2_id)s.")
+ "Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
@@ -1218,9 +1289,10 @@ class ComputeManager(manager.SchedulerDependentManager):
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip is found for %s.'), i_name)
- except:
- LOG.error(_("Live migration: Unexpected error:"
- "%s cannot inherit floating ip..") % i_name)
+ except Exception, e:
+ LOG.error(_("Live migration: Unexpected error: "
+ "%(i_name)s cannot inherit floating "
+ "ip.\n%(e)s") % (locals()))
# Restore instance/volume state
self.recover_live_migration(ctxt, instance_ref, dest)