summaryrefslogtreecommitdiffstats
path: root/nova/compute
diff options
context:
space:
mode:
Diffstat (limited to 'nova/compute')
-rw-r--r--nova/compute/manager.py118
1 file changed, 44 insertions, 74 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 625d2c94b..be48e253e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1806,77 +1806,39 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.destroy(instance_ref, network_info,
block_device_info, True)
- def periodic_tasks(self, context=None):
- """Tasks to be run at a periodic interval."""
- error_list = super(ComputeManager, self).periodic_tasks(context)
- if error_list is None:
- error_list = []
+ @manager.periodic_task
+ def _poll_rebooting_instances(self, context):
+ if FLAGS.reboot_timeout > 0:
+ self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
+
+ @manager.periodic_task
+ def _poll_rescued_instances(self, context):
+ if FLAGS.rescue_timeout > 0:
+ self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+
+ @manager.periodic_task
+ def _poll_unconfirmed_resizes(self, context):
+ if FLAGS.resize_confirm_window > 0:
+ self.driver.poll_unconfirmed_resizes(FLAGS.resize_confirm_window)
+
+ @manager.periodic_task
+ def _poll_bandwidth_usage(self, context, start_time=None, stop_time=None):
+ if not start_time:
+ start_time = utils.current_audit_period()[1]
- try:
- if FLAGS.reboot_timeout > 0:
- self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
- except Exception as ex:
- LOG.warning(_("Error during poll_rebooting_instances: %s"),
- unicode(ex))
- error_list.append(ex)
-
- try:
- if FLAGS.rescue_timeout > 0:
- self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
- except Exception as ex:
- LOG.warning(_("Error during poll_rescued_instances: %s"),
- unicode(ex))
- error_list.append(ex)
-
- try:
- if FLAGS.resize_confirm_window > 0:
- self.driver.poll_unconfirmed_resizes(
- FLAGS.resize_confirm_window)
- except Exception as ex:
- LOG.warning(_("Error during poll_unconfirmed_resizes: %s"),
- unicode(ex))
- error_list.append(ex)
-
- try:
- self._report_driver_status()
- except Exception as ex:
- LOG.warning(_("Error during report_driver_status(): %s"),
- unicode(ex))
- error_list.append(ex)
-
- try:
- self._sync_power_states(context)
- except Exception as ex:
- LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
- error_list.append(ex)
-
- try:
- self._reclaim_queued_deletes(context)
- except Exception as ex:
- LOG.warning(_("Error during reclamation of queued deletes: %s"),
- unicode(ex))
- error_list.append(ex)
- try:
- start = utils.current_audit_period()[1]
- self._update_bandwidth_usage(context, start)
- except NotImplementedError:
- # Not all hypervisors have bandwidth polling implemented yet.
- # If they don't id doesn't break anything, they just don't get the
- # info in the usage events. (mdragon)
- pass
- except Exception as ex:
- LOG.warning(_("Error updating bandwidth usage: %s"),
- unicode(ex))
- error_list.append(ex)
-
- return error_list
-
- def _update_bandwidth_usage(self, context, start_time, stop_time=None):
curr_time = time.time()
if curr_time - self._last_bw_usage_poll > FLAGS.bandwith_poll_interval:
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
- bw_usage = self.driver.get_all_bw_usage(start_time, stop_time)
+
+ try:
+ bw_usage = self.driver.get_all_bw_usage(start_time, stop_time)
+ except NotImplementedError:
+ # NOTE(mdragon): Not all hypervisors have bandwidth polling
+ # implemented yet. If they don't it doesn't break anything,
+ # they just don't get the info in the usage events.
+ return
+
for usage in bw_usage:
vif = usage['virtual_interface']
self.db.bw_usage_update(context,
@@ -1885,7 +1847,8 @@ class ComputeManager(manager.SchedulerDependentManager):
start_time,
usage['bw_in'], usage['bw_out'])
- def _report_driver_status(self):
+ @manager.periodic_task
+ def _report_driver_status(self, context):
curr_time = time.time()
if curr_time - self._last_host_check > FLAGS.host_state_interval:
self._last_host_check = curr_time
@@ -1895,6 +1858,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.update_service_capabilities(
self.driver.get_host_stats(refresh=True))
+ @manager.periodic_task
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
@@ -1933,16 +1897,22 @@ class ComputeManager(manager.SchedulerDependentManager):
db_instance["id"],
power_state=vm_power_state)
+ @manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
+ if FLAGS.reclaim_instance_interval <= 0:
+ LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
+ return
instances = self.db.instance_get_all_by_host(context, self.host)
-
- queue_time = datetime.timedelta(
- seconds=FLAGS.reclaim_instance_interval)
- curtime = utils.utcnow()
for instance in instances:
- if instance['vm_state'] == vm_states.SOFT_DELETE and \
- (curtime - instance['deleted_at']) >= queue_time:
- LOG.info('Deleting %s' % instance['name'])
+ old_enough = (not instance.deleted_at or utils.is_older_than(
+ instance.deleted_at,
+ FLAGS.reclaim_instance_interval))
+ soft_deleted = instance.vm_state == vm_states.SOFT_DELETE
+
+ if soft_deleted and old_enough:
+ instance_id = instance.id
+ LOG.info(_("Reclaiming deleted instance %(instance_id)s"),
+ locals())
self._delete_instance(context, instance)