| | | |
|---|---|---|
| author | Soren Hansen <soren@linux2go.dk> | 2011-01-08 15:35:50 +0100 |
| committer | Soren Hansen <soren@linux2go.dk> | 2011-01-08 15:35:50 +0100 |
| commit | 325330840ebe87da8e5943735b8956c8dfc4d112 (patch) | |
| tree | 08e7ca119859b667889eba903f64b69506b8f8ae /nova/compute | |
| parent | 19ffc1275814a6c00f6ff19dd0c03060143d097a (diff) | |
| parent | 3885195ba05ca5317975797760a0cf81b5e4c647 (diff) | |
| download | nova-325330840ebe87da8e5943735b8956c8dfc4d112.tar.gz nova-325330840ebe87da8e5943735b8956c8dfc4d112.tar.xz nova-325330840ebe87da8e5943735b8956c8dfc4d112.zip | |
Merge with trunk
Diffstat (limited to 'nova/compute')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/api.py | 33 |
| -rw-r--r-- | nova/compute/disk.py | 11 |
| -rw-r--r-- | nova/compute/manager.py | 118 |
| -rw-r--r-- | nova/compute/monitor.py | 39 |
4 files changed, 102 insertions, 99 deletions
```diff
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 0d04d344c..6364a80ef 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,12 +21,12 @@ Handles all requests relating to instances (guest vms).
 """
 
 import datetime
-import logging
 import time
 
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import network
 from nova import quota
 from nova import rpc
@@ -36,6 +36,7 @@ from nova.compute import instance_types
 from nova.db import base
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
 
 
 def generate_default_hostname(instance_id):
@@ -63,13 +64,13 @@ class API(base.Base):
         try:
             instance = self.get(context, instance_id)
         except exception.NotFound as e:
-            logging.warning("Instance %d was not found in get_network_topic",
-                            instance_id)
+            LOG.warning(_("Instance %d was not found in get_network_topic"),
+                        instance_id)
             raise e
 
         host = instance['host']
         if not host:
-            raise exception.Error("Instance %d has no host" % instance_id)
+            raise exception.Error(_("Instance %d has no host") % instance_id)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
         return rpc.call(context,
                         topic,
@@ -88,10 +89,10 @@ class API(base.Base):
         type_data = instance_types.INSTANCE_TYPES[instance_type]
         num_instances = quota.allowed_instances(context, max_count, type_data)
         if num_instances < min_count:
-            logging.warn("Quota exceeeded for %s, tried to run %s instances",
-                         context.project_id, min_count)
-            raise quota.QuotaError("Instance quota exceeded. You can only "
-                                   "run %s more instances of this type." %
+            LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
+                     context.project_id, min_count)
+            raise quota.QuotaError(_("Instance quota exceeded. You can only "
+                                     "run %s more instances of this type.") %
                                    num_instances, "InstanceLimitExceeded")
 
         is_vpn = image_id == FLAGS.vpn_image_id
@@ -105,7 +106,7 @@ class API(base.Base):
         if kernel_id == str(FLAGS.null_kernel):
             kernel_id = None
             ramdisk_id = None
-            logging.debug("Creating a raw instance")
+            LOG.debug(_("Creating a raw instance"))
         # Make sure we have access to kernel and ramdisk (if not raw)
         if kernel_id:
             self.image_service.show(context, kernel_id)
@@ -152,7 +153,7 @@ class API(base.Base):
 
         elevated = context.elevated()
         instances = []
-        logging.debug(_("Going to run %s instances..."), num_instances)
+        LOG.debug(_("Going to run %s instances..."), num_instances)
         for num in range(num_instances):
             instance = dict(mac_address=utils.generate_mac(),
                             launch_index=num,
@@ -176,7 +177,7 @@ class API(base.Base):
             instance = self.update(context, instance_id, **updates)
             instances.append(instance)
 
-            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+            LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
                           context.project_id, context.user_id, instance_id)
             rpc.cast(context,
                      FLAGS.scheduler_topic,
@@ -275,17 +276,17 @@ class API(base.Base):
         return self.db.instance_update(context, instance_id, kwargs)
 
     def delete(self, context, instance_id):
-        logging.debug("Going to try and terminate %s" % instance_id)
+        LOG.debug(_("Going to try and terminate %s"), instance_id)
         try:
             instance = self.get(context, instance_id)
         except exception.NotFound as e:
-            logging.warning(_("Instance %s was not found during terminate"),
-                            instance_id)
+            LOG.warning(_("Instance %d was not found during terminate"),
+                        instance_id)
             raise e
 
         if (instance['state_description'] == 'terminating'):
-            logging.warning(_("Instance %s is already being terminated"),
-                            instance_id)
+            LOG.warning(_("Instance %d is already being terminated"),
+                        instance_id)
             return
 
         self.update(context,
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 814a258cd..741499294 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file.
 """
 
-import logging
 import os
 import tempfile
 
 from nova import exception
 from nova import flags
+from nova import log as logging
 
 
+LOG = logging.getLogger('nova.compute.disk')
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
                      'minimum size in bytes of root partition')
@@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
         execute('resize2fs %s' % infile)
         file_size = FLAGS.minimum_root_size
     elif file_size % sector_size != 0:
-        logging.warn(_("Input partition size not evenly divisible by"
-                       " sector size: %d / %d"), file_size, sector_size)
+        LOG.warn(_("Input partition size not evenly divisible by"
+                   " sector size: %d / %d"), file_size, sector_size)
     primary_sectors = file_size / sector_size
 
     if local_bytes % sector_size != 0:
-        logging.warn(_("Bytes for local storage not evenly divisible"
-                       " by sector size: %d / %d"), local_bytes, sector_size)
+        LOG.warn(_("Bytes for local storage not evenly divisible"
+                   " by sector size: %d / %d"), local_bytes, sector_size)
     local_sectors = local_bytes / sector_size
 
     mbr_last = 62  # a
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 1fc95a13d..6ae9b689a 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,11 +35,11 @@ terminating it.
 """
 
 import datetime
-import logging
 import functools
 
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import manager
 from nova import rpc
 from nova import utils
@@ -53,6 +53,8 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
 flags.DEFINE_string('stub_network', False,
                     'Stub network related code')
 
+LOG = logging.getLogger('nova.compute.manager')
+
 
 def checks_instance_lock(function):
     """
@@ -64,23 +66,25 @@ def checks_instance_lock(function):
 
     @functools.wraps(function)
     def decorated_function(self, context, instance_id, *args, **kwargs):
-        logging.info(_("check_instance_lock: decorating: |%s|"), function)
-        logging.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
-                     self,
-                     context,
-                     instance_id)
+        LOG.info(_("check_instance_lock: decorating: |%s|"), function,
+                 context=context)
+        LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
+                 self, context, instance_id, context=context)
         locked = self.get_lock(context, instance_id)
         admin = context.is_admin
-        logging.info(_("check_instance_lock: locked: |%s|"), locked)
-        logging.info(_("check_instance_lock: admin: |%s|"), admin)
+        LOG.info(_("check_instance_lock: locked: |%s|"), locked,
+                 context=context)
+        LOG.info(_("check_instance_lock: admin: |%s|"), admin,
+                 context=context)
 
         # if admin or unlocked call function otherwise log error
         if admin or not locked:
-            logging.info(_("check_instance_lock: executing: |%s|"), function)
+            LOG.info(_("check_instance_lock: executing: |%s|"), function,
+                     context=context)
             function(self, context, instance_id, *args, **kwargs)
         else:
-            logging.error(_("check_instance_lock: not executing |%s|"),
-                          function)
+            LOG.error(_("check_instance_lock: not executing |%s|"),
+                      function, context=context)
             return False
 
     return decorated_function
@@ -151,7 +155,8 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
         if instance_ref['name'] in self.driver.list_instances():
             raise exception.Error(_("Instance has already been created"))
-        logging.debug(_("instance %s: starting..."), instance_id)
+        LOG.audit(_("instance %s: starting..."), instance_id,
+                  context=context)
         self.db.instance_update(context,
                                 instance_id,
                                 {'host': self.host})
@@ -189,8 +194,8 @@ class ComputeManager(manager.Manager):
                                    instance_id,
                                    {'launched_at': now})
         except Exception:  # pylint: disable-msg=W0702
-            logging.exception(_("instance %s: Failed to spawn"),
-                              instance_ref['name'])
+            LOG.exception(_("instance %s: Failed to spawn"), instance_id,
+                          context=context)
             self.db.instance_set_state(context,
                                        instance_id,
                                        power_state.SHUTDOWN)
@@ -202,14 +207,15 @@ class ComputeManager(manager.Manager):
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this machine."""
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Terminating instance %s"), instance_id, context=context)
 
         if not FLAGS.stub_network:
             address = self.db.instance_get_floating_address(context,
                                                             instance_ref['id'])
             if address:
-                logging.debug(_("Disassociating address %s") % address)
+                LOG.debug(_("Disassociating address %s"), address,
+                          context=context)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.
@@ -221,15 +227,14 @@ class ComputeManager(manager.Manager):
             address = self.db.instance_get_fixed_address(context,
                                                          instance_ref['id'])
             if address:
-                logging.debug(_("Deallocating address %s") % address)
+                LOG.debug(_("Deallocating address %s"), address,
+                          context=context)
                 # NOTE(vish): Currently, nothing needs to be done on the
                 #             network node until release. If this changes,
                 #             we will need to cast here.
                 self.network_manager.deallocate_fixed_ip(context.elevated(),
                                                          address)
 
-        logging.debug(_("instance %s: terminating"), instance_id)
-
         volumes = instance_ref.get('volumes', []) or []
         for volume in volumes:
             self.detach_volume(context, instance_id, volume['id'])
@@ -249,15 +254,16 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
         self._update_state(context, instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
 
         if instance_ref['state'] != power_state.RUNNING:
-            logging.warn(_('trying to reboot a non-running '
-                           'instance: %s (state: %s excepted: %s)'),
-                         instance_id,
-                         instance_ref['state'],
-                         power_state.RUNNING)
+            LOG.warn(_('trying to reboot a non-running '
+                       'instance: %s (state: %s excepted: %s)'),
+                     instance_id,
+                     instance_ref['state'],
+                     power_state.RUNNING,
+                     context=context)
 
-        logging.debug(_('instance %s: rebooting'), instance_ref['name'])
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -277,13 +283,12 @@ class ComputeManager(manager.Manager):
         # potentially?
         self._update_state(context, instance_id)
 
-        logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+        LOG.audit(_('instance %s: snapshotting'), instance_id,
+                  context=context)
         if instance_ref['state'] != power_state.RUNNING:
-            logging.warn(_('trying to snapshot a non-running '
-                           'instance: %s (state: %s excepted: %s)'),
-                         instance_id,
-                         instance_ref['state'],
-                         power_state.RUNNING)
+            LOG.warn(_('trying to snapshot a non-running '
+                       'instance: %s (state: %s excepted: %s)'),
+                     instance_id, instance_ref['state'], power_state.RUNNING)
 
         self.driver.snapshot(instance_ref, name)
@@ -293,8 +298,7 @@ class ComputeManager(manager.Manager):
         """Rescue an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: rescuing'), instance_id)
+        LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -309,8 +313,7 @@ class ComputeManager(manager.Manager):
         """Rescue an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: unrescuing'), instance_id)
+        LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -329,8 +332,7 @@ class ComputeManager(manager.Manager):
         """Pause an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug('instance %s: pausing', instance_id)
+        LOG.audit(_('instance %s: pausing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -347,8 +349,7 @@ class ComputeManager(manager.Manager):
         """Unpause a paused instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug('instance %s: unpausing', instance_id)
+        LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -365,8 +366,8 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
 
         if instance_ref["state"] == power_state.RUNNING:
-            logging.debug(_("instance %s: retrieving diagnostics"),
-                          instance_id)
+            LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
+                      context=context)
             return self.driver.get_diagnostics(instance_ref)
 
     @exception.wrap_exception
@@ -378,8 +379,7 @@ class ComputeManager(manager.Manager):
         """
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: suspending'), instance_id)
+        LOG.audit(_('instance %s: suspending'), instance_id, context=context)
         self.db.instance_set_state(context, instance_id,
                                    power_state.NOSTATE,
                                    'suspending')
@@ -398,8 +398,7 @@ class ComputeManager(manager.Manager):
         """
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: resuming'), instance_id)
+        LOG.audit(_('instance %s: resuming'), instance_id, context=context)
         self.db.instance_set_state(context, instance_id,
                                    power_state.NOSTATE,
                                    'resuming')
@@ -418,7 +417,7 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: locking'), instance_id)
+        LOG.debug(_('instance %s: locking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': True})
 
     @exception.wrap_exception
@@ -430,7 +429,7 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: unlocking'), instance_id)
+        LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': False})
 
     @exception.wrap_exception
@@ -440,7 +439,8 @@ class ComputeManager(manager.Manager):
         """
         context = context.elevated()
-        logging.debug(_('instance %s: getting locked state'), instance_id)
+        LOG.debug(_('instance %s: getting locked state'), instance_id,
+                  context=context)
         instance_ref = self.db.instance_get(context, instance_id)
         return instance_ref['locked']
 
@@ -448,9 +448,9 @@ class ComputeManager(manager.Manager):
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: getting console output"), instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
-
+        LOG.audit(_("Get console output for instance %s"), instance_id,
+                  context=context)
         return self.driver.get_console_output(instance_ref)
 
     @exception.wrap_exception
@@ -458,9 +458,9 @@ class ComputeManager(manager.Manager):
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
-                      volume_id, mountpoint)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+                  volume_id, mountpoint, context=context)
         dev_path = self.volume_manager.setup_compute_volume(context,
                                                             volume_id)
         try:
@@ -475,8 +475,8 @@ class ComputeManager(manager.Manager):
             # NOTE(vish): The inline callback eats the exception info so we
             #             log the traceback here and reraise the same
             #             ecxception below.
-            logging.exception(_("instance %s: attach failed %s, removing"),
-                              instance_id, mountpoint)
+            LOG.exception(_("instance %s: attach failed %s, removing"),
+                          instance_id, mountpoint, context=context)
            self.volume_manager.remove_compute_volume(context, volume_id)
            raise exc
@@ -488,14 +488,14 @@ class ComputeManager(manager.Manager):
     def detach_volume(self, context, instance_id, volume_id):
         """Detach a volume from an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: detaching volume %s"),
-                      instance_id,
-                      volume_id)
         instance_ref = self.db.instance_get(context, instance_id)
         volume_ref = self.db.volume_get(context, volume_id)
+        LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
+                  volume_id, volume_ref['mountpoint'], instance_id,
+                  context=context)
         if instance_ref['name'] not in self.driver.list_instances():
-            logging.warn(_("Detaching volume from unknown instance %s"),
-                         instance_ref['name'])
+            LOG.warn(_("Detaching volume from unknown instance %s"),
+                     instance_id, context=context)
         else:
             self.driver.detach_volume(instance_ref['name'],
                                       volume_ref['mountpoint'])
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 60c347a5e..14d0e8ca1 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -25,19 +25,17 @@ Instance Monitoring:
 """
 
 import datetime
-import logging
 import os
-import sys
 import time
 
 import boto
 import boto.s3
 import rrdtool
 
-from twisted.internet import defer
 from twisted.internet import task
 from twisted.application import service
 
 from nova import flags
+from nova import log as logging
 from nova.virt import connection as virt_connection
@@ -91,6 +89,9 @@ RRD_VALUES = {
 
 utcnow = datetime.datetime.utcnow
 
+LOG = logging.getLogger('nova.compute.monitor')
+
+
 def update_rrd(instance, name, data):
     """
     Updates the specified RRD file.
@@ -255,20 +256,20 @@ class Instance(object):
         Updates the instances statistics and stores the resulting graphs
         in the internal object store on the cloud controller.
         """
-        logging.debug(_('updating %s...'), self.instance_id)
+        LOG.debug(_('updating %s...'), self.instance_id)
 
         try:
             data = self.fetch_cpu_stats()
             if data != None:
-                logging.debug('CPU: %s', data)
+                LOG.debug('CPU: %s', data)
                 update_rrd(self, 'cpu', data)
 
             data = self.fetch_net_stats()
-            logging.debug('NET: %s', data)
+            LOG.debug('NET: %s', data)
             update_rrd(self, 'net', data)
 
             data = self.fetch_disk_stats()
-            logging.debug('DISK: %s', data)
+            LOG.debug('DISK: %s', data)
             update_rrd(self, 'disk', data)
 
             # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@@ -285,7 +286,7 @@ class Instance(object):
             graph_disk(self, '1w')
             graph_disk(self, '1m')
         except Exception:
-            logging.exception(_('unexpected error during update'))
+            LOG.exception(_('unexpected error during update'))
 
         self.last_updated = utcnow()
 
@@ -309,7 +310,7 @@ class Instance(object):
         self.cputime = float(info['cpu_time'])
         self.cputime_last_updated = utcnow()
 
-        logging.debug('CPU: %d', self.cputime)
+        LOG.debug('CPU: %d', self.cputime)
 
         # Skip calculation on first pass. Need delta to get a meaningful value.
         if cputime_last_updated == None:
@@ -319,17 +320,17 @@ class Instance(object):
         d = self.cputime_last_updated - cputime_last_updated
         t = d.days * 86400 + d.seconds
 
-        logging.debug('t = %d', t)
+        LOG.debug('t = %d', t)
 
         # Calculate change over time in number of nanoseconds of CPU time used.
         cputime_delta = self.cputime - cputime_last
 
-        logging.debug('cputime_delta = %s', cputime_delta)
+        LOG.debug('cputime_delta = %s', cputime_delta)
 
         # Get the number of virtual cpus in this domain.
         vcpus = int(info['num_cpu'])
 
-        logging.debug('vcpus = %d', vcpus)
+        LOG.debug('vcpus = %d', vcpus)
 
         # Calculate CPU % used and cap at 100.
         return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
@@ -351,8 +352,8 @@ class Instance(object):
                 rd += rd_bytes
                 wr += wr_bytes
             except TypeError:
-                logging.error(_('Cannot get blockstats for "%s" on "%s"'),
-                              disk, self.instance_id)
+                LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
+                          disk, self.instance_id)
                 raise
 
         return '%d:%d' % (rd, wr)
@@ -373,8 +374,8 @@ class Instance(object):
                 rx += stats[0]
                 tx += stats[4]
             except TypeError:
-                logging.error(_('Cannot get ifstats for "%s" on "%s"'),
-                              interface, self.instance_id)
+                LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
+                          interface, self.instance_id)
                 raise
 
         return '%d:%d' % (rx, tx)
@@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service):
         try:
             conn = virt_connection.get_connection(read_only=True)
         except Exception, exn:
-            logging.exception(_('unexpected exception getting connection'))
+            LOG.exception(_('unexpected exception getting connection'))
             time.sleep(FLAGS.monitoring_instances_delay)
             return
 
@@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service):
         try:
             self.updateInstances_(conn, domain_ids)
         except Exception, exn:
-            logging.exception('updateInstances_')
+            LOG.exception('updateInstances_')
 
     def updateInstances_(self, conn, domain_ids):
         for domain_id in domain_ids:
             if not domain_id in self._instances:
                 instance = Instance(conn, domain_id)
                 self._instances[domain_id] = instance
-                logging.debug(_('Found instance: %s'), domain_id)
+                LOG.debug(_('Found instance: %s'), domain_id)
 
         for key in self._instances.keys():
             instance = self._instances[key]
```
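Every hunk above applies the same conversion: drop the stdlib `logging` import in favour of `from nova import log as logging`, give each module its own `LOG = logging.getLogger('nova.compute.<module>')`, wrap user-facing messages in `_()` for translation, move arguments out of `%`-interpolation into logger arguments, and pass the request `context=` through (with an `audit` level for operator-visible events). The snippet below is only an illustrative stand-in built on the standard library to show that call pattern; it is not nova's actual `nova/log.py`, and the `ContextAdapter`, `getLogger`, and `AUDIT` names here are assumptions made for the example.

```python
# Illustrative stand-in only -- NOT nova/log.py.  It mimics the call pattern
# this diff converges on: a per-module logger, an extra "audit" level, a
# gettext-style _() wrapper, and an optional context= keyword on every call.
import logging

AUDIT = logging.INFO + 1          # assumption: audit sits just above INFO
logging.addLevelName(AUDIT, 'AUDIT')


def _(msg):
    """Placeholder for the gettext translation hook used throughout nova."""
    return msg


class ContextAdapter(logging.LoggerAdapter):
    """Accepts context= on every call, like the LOG objects in the diff."""

    def process(self, msg, kwargs):
        # Pop the request context and prefix its id (if any) onto the message.
        context = kwargs.pop('context', None)
        request_id = getattr(context, 'request_id', None) or '-'
        return '[%s] %s' % (request_id, msg), kwargs

    def audit(self, msg, *args, **kwargs):
        # Goes through LoggerAdapter.log(), so process() still strips context=.
        self.log(AUDIT, msg, *args, **kwargs)


def getLogger(name):
    return ContextAdapter(logging.getLogger(name), {})


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    LOG = getLogger('nova.compute.manager')
    # Same shape as the converted calls above.
    LOG.audit(_("Rebooting instance %s"), 42, context=None)
    LOG.debug(_("instance %s: locking"), 42, context=None)
```

Keeping the translated template and its arguments separate, rather than pre-formatting with `%`, lets the logger defer interpolation and keeps message catalogs keyed on the untranslated string; that is why the diff consistently rewrites calls such as `logging.debug(_("Disassociating address %s") % address)` into `LOG.debug(_("Disassociating address %s"), address, ...)`.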
