From be9a3cd7e17edac4032c8ae554f75d725b0ad54a Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Mon, 13 Dec 2010 16:42:35 +0100
Subject: Move security group refresh logic into ComputeAPI.

Add a trigger_security_group_members_refresh to ComputeAPI which finds the
hosts that have instances that have security groups that reference a
security group in which a new instance has just been placed, and sends a
refresh_security_group_members to each of them.
---
 nova/compute/api.py     | 61 +++++++++++++++++++++++++++++++++++++++++++++++++
 nova/compute/manager.py | 16 ++++++++++---
 2 files changed, 74 insertions(+), 3 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8e0efa4cc..27010d513 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -24,6 +24,7 @@ import datetime
 import logging
 import time
 
+from nova import context
 from nova import db
 from nova import exception
 from nova import flags
@@ -165,6 +166,10 @@ class ComputeAPI(base.Base):
                          "args": {"topic": FLAGS.compute_topic,
                                   "instance_id": instance_id}})
 
+
+        for group_id in security_groups:
+            self.trigger_security_group_members_refresh(elevated, group_id)
+
         return instances
 
     def ensure_default_security_group(self, context):
@@ -184,6 +189,62 @@ class ComputeAPI(base.Base):
                       'project_id': context.project_id}
             db.security_group_create(context, values)
 
+
+    def trigger_security_group_rules_refresh(self, context, security_group_id):
+        """Called when a rule is added to or removed from a security_group"""
+
+        security_group = db.security_group_get(context, security_group_id)
+
+        hosts = set()
+        for instance in security_group['instances']:
+            if instance['host'] is not None:
+                hosts.add(instance['host'])
+
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group",
+                      "args": {"security_group_id": security_group.id}})
+
+
+    def trigger_security_group_members_refresh(self, context, group_id):
+        """Called when a security group gains a new or loses a member
+
+        Sends an update request to each compute node for whom this is
+        relevant."""
+
+        # First, we get the security group rules that reference this group as
+        # the grantee..
+        security_group_rules = \
+            db.security_group_rule_get_by_security_group_grantee(context,
+                                                                 group_id)
+
+        # ..then we distill the security groups to which they belong..
+        security_groups = set()
+        for rule in security_group_rules:
+            security_groups.add(rule['parent_group_id'])
+
+        # ..then we find the instances that are members of these groups..
+        instances = set()
+        for security_group in security_groups:
+            for instance in security_group['instances']:
+                instances.add(instance['id'])
+
+        # ...then we find the hosts where they live...
+        hosts = set()
+        for instance in instances:
+            if instance['host']:
+                hosts.add(instance['host'])
+
+        # ...and finally we tell these nodes to refresh their view of this
+        # particular security group.
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group_members",
+                      "args": {"security_group_id": group_id}})
+
+
     def update_instance(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
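Both trigger_* methods added to api.py above share the same fan-out shape: collect the distinct hosts of the affected instances, then fire one asynchronous rpc.cast per host. The sketch below is only an illustration of that shape, not code from the patch; refresh_on_hosts and cast are invented names, and the instance dicts stand in for whatever rows the db layer returns.

    # Illustrative sketch only -- not part of the patch.  `instances` mimics
    # db rows (dicts with a 'host' key); `cast` stands in for rpc.cast already
    # bound to a context and to the queue_get_for() lookup.
    def refresh_on_hosts(instances, cast, security_group_id,
                         method="refresh_security_group_members"):
        hosts = set()
        for instance in instances:
            if instance.get('host'):       # skip instances not yet scheduled
                hosts.add(instance['host'])
        for host in hosts:
            # one fire-and-forget message per compute host
            cast(host, {"method": method,
                        "args": {"security_group_id": security_group_id}})
        return hosts

    # Two members on node1 and one unscheduled member produce a single cast.
    sent = []
    refresh_on_hosts([{'host': 'node1'}, {'host': 'node1'}, {'host': None}],
                     lambda host, msg: sent.append((host, msg)),
                     security_group_id=42)

The manager-side handlers that consume these casts appear in the manager.py diff that follows.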
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129..ee449c819 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -80,9 +80,19 @@ class ComputeManager(manager.Manager): @defer.inlineCallbacks @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - yield self.driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_rules(security_group_id) + + + @defer.inlineCallbacks + @exception.wrap_exception + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_members(security_group_id) + @defer.inlineCallbacks @exception.wrap_exception -- cgit From 1539df7429a235ba2fefe3f65422fe94b248ac08 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:03:19 +0100 Subject: refresh_security_group renamed to refresh_security_group_rules --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 27010d513..686c1eb0a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -203,7 +203,7 @@ class ComputeAPI(base.Base): for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "refresh_security_group", + {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) -- cgit From b420a3daa5f1b827f49e5d6557aaa0f8d396b81b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:04:06 +0100 Subject: Lots of PEP-8 work. --- nova/compute/api.py | 8 ++------ nova/compute/manager.py | 2 -- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 686c1eb0a..7c91792e3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -166,7 +166,6 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) - for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -189,7 +188,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) - def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group""" @@ -206,10 +204,9 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) - def trigger_security_group_members_refresh(self, context, group_id): """Called when a security group gains a new or loses a member - + Sends an update request to each compute node for whom this is relevant.""" @@ -223,7 +220,7 @@ class ComputeAPI(base.Base): security_groups = set() for rule in security_group_rules: security_groups.add(rule['parent_group_id']) - + # ..then we find the instances that are members of these groups.. 
instances = set() for security_group in security_groups: @@ -244,7 +241,6 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_members", "args": {"security_group_id": group_id}}) - def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ee449c819..f039bca2e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -85,7 +85,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_rules(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def refresh_security_group_members(self, context, @@ -93,7 +92,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_members(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): -- cgit From d1118830c01267082c1371ef2faad1057e7a811e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:55:44 +0100 Subject: Stop returning generators in the refresh_security_group_{rules,members} methods. --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6615ad65b..235237091 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -103,13 +103,13 @@ class ComputeManager(manager.Manager): def refresh_security_group_rules(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_rules(security_group_id) + return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_members(security_group_id) + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): -- cgit From c7305af78049f94dedcbb55480b91a3c6d843b9f Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:23:35 -0500 Subject: Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch. --- nova/compute/api.py | 33 ++++++++++----------- nova/compute/disk.py | 11 +++---- nova/compute/manager.py | 76 ++++++++++++++++++++++++++++--------------------- nova/compute/monitor.py | 39 ++++++++++++------------- 4 files changed, 86 insertions(+), 73 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 07c69bd31..c0141e569 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -21,12 +21,12 @@ Handles all API requests relating to instances (guest vms). 
""" import datetime -import logging import time from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova import utils @@ -34,6 +34,7 @@ from nova.compute import instance_types from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.compute.api') def generate_default_hostname(internal_id): @@ -58,13 +59,13 @@ class ComputeAPI(base.Base): instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) + LOG.warning(_("Instance %d was not found in get_network_topic"), + instance_id) raise e host = instance['host'] if not host: - raise exception.Error("Instance %d has no host" % instance_id) + raise exception.Error(_("Instance %d has no host") % instance_id) topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return rpc.call(context, topic, @@ -83,10 +84,10 @@ class ComputeAPI(base.Base): num_instances = quota.allowed_instances(context, max_count, instance_type) if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." % + LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), + context.project_id, min_count) + raise quota.QuotaError(_("Instance quota exceeded. You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -100,7 +101,7 @@ class ComputeAPI(base.Base): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) if kernel_id: self.image_service.show(context, kernel_id) @@ -147,7 +148,7 @@ class ComputeAPI(base.Base): elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -172,7 +173,7 @@ class ComputeAPI(base.Base): instance = self.update_instance(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -214,18 +215,18 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + LOG.debug(_("Going to try and terminate %d"), instance_id) try: instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %d was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %d is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return self.update_instance(context, diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 
814a258cd..741499294 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file. """ -import logging import os import tempfile from nova import exception from nova import flags +from nova import log as logging +LOG = logging.getLogger('nova.compute.disk') FLAGS = flags.FLAGS flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') @@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) + LOG.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) + LOG.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6e8f34347..cc5724346 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,10 @@ terminating it. """ import datetime -import logging from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -53,6 +53,9 @@ flags.DEFINE_string('stub_network', False, 'Stub network related code') +LOG = logging.getLogger('nova.computemanager') + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -111,7 +114,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.debug(_("instance %s: starting..."), instance_id) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +152,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), + instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -161,14 +164,16 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. 
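The conversion in these hunks swaps module-level logging calls for a LOG object that accepts a context= keyword and wraps every message in _() for translation; nova.log itself is outside this diff, and LOG.audit is not a standard-library level. A rough, self-contained approximation of those two conventions, assuming the context object carries a request_id, might look like this:

    # Rough approximation only; nova.log is not shown in this patch series.
    import gettext
    import logging

    _ = gettext.NullTranslations().gettext   # stand-in for nova's global _()

    class ContextAdapter(logging.LoggerAdapter):
        """Accept a context= keyword and prefix records with its request id."""

        def process(self, msg, kwargs):
            context = kwargs.pop('context', None)  # strip the extra keyword
            if context is not None:
                msg = '[%s] %s' % (getattr(context, 'request_id', context),
                                   msg)
            return msg, kwargs

    logging.basicConfig(level=logging.DEBUG)
    LOG = ContextAdapter(logging.getLogger('nova.compute.manager'), {})
    LOG.debug(_("Disassociating address %s"), '10.0.0.5',
              context='req-12ab34cd')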
@@ -180,14 +185,15 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. self.network_manager.deallocate_fixed_ip(context.elevated(), address) - logging.debug(_("instance %s: terminating"), instance_id) + LOG.debug(_("instance %s: terminating"), instance_id, context=context) volumes = instance_ref.get('volumes', []) or [] for volume in volumes: @@ -207,15 +213,18 @@ class ComputeManager(manager.Manager): context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebooting instance %s//%s"), instance_ref['internal_id'], + instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) - - logging.debug(_('instance %s: rebooting'), instance_ref['name']) + LOG.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), + instance_ref['internal_id'], + instance_ref['state'], + power_state.RUNNING, + context=context) + + LOG.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -251,8 +260,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: rescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -267,8 +276,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: unrescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -287,8 +296,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: pausing', - instance_ref['internal_id']) + LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -305,8 +314,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: unpausing', - instance_ref['internal_id']) + LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -364,8 +373,9 @@ class ComputeManager(manager.Manager): def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Get console output instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) return 
self.driver.get_console_output(instance_ref) @@ -373,8 +383,8 @@ class ComputeManager(manager.Manager): def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint) + LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, + volume_id, mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) @@ -390,8 +400,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -402,14 +412,14 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s//%s"), + volume_id, volume_ref['mountpoint'], + instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_ref['name'], context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5e..cc94e44f4 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.instancemonitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. 
""" - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service): try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] -- cgit From b9576a9f73195656f4a0a1327cd6bee3c4a6b6c9 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:26:41 -0500 Subject: Final few log tweaks, i18n, levels, including contexts, etc. 
--- nova/compute/manager.py | 77 ++++++++++++++++++++++++------------------------- nova/compute/monitor.py | 2 +- 2 files changed, 39 insertions(+), 40 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cc5724346..0098ded74 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -52,8 +52,7 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', flags.DEFINE_string('stub_network', False, 'Stub network related code') - -LOG = logging.getLogger('nova.computemanager') +LOG = logging.getLogger('nova.compute.manager') class ComputeManager(manager.Manager): @@ -114,7 +113,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - LOG.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s//%s: starting..."), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -152,8 +152,9 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - LOG.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s//%s: Failed to spawn"), + instance_ref['internal_id'], instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -193,8 +194,6 @@ class ComputeManager(manager.Manager): self.network_manager.deallocate_fixed_ip(context.elevated(), address) - LOG.debug(_("instance %s: terminating"), instance_id, context=context) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -217,14 +216,13 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - LOG.warn(_('trying to reboot a non-running ' + .warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING, context=context) - LOG.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -244,13 +242,14 @@ class ComputeManager(manager.Manager): # potentially? 
self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s//%s: snapshotting'), + instance_ref['internal_id'], instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s//%s (state: %s excepted: %s)'), + instance_ref['internal_id'], instance_id, + instance_ref['state'], + power_state.RUNNING) self.driver.snapshot(instance_ref, name) @@ -259,9 +258,8 @@ class ComputeManager(manager.Manager): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: rescuing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -276,8 +274,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unrescuing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -295,9 +293,8 @@ class ComputeManager(manager.Manager): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: pausing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -313,9 +310,8 @@ class ComputeManager(manager.Manager): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unpausing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -332,8 +328,9 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - instance_ref["internal_id"]) + LOG.audit(_("instance %s//%s: retrieving diagnostics"), + instance_ref["internal_id"], instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) @exception.wrap_exception @@ -342,8 +339,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: suspending'), - instance_ref['internal_id']) + LOG.audit(_('instance %s//%s: suspending'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'suspending') @@ -359,7 +356,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: resuming'), instance_ref['internal_id']) + 
LOG.audit(_('instance %s//%s: resuming'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'resuming') @@ -374,18 +372,18 @@ class ComputeManager(manager.Manager): """Send the console output for an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Get console output instance %s//%s"), + LOG.audit(_("Get console output for instance %s//%s"), instance_ref['internal_id'], instance_id, context=context) - return self.driver.get_console_output(instance_ref) @exception.wrap_exception def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s//%s: attaching volume %s to %s"), + instance_ref['internal_id'], instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -400,8 +398,9 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - LOG.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint, context=context) + LOG.exception(_("instance %s//%s: attach failed %s, removing"), + instance_ref['internal_id'], instance_id, + mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -418,8 +417,8 @@ class ComputeManager(manager.Manager): volume_id, volume_ref['mountpoint'], instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - LOG.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name'], context=context) + LOG.warn(_("Detaching volume from unknown instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index cc94e44f4..14d0e8ca1 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -89,7 +89,7 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow -LOG = logging.getLogger('nova.instancemonitor') +LOG = logging.getLogger('nova.compute.monitor') def update_rrd(instance, name, data): -- cgit From 45f2f563d1722d2f4d81d49de9d6a3cfd3d0fe3e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:37:47 -0500 Subject: Don't know where that LOG went... --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0098ded74..d68801146 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -216,7 +216,7 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - .warn(_('trying to reboot a non-running ' + LOG.warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], -- cgit From b437a98738c7a564205d1b27e36b844cd54445d1 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Wed, 5 Jan 2011 14:16:14 -0600 Subject: add in xs-console worker and tests. 
--- nova/compute/manager.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 70b175e7c..295e75eca 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -99,6 +99,11 @@ class ComputeManager(manager.Manager): FLAGS.network_topic, host) + + def get_console_pool_info(self, context, console_type): + return self.driver.get_console_pool_info(console_type) + + @exception.wrap_exception def refresh_security_group(self, context, security_group_id, **_kwargs): """This call passes stright through to the virtualization driver.""" -- cgit From f21f078113fc81c1dcee4f3a077bd555c0cf85f6 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Wed, 5 Jan 2011 19:45:46 -0600 Subject: Fix a bunch of pep8 stuff --- nova/compute/manager.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3e73c351c..403b46b2a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -112,11 +112,9 @@ class ComputeManager(manager.Manager): FLAGS.network_topic, host) - def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) - @exception.wrap_exception def refresh_security_group(self, context, security_group_id, **_kwargs): """This call passes stright through to the virtualization driver.""" -- cgit From 19ffc1275814a6c00f6ff19dd0c03060143d097a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 7 Jan 2011 12:08:22 +0100 Subject: Remove redundant import of nova.context. Use db instance attribute rather than module directly. --- nova/compute/api.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 2c2937f48..0d04d344c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -24,7 +24,6 @@ import datetime import logging import time -from nova import context from nova import db from nova import exception from nova import flags @@ -210,7 +209,7 @@ class API(base.Base): def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group""" - security_group = db.security_group_get(context, security_group_id) + security_group = self.db.security_group_get(context, security_group_id) hosts = set() for instance in security_group['instances']: @@ -232,7 +231,8 @@ class API(base.Base): # First, we get the security group rules that reference this group as # the grantee.. security_group_rules = \ - db.security_group_rule_get_by_security_group_grantee(context, + self.db.security_group_rule_get_by_security_group_grantee( + context, group_id) # ..then we distill the security groups to which they belong.. -- cgit
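The final patch's switch from the module-level db to self.db is mostly about testability: base.Base is assumed to resolve the configured database driver once and expose it as an attribute, so tests can hand the API a stub instead of touching a real database. A hedged sketch of that idea follows; Base, API and FakeDB here are simplified stand-ins, not nova code.

    # Simplified sketch of why self.db beats the module-level db module:
    # the attribute can be replaced with a stub when testing the API class.
    class Base(object):
        def __init__(self, db_driver=None):
            # nova's base.Base is assumed to resolve FLAGS.db_driver here;
            # this sketch just stores whatever it is given.
            self.db = db_driver

    class API(Base):
        def hosts_for_security_group(self, context, group_id):
            group = self.db.security_group_get(context, group_id)
            return set(i['host'] for i in group['instances'] if i['host'])

    class FakeDB(object):
        def security_group_get(self, context, group_id):
            return {'instances': [{'host': 'node1'}, {'host': None}]}

    # No real database (or RPC fabric) is needed to exercise the fan-out.
    assert API(FakeDB()).hosts_for_security_group(None, 1) == set(['node1'])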