| author | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-01-11 02:47:35 -0400 |
|---|---|---|
| committer | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-01-11 02:47:35 -0400 |
| commit | 77258c8c5e8f94fbcb15fc4be83cf623ac414bd6 | |
| tree | aafbbd96f9ef443e3154a3f885f3932c0a6604e4 /nova/compute | |
| parent | 2222851017c5c34b1a9ea1d2855f49c45395843c | |
| parent | b8de5221368c4055fc593c6d0d7164f2be956924 | |
Changed shared_ip_group detail routing
Diffstat (limited to 'nova/compute')
| -rw-r--r-- | nova/compute/api.py | 90 |
| -rw-r--r-- | nova/compute/disk.py | 11 |
| -rw-r--r-- | nova/compute/manager.py | 146 |
| -rw-r--r-- | nova/compute/monitor.py | 39 |
4 files changed, 185 insertions, 101 deletions
```diff
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 496c01ef7..fbe3c6e75 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,12 +21,12 @@ Handles all requests relating to instances (guest vms).
 """
 
 import datetime
-import logging
 import time
 
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import network
 from nova import quota
 from nova import rpc
@@ -36,6 +36,7 @@ from nova.compute import instance_types
 from nova.db import base
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
 
 
 def generate_default_hostname(instance_id):
@@ -63,13 +64,13 @@ class API(base.Base):
         try:
             instance = self.get(context, instance_id)
         except exception.NotFound as e:
-            logging.warning("Instance %d was not found in get_network_topic",
-                            instance_id)
+            LOG.warning(_("Instance %d was not found in get_network_topic"),
+                        instance_id)
             raise e
 
         host = instance['host']
         if not host:
-            raise exception.Error("Instance %d has no host" % instance_id)
+            raise exception.Error(_("Instance %d has no host") % instance_id)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
         return rpc.call(context,
                         topic,
@@ -88,10 +89,10 @@ class API(base.Base):
         type_data = instance_types.INSTANCE_TYPES[instance_type]
         num_instances = quota.allowed_instances(context, max_count, type_data)
         if num_instances < min_count:
-            logging.warn("Quota exceeeded for %s, tried to run %s instances",
-                         context.project_id, min_count)
-            raise quota.QuotaError("Instance quota exceeded. You can only "
-                                   "run %s more instances of this type." %
+            LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
+                     context.project_id, min_count)
+            raise quota.QuotaError(_("Instance quota exceeded. You can only "
+                                     "run %s more instances of this type.") %
                                    num_instances, "InstanceLimitExceeded")
 
         is_vpn = image_id == FLAGS.vpn_image_id
@@ -105,7 +106,7 @@ class API(base.Base):
         if kernel_id == str(FLAGS.null_kernel):
             kernel_id = None
             ramdisk_id = None
-            logging.debug("Creating a raw instance")
+            LOG.debug(_("Creating a raw instance"))
         # Make sure we have access to kernel and ramdisk (if not raw)
         logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
@@ -154,7 +155,7 @@ class API(base.Base):
         elevated = context.elevated()
         instances = []
-        logging.debug(_("Going to run %s instances..."), num_instances)
+        LOG.debug(_("Going to run %s instances..."), num_instances)
         for num in range(num_instances):
             instance = dict(mac_address=utils.generate_mac(),
                             launch_index=num,
@@ -179,7 +180,7 @@ class API(base.Base):
             instance = self.update(context, instance_id, **updates)
             instances.append(instance)
 
-            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+            LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
                       context.project_id, context.user_id, instance_id)
             rpc.cast(context,
                      FLAGS.scheduler_topic,
@@ -187,6 +188,9 @@ class API(base.Base):
                       "args": {"topic": FLAGS.compute_topic,
                                "instance_id": instance_id}})
 
+        for group_id in security_groups:
+            self.trigger_security_group_members_refresh(elevated, group_id)
+
         return instances
 
     def ensure_default_security_group(self, context):
@@ -206,6 +210,60 @@
                       'project_id': context.project_id}
             db.security_group_create(context, values)
 
+    def trigger_security_group_rules_refresh(self, context, security_group_id):
+        """Called when a rule is added to or removed from a security_group"""
+
+        security_group = self.db.security_group_get(context, security_group_id)
+
+        hosts = set()
+        for instance in security_group['instances']:
+            if instance['host'] is not None:
+                hosts.add(instance['host'])
+
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group_rules",
+                      "args": {"security_group_id": security_group.id}})
+
+    def trigger_security_group_members_refresh(self, context, group_id):
+        """Called when a security group gains a new or loses a member
+
+        Sends an update request to each compute node for whom this is
+        relevant."""
+
+        # First, we get the security group rules that reference this group as
+        # the grantee..
+        security_group_rules = \
+                self.db.security_group_rule_get_by_security_group_grantee(
+                                                                     context,
+                                                                     group_id)
+
+        # ..then we distill the security groups to which they belong..
+        security_groups = set()
+        for rule in security_group_rules:
+            security_groups.add(rule['parent_group_id'])
+
+        # ..then we find the instances that are members of these groups..
+        instances = set()
+        for security_group in security_groups:
+            for instance in security_group['instances']:
+                instances.add(instance['id'])
+
+        # ...then we find the hosts where they live...
+        hosts = set()
+        for instance in instances:
+            if instance['host']:
+                hosts.add(instance['host'])
+
+        # ...and finally we tell these nodes to refresh their view of this
+        # particular security group.
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group_members",
+                      "args": {"security_group_id": group_id}})
+
     def update(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
```
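Both `trigger_*` helpers added above share one fan-out shape: collect the distinct hosts whose instances are affected, then fire one asynchronous `rpc.cast` per host. Below is a minimal, self-contained sketch of that shape; the `cast` callable and topic naming are illustrative stand-ins, not nova's real `rpc`/`db` API.

```python
# Sketch of the per-host fan-out used by both trigger_* methods above.
# `cast` and `topic_for_host` stand in for nova's rpc.cast / db.queue_get_for.

def distinct_hosts(instances):
    """Hosts currently running at least one of the given instances."""
    return set(inst['host'] for inst in instances if inst.get('host'))


def notify_hosts(cast, topic_for_host, hosts, method, **args):
    """One asynchronous message per host, mirroring the rpc.cast loop."""
    for host in sorted(hosts):
        cast(topic_for_host(host), {'method': method, 'args': args})


if __name__ == '__main__':
    instances = [
        {'id': 1, 'host': 'compute1'},
        {'id': 2, 'host': 'compute1'},  # same host: deduplicated by the set
        {'id': 3, 'host': 'compute2'},
        {'id': 4, 'host': None},        # not scheduled anywhere yet: skipped
    ]
    sent = []
    notify_hosts(lambda topic, msg: sent.append((topic, msg)),
                 lambda host: 'compute.%s' % host,
                 distinct_hosts(instances),
                 'refresh_security_group_members',
                 security_group_id=42)
    for topic, msg in sent:
        print(topic, msg)  # two messages, one per distinct host
```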
```diff
@@ -221,17 +279,17 @@ class API(base.Base):
         return self.db.instance_update(context, instance_id, kwargs)
 
     def delete(self, context, instance_id):
-        logging.debug("Going to try and terminate %s" % instance_id)
+        LOG.debug(_("Going to try and terminate %s"), instance_id)
         try:
             instance = self.get(context, instance_id)
         except exception.NotFound as e:
-            logging.warning(_("Instance %s was not found during terminate"),
-                            instance_id)
+            LOG.warning(_("Instance %d was not found during terminate"),
+                        instance_id)
             raise e
 
         if (instance['state_description'] == 'terminating'):
-            logging.warning(_("Instance %s is already being terminated"),
-                            instance_id)
+            LOG.warning(_("Instance %d is already being terminated"),
+                        instance_id)
             return
 
         self.update(context,
```
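The api.py changes are mostly mechanical, but the pattern is worth noting: one named logger per module, messages wrapped in the gettext alias `_()`, and format arguments passed to the logger rather than pre-interpolated with `%`. A small sketch of why, using only the stdlib logger that `nova.log` wraps (the `_` stub is an assumption here; nova installs the real gettext alias globally):

```python
import logging

LOG = logging.getLogger('nova.compute.api')   # one named logger per module

_ = lambda msg: msg   # stand-in for the gettext alias nova installs globally


def run_instances(num_instances):
    # Lazy interpolation: the string is only formatted if DEBUG is enabled,
    # and translators see a single stable msgid with a %s placeholder.
    LOG.debug(_("Going to run %s instances..."), num_instances)
    # The style this commit removes, eager interpolation, formats the
    # message even when the level is filtered out:
    #     logging.debug("Going to run %s instances..." % num_instances)


logging.basicConfig(level=logging.DEBUG)
run_instances(3)
```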
""" -import logging import os import tempfile from nova import exception from nova import flags +from nova import log as logging +LOG = logging.getLogger('nova.compute.disk') FLAGS = flags.FLAGS flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') @@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) + LOG.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) + LOG.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d5136eb26..018e0bbbe 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,10 +36,12 @@ terminating it. import datetime import logging +import socket import functools from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -52,6 +54,11 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') flags.DEFINE_string('stub_network', False, 'Stub network related code') +flags.DEFINE_string('console_host', socket.gethostname(), + 'Console proxy host to use to connect to instances on' + 'this host.') + +LOG = logging.getLogger('nova.compute.manager') def checks_instance_lock(function): @@ -64,23 +71,25 @@ def checks_instance_lock(function): @functools.wraps(function) def decorated_function(self, context, instance_id, *args, **kwargs): - logging.info(_("check_instance_lock: decorating: |%s|"), function) - logging.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), - self, - context, - instance_id) + LOG.info(_("check_instance_lock: decorating: |%s|"), function, + context=context) + LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, context, instance_id, context=context) locked = self.get_lock(context, instance_id) admin = context.is_admin - logging.info(_("check_instance_lock: locked: |%s|"), locked) - logging.info(_("check_instance_lock: admin: |%s|"), admin) + LOG.info(_("check_instance_lock: locked: |%s|"), locked, + context=context) + LOG.info(_("check_instance_lock: admin: |%s|"), admin, + context=context) # if admin or unlocked call function otherwise log error if admin or not locked: - logging.info(_("check_instance_lock: executing: |%s|"), function) + LOG.info(_("check_instance_lock: executing: |%s|"), function, + context=context) function(self, context, instance_id, *args, **kwargs) else: - logging.error(_("check_instance_lock: not executing |%s|"), - function) + LOG.error(_("check_instance_lock: not executing |%s|"), + function, context=context) return False return decorated_function @@ -118,6 +127,15 @@ class ComputeManager(manager.Manager): state = power_state.NOSTATE self.db.instance_set_state(context, instance_id, state) + def get_console_topic(self, context, **_kwargs): + """Retrieves the console host for a project on this host + 
```diff
@@ -118,6 +127,15 @@ class ComputeManager(manager.Manager):
             state = power_state.NOSTATE
         self.db.instance_set_state(context, instance_id, state)
 
+    def get_console_topic(self, context, **_kwargs):
+        """Retrieves the console host for a project on this host
+
+        Currently this is just set in the flags for each compute
+        host."""
+        #TODO(mdragon): perhaps make this variable by console_type?
+        return self.db.queue_get_for(context,
+                                     FLAGS.console_topic,
+                                     FLAGS.console_host)
+
     def get_network_topic(self, context, **_kwargs):
         """Retrieves the network host for a project on this host"""
         # TODO(vish): This method should be memoized. This will make
@@ -132,10 +150,20 @@
                                       FLAGS.network_topic,
                                       host)
 
+    def get_console_pool_info(self, context, console_type):
+        return self.driver.get_console_pool_info(console_type)
+
     @exception.wrap_exception
-    def refresh_security_group(self, context, security_group_id, **_kwargs):
-        """This call passes stright through to the virtualization driver."""
-        self.driver.refresh_security_group(security_group_id)
+    def refresh_security_group_rules(self, context,
+                                     security_group_id, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
+        return self.driver.refresh_security_group_rules(security_group_id)
+
+    @exception.wrap_exception
+    def refresh_security_group_members(self, context,
+                                       security_group_id, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
+        return self.driver.refresh_security_group_members(security_group_id)
 
     @exception.wrap_exception
     def run_instance(self, context, instance_id, **_kwargs):
@@ -144,7 +172,8 @@
         instance_ref = self.db.instance_get(context, instance_id)
         if instance_ref['name'] in self.driver.list_instances():
             raise exception.Error(_("Instance has already been created"))
-        logging.debug(_("instance %s: starting..."), instance_id)
+        LOG.audit(_("instance %s: starting..."), instance_id,
+                  context=context)
         self.db.instance_update(context,
                                 instance_id,
                                 {'host': self.host})
@@ -182,8 +211,8 @@
                                     instance_id,
                                     {'launched_at': now})
         except Exception:  # pylint: disable-msg=W0702
-            logging.exception(_("instance %s: Failed to spawn"),
-                              instance_ref['name'])
+            LOG.exception(_("instance %s: Failed to spawn"), instance_id,
+                          context=context)
             self.db.instance_set_state(context,
                                        instance_id,
                                        power_state.SHUTDOWN)
```
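`get_console_topic` routes console requests through `queue_get_for`, using the new `console_host` flag (which defaults to the compute host's own name). The sketch below shows the per-host queue naming this relies on; the `<topic>.<host>` convention is an assumption about nova's `db.queue_get_for`, and the helper is illustrative only:

```python
import socket

CONSOLE_TOPIC = 'console'

# Mirrors the new flag: unless configured otherwise, the console proxy is
# assumed to live on the compute host itself.
console_host = socket.gethostname()


def queue_get_for(topic, host):
    """Per-host queue name, assuming the '<topic>.<host>' convention."""
    return '%s.%s' % (topic, host)


print(queue_get_for(CONSOLE_TOPIC, console_host))
# e.g. 'console.compute1' -- a cast to this topic reaches only the console
# service on that particular host, not every console service in the cloud.
```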
```diff
@@ -195,14 +224,15 @@
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this machine."""
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Terminating instance %s"), instance_id, context=context)
 
         if not FLAGS.stub_network:
             address = self.db.instance_get_floating_address(context,
                                                             instance_ref['id'])
             if address:
-                logging.debug(_("Disassociating address %s") % address)
+                LOG.debug(_("Disassociating address %s"), address,
+                          context=context)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.
@@ -214,15 +244,14 @@
             address = self.db.instance_get_fixed_address(context,
                                                          instance_ref['id'])
             if address:
-                logging.debug(_("Deallocating address %s") % address)
+                LOG.debug(_("Deallocating address %s"), address,
+                          context=context)
                 # NOTE(vish): Currently, nothing needs to be done on the
                 #             network node until release. If this changes,
                 #             we will need to cast here.
                 self.network_manager.deallocate_fixed_ip(context.elevated(),
                                                          address)
 
-        logging.debug(_("instance %s: terminating"), instance_id)
-
         volumes = instance_ref.get('volumes', []) or []
         for volume in volumes:
             self.detach_volume(context, instance_id, volume['id'])
@@ -242,15 +271,16 @@
         context = context.elevated()
         self._update_state(context, instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
 
         if instance_ref['state'] != power_state.RUNNING:
-            logging.warn(_('trying to reboot a non-running '
-                           'instance: %s (state: %s excepted: %s)'),
-                         instance_id,
-                         instance_ref['state'],
-                         power_state.RUNNING)
+            LOG.warn(_('trying to reboot a non-running '
+                       'instance: %s (state: %s excepted: %s)'),
+                     instance_id,
+                     instance_ref['state'],
+                     power_state.RUNNING,
+                     context=context)
 
-        logging.debug(_('instance %s: rebooting'), instance_ref['name'])
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -270,13 +300,12 @@
         #                 potentially?
         self._update_state(context, instance_id)
 
-        logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+        LOG.audit(_('instance %s: snapshotting'), instance_id,
+                  context=context)
         if instance_ref['state'] != power_state.RUNNING:
-            logging.warn(_('trying to snapshot a non-running '
-                           'instance: %s (state: %s excepted: %s)'),
-                         instance_id,
-                         instance_ref['state'],
-                         power_state.RUNNING)
+            LOG.warn(_('trying to snapshot a non-running '
+                       'instance: %s (state: %s excepted: %s)'),
+                     instance_id, instance_ref['state'], power_state.RUNNING)
 
         self.driver.snapshot(instance_ref, name)
@@ -286,8 +315,7 @@
         """Rescue an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: rescuing'), instance_id)
+        LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -302,8 +330,7 @@
         """Rescue an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: unrescuing'), instance_id)
+        LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -322,8 +349,7 @@
         """Pause an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug('instance %s: pausing', instance_id)
+        LOG.audit(_('instance %s: pausing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -340,8 +366,7 @@
         """Unpause a paused instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug('instance %s: unpausing', instance_id)
+        LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -358,8 +383,8 @@
         instance_ref = self.db.instance_get(context, instance_id)
 
         if instance_ref["state"] == power_state.RUNNING:
-            logging.debug(_("instance %s: retrieving diagnostics"),
-                          instance_id)
+            LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
+                      context=context)
             return self.driver.get_diagnostics(instance_ref)
```
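Most of the hunks above swap `logging.debug` for `LOG.audit(..., context=context)`: user-visible actions get their own audit level, and every record carries the request context. The stdlib has no audit level and no `context=` keyword, so a rough approximation needs a custom level plus a `LoggerAdapter`; the sketch below is an assumption-laden stand-in for what `nova.log` actually implements:

```python
import logging

# Register an AUDIT level between INFO and WARNING, in the spirit of nova.log.
AUDIT = logging.INFO + 1
logging.addLevelName(AUDIT, 'AUDIT')


class ContextAdapter(logging.LoggerAdapter):
    """Accept a context= keyword and prefix records with its request id."""

    def process(self, msg, kwargs):
        context = kwargs.pop('context', None)   # swallow context= like nova
        if context is not None:
            msg = '[%s] %s' % (getattr(context, 'request_id', '-'), msg)
        return msg, kwargs


class Context(object):
    def __init__(self, request_id):
        self.request_id = request_id


logging.basicConfig(level=logging.DEBUG, format='%(levelname)s %(message)s')
LOG = ContextAdapter(logging.getLogger('nova.compute.manager'), {})
LOG.log(AUDIT, 'Rebooting instance %s', 42, context=Context('req-abc123'))
# -> AUDIT [req-abc123] Rebooting instance 42
```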
```diff
@@ -371,8 +396,7 @@
         """
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: suspending'), instance_id)
+        LOG.audit(_('instance %s: suspending'), instance_id, context=context)
         self.db.instance_set_state(context, instance_id,
                                    power_state.NOSTATE,
                                    'suspending')
@@ -391,8 +415,7 @@
         """
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-
-        logging.debug(_('instance %s: resuming'), instance_id)
+        LOG.audit(_('instance %s: resuming'), instance_id, context=context)
         self.db.instance_set_state(context, instance_id,
                                    power_state.NOSTATE,
                                    'resuming')
@@ -411,7 +434,7 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: locking'), instance_id)
+        LOG.debug(_('instance %s: locking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': True})
 
     @exception.wrap_exception
@@ -423,7 +446,7 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: unlocking'), instance_id)
+        LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': False})
 
     @exception.wrap_exception
@@ -433,7 +456,8 @@
         """
         context = context.elevated()
-        logging.debug(_('instance %s: getting locked state'), instance_id)
+        LOG.debug(_('instance %s: getting locked state'), instance_id,
+                  context=context)
         instance_ref = self.db.instance_get(context, instance_id)
         return instance_ref['locked']
 
@@ -441,9 +465,9 @@
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: getting console output"), instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
-
+        LOG.audit(_("Get console output for instance %s"), instance_id,
+                  context=context)
         return self.driver.get_console_output(instance_ref)
 
@@ -451,9 +475,9 @@
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
-                      volume_id, mountpoint)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+                  volume_id, mountpoint, context=context)
         dev_path = self.volume_manager.setup_compute_volume(context,
                                                             volume_id)
         try:
@@ -468,8 +492,8 @@
             # NOTE(vish): The inline callback eats the exception info so we
             #             log the traceback here and reraise the same
             #             ecxception below.
-            logging.exception(_("instance %s: attach failed %s, removing"),
-                              instance_id, mountpoint)
+            LOG.exception(_("instance %s: attach failed %s, removing"),
+                          instance_id, mountpoint, context=context)
             self.volume_manager.remove_compute_volume(context, volume_id)
             raise exc
```
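The attach-volume error path above is a classic cleanup-on-failure shape: log the traceback at the failure site first (the NOTE explains that the inline callback would otherwise eat it), then undo the partial setup, then re-raise. A self-contained sketch; the volume helpers here are illustrative stand-ins for nova's `volume_manager`:

```python
import logging

LOG = logging.getLogger('nova.compute.manager')


def setup_compute_volume(volume_id):
    return '/dev/vdb'                      # pretend the export worked


def driver_attach(instance, dev_path, mountpoint):
    raise RuntimeError('hypervisor attach failed')   # simulate the failure


def remove_compute_volume(volume_id):
    LOG.info('removed compute volume %s', volume_id)


def attach_volume(instance, volume_id, mountpoint):
    dev_path = setup_compute_volume(volume_id)
    try:
        driver_attach(instance, dev_path, mountpoint)
    except Exception:
        # Record the traceback *before* cleanup: if the cleanup below also
        # fails, the root cause is already in the log.
        LOG.exception('instance %s: attach failed %s, removing',
                      instance, mountpoint)
        remove_compute_volume(volume_id)
        raise                              # propagate the original error


logging.basicConfig(level=logging.INFO)
try:
    attach_volume('i-00000001', 'vol-42', '/dev/vdb')
except RuntimeError as exc:
    print('propagated:', exc)
```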
```diff
@@ -481,14 +505,14 @@
     def detach_volume(self, context, instance_id, volume_id):
         """Detach a volume from an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: detaching volume %s"),
-                      instance_id,
-                      volume_id)
         instance_ref = self.db.instance_get(context, instance_id)
         volume_ref = self.db.volume_get(context, volume_id)
+        LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
+                  volume_id, volume_ref['mountpoint'], instance_id,
+                  context=context)
         if instance_ref['name'] not in self.driver.list_instances():
-            logging.warn(_("Detaching volume from unknown instance %s"),
-                         instance_ref['name'])
+            LOG.warn(_("Detaching volume from unknown instance %s"),
+                     instance_id, context=context)
         else:
             self.driver.detach_volume(instance_ref['name'],
                                       volume_ref['mountpoint'])
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 60c347a5e..14d0e8ca1 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -25,19 +25,17 @@ Instance Monitoring:
 
 """
 
 import datetime
-import logging
 import os
-import sys
 import time
 
 import boto
 import boto.s3
 import rrdtool
-from twisted.internet import defer
 from twisted.internet import task
 from twisted.application import service
 
 from nova import flags
+from nova import log as logging
 from nova.virt import connection as virt_connection
@@ -91,6 +89,9 @@ RRD_VALUES = {
 
 utcnow = datetime.datetime.utcnow
 
+LOG = logging.getLogger('nova.compute.monitor')
+
+
 def update_rrd(instance, name, data):
     """
     Updates the specified RRD file.
@@ -255,20 +256,20 @@
         Updates the instances statistics and stores the resulting graphs
         in the internal object store on the cloud controller.
         """
-        logging.debug(_('updating %s...'), self.instance_id)
+        LOG.debug(_('updating %s...'), self.instance_id)
 
         try:
             data = self.fetch_cpu_stats()
             if data != None:
-                logging.debug('CPU: %s', data)
+                LOG.debug('CPU: %s', data)
                 update_rrd(self, 'cpu', data)
 
             data = self.fetch_net_stats()
-            logging.debug('NET: %s', data)
+            LOG.debug('NET: %s', data)
             update_rrd(self, 'net', data)
 
             data = self.fetch_disk_stats()
-            logging.debug('DISK: %s', data)
+            LOG.debug('DISK: %s', data)
             update_rrd(self, 'disk', data)
 
             # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@@ -285,7 +286,7 @@
             graph_disk(self, '1w')
             graph_disk(self, '1m')
         except Exception:
-            logging.exception(_('unexpected error during update'))
+            LOG.exception(_('unexpected error during update'))
 
         self.last_updated = utcnow()
```
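`Instance.update` wraps the whole fetch-and-graph cycle in a single try/except and stamps `last_updated` on the way out, so one flaky collector cannot kill the polling loop or stall the freshness bookkeeping. The toy below reproduces just that control flow, with the failing collector simulated:

```python
import datetime
import logging

LOG = logging.getLogger('nova.compute.monitor')


class InstanceStats(object):
    """Toy version of the monitor's update cycle."""

    def __init__(self, instance_id):
        self.instance_id = instance_id
        self.last_updated = None

    def fetch_cpu_stats(self):
        raise IOError('hypervisor went away')   # simulate a flaky collector

    def update(self):
        LOG.debug('updating %s...', self.instance_id)
        try:
            data = self.fetch_cpu_stats()
            LOG.debug('CPU: %s', data)
        except Exception:
            # One traceback in the log; the polling loop carries on.
            LOG.exception('unexpected error during update')
        # Stamped whether or not the fetch worked, exactly as above.
        self.last_updated = datetime.datetime.utcnow()


logging.basicConfig(level=logging.DEBUG)
stats = InstanceStats('i-00000001')
stats.update()
print('last_updated =', stats.last_updated)
```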
```diff
@@ -309,7 +310,7 @@
         self.cputime = float(info['cpu_time'])
         self.cputime_last_updated = utcnow()
 
-        logging.debug('CPU: %d', self.cputime)
+        LOG.debug('CPU: %d', self.cputime)
 
         # Skip calculation on first pass. Need delta to get a meaningful value.
         if cputime_last_updated == None:
@@ -319,17 +320,17 @@
         d = self.cputime_last_updated - cputime_last_updated
         t = d.days * 86400 + d.seconds
 
-        logging.debug('t = %d', t)
+        LOG.debug('t = %d', t)
 
         # Calculate change over time in number of nanoseconds of CPU time used.
         cputime_delta = self.cputime - cputime_last
 
-        logging.debug('cputime_delta = %s', cputime_delta)
+        LOG.debug('cputime_delta = %s', cputime_delta)
 
         # Get the number of virtual cpus in this domain.
         vcpus = int(info['num_cpu'])
 
-        logging.debug('vcpus = %d', vcpus)
+        LOG.debug('vcpus = %d', vcpus)
 
         # Calculate CPU % used and cap at 100.
         return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
@@ -351,8 +352,8 @@
                 rd += rd_bytes
                 wr += wr_bytes
             except TypeError:
-                logging.error(_('Cannot get blockstats for "%s" on "%s"'),
-                              disk, self.instance_id)
+                LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
+                          disk, self.instance_id)
                 raise
 
         return '%d:%d' % (rd, wr)
@@ -373,8 +374,8 @@
                 rx += stats[0]
                 tx += stats[4]
             except TypeError:
-                logging.error(_('Cannot get ifstats for "%s" on "%s"'),
-                              interface, self.instance_id)
+                LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
+                          interface, self.instance_id)
                 raise
 
         return '%d:%d' % (rx, tx)
@@ -408,7 +409,7 @@
         try:
             conn = virt_connection.get_connection(read_only=True)
         except Exception, exn:
-            logging.exception(_('unexpected exception getting connection'))
+            LOG.exception(_('unexpected exception getting connection'))
             time.sleep(FLAGS.monitoring_instances_delay)
             return
@@ -416,14 +417,14 @@
         try:
             self.updateInstances_(conn, domain_ids)
         except Exception, exn:
-            logging.exception('updateInstances_')
+            LOG.exception('updateInstances_')
 
     def updateInstances_(self, conn, domain_ids):
         for domain_id in domain_ids:
             if not domain_id in self._instances:
                 instance = Instance(conn, domain_id)
                 self._instances[domain_id] = instance
-                logging.debug(_('Found instance: %s'), domain_id)
+                LOG.debug(_('Found instance: %s'), domain_id)
 
         for key in self._instances.keys():
             instance = self._instances[key]
```
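For reference, `fetch_cpu_stats` computes utilization from two cumulative `cpu_time` samples: the delta is in nanoseconds of CPU consumed across all vCPUs, so it is normalized by wall-clock seconds times vcpus times 1e9, then capped at 100. A worked example of the same arithmetic:

```python
def cpu_percent(cputime_prev_ns, cputime_now_ns, seconds, vcpus):
    """CPU % used between two samples, as in fetch_cpu_stats above.

    cpu_time is cumulative nanoseconds of CPU consumed, summed over all
    virtual CPUs, so the delta is normalized by wall time * vcpus.
    """
    cputime_delta = cputime_now_ns - cputime_prev_ns
    return min(cputime_delta / (seconds * vcpus * 1.0e9) * 100, 100)


# Two vCPUs burning 9e9 ns of CPU over 10 s of wall clock:
# 9e9 / (10 * 2 * 1e9) = 0.45 -> 45% utilization.
print(cpu_percent(1.0e9, 10.0e9, 10, 2))   # 45.0
```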
