author    Todd Willey <todd@ansolabs.com>  2011-01-04 00:23:35 -0500
committer Todd Willey <todd@ansolabs.com>  2011-01-04 00:23:35 -0500
commit    c7305af78049f94dedcbb55480b91a3c6d843b9f (patch)
tree      b8c0305cd39408f825270f0c253c7f487ee40727
parent    dd1e36b9690a2c2de18c565c496b25295a13d0aa (diff)
Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch.
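
The change applied across every file below follows one pattern: drop the stdlib "import logging" and log through a per-module logger from nova's new log wrapper, passing the request context so messages can be correlated. Roughly (a sketch distilled from the hunks that follow, not code taken from the patch; the module and function names are placeholders, and audit() and the context= keyword come from the new nova/log.py):

    import gettext
    gettext.install('nova', unicode=1)   # makes _() available, as the nova entry points do

    from nova import log as logging

    LOG = logging.getLogger('nova.some.module')   # placeholder; each file registers its own name

    def frob(context, thing_id):
        # translatable, context-aware messages replace bare logging.debug("...")
        LOG.audit(_("Frobbing %s"), thing_id, context=context)
        LOG.debug(_("finished frobbing %s"), thing_id)

Two new helper scripts, bin/nova-logspool and bin/nova-spoolsentry, are also added to collect logged errors and forward them to a Sentry server.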
-rwxr-xr-x  bin/nova-dhcpbridge | 20
-rwxr-xr-x  bin/nova-instancemonitor | 7
-rw-r--r--  bin/nova-logspool | 156
-rwxr-xr-x  bin/nova-manage | 19
-rw-r--r--  bin/nova-spoolsentry | 97
-rw-r--r--  nova/api/ec2/__init__.py | 92
-rw-r--r--  nova/api/ec2/admin.py | 38
-rw-r--r--  nova/api/ec2/apirequest.py | 12
-rw-r--r--  nova/api/ec2/cloud.py | 45
-rw-r--r--  nova/api/ec2/metadatarequesthandler.py | 7
-rw-r--r--  nova/api/openstack/__init__.py | 13
-rw-r--r--  nova/auth/ldapdriver.py | 7
-rw-r--r--  nova/auth/manager.py | 62
-rw-r--r--  nova/auth/signer.py | 15
-rw-r--r--  nova/compute/api.py | 33
-rw-r--r--  nova/compute/disk.py | 11
-rw-r--r--  nova/compute/manager.py | 76
-rw-r--r--  nova/compute/monitor.py | 39
-rw-r--r--  nova/crypto.py | 5
-rw-r--r--  nova/exception.py | 8
-rw-r--r--  nova/fakerabbit.py | 20
-rw-r--r--  nova/flags.py | 12
-rw-r--r--  nova/image/glance.py | 23
-rw-r--r--  nova/log.py | 261
-rw-r--r--  nova/network/linux_net.py | 19
-rw-r--r--  nova/network/manager.py | 16
-rw-r--r--  nova/objectstore/handler.py | 68
-rw-r--r--  nova/rpc.py | 26
-rw-r--r--  nova/scheduler/manager.py | 5
-rw-r--r--  nova/service.py | 19
-rw-r--r--  nova/tests/api/openstack/test_images.py | 1
-rw-r--r--  nova/tests/objectstore_unittest.py | 2
-rw-r--r--  nova/tests/test_access.py | 1
-rw-r--r--  nova/tests/test_auth.py | 9
-rw-r--r--  nova/tests/test_cloud.py | 18
-rw-r--r--  nova/tests/test_compute.py | 8
-rw-r--r--  nova/tests/test_log.py | 107
-rw-r--r--  nova/tests/test_network.py | 9
-rw-r--r--  nova/tests/test_quota.py | 4
-rw-r--r--  nova/tests/test_rpc.py | 7
-rw-r--r--  nova/tests/test_volume.py | 6
-rw-r--r--  nova/twistd.py | 25
-rw-r--r--  nova/utils.py | 17
-rw-r--r--  nova/virt/connection.py | 5
-rw-r--r--  nova/virt/libvirt_conn.py | 63
-rw-r--r--  nova/virt/xenapi/fake.py | 24
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 68
-rw-r--r--  nova/virt/xenapi/vmops.py | 26
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 44
-rw-r--r--  nova/virt/xenapi/volumeops.py | 31
-rw-r--r--  nova/virt/xenapi_conn.py | 16
-rw-r--r--  nova/volume/driver.py | 11
-rw-r--r--  nova/volume/manager.py | 21
-rw-r--r--  nova/wsgi.py | 8
-rw-r--r--  setup.py | 2
55 files changed, 1282 insertions(+), 482 deletions(-)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 828aba3d1..cecc1c80c 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""
import gettext
-import logging
import os
import sys
@@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@@ -50,10 +50,15 @@ flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
+LOG = logging.getLogger('nova-dhcpbridge')
+if FLAGS.verbose:
+ LOG.setLevel(logging.DEBUG)
+
+
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- logging.debug("leasing ip")
+ LOG.debug("leasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@@ -68,14 +73,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
- logging.debug("Adopted old lease or got a change of mac/hostname")
+ LOG.debug("Adopted old lease or got a change of mac/hostname")
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
- logging.debug("releasing ip")
+ LOG.debug("releasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@@ -100,6 +105,7 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
+ logging.basicConfig()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
@@ -117,9 +123,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
- logging.debug("Called %s for mac %s with ip %s and "
- "hostname %s on interface %s",
- action, mac, ip, hostname, interface)
+ LOG.debug("Called %s for mac %s with ip %s and "
+ "hostname %s on interface %s",
+ action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 5dac3ffe6..17f573b9d 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -23,7 +23,6 @@
import gettext
import os
-import logging
import sys
from twisted.application import service
@@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
+# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
+LOG = logging.getLogger('nova-instancemonitor')
+
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- logging.warn('Starting instance monitor')
+ LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()
diff --git a/bin/nova-logspool b/bin/nova-logspool
new file mode 100644
index 000000000..d5aef4756
--- /dev/null
+++ b/bin/nova-logspool
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tools for working with logs generated by nova components
+"""
+
+
+import json
+import os
+import re
+import sys
+
+
+class Request(object):
+
+ def __init__(self):
+ self.time = ""
+ self.host = ""
+ self.logger = ""
+ self.message = ""
+ self.trace = ""
+ self.env = ""
+ self.request_id = ""
+
+ def add_error_line(self, error_line):
+ self.time = " ".join(error_line.split(" ")[:3])
+ self.host = error_line.split(" ")[3]
+ self.logger = error_line.split("(")[1].split(" ")[0]
+ self.request_id = error_line.split("[")[1].split(" ")[0]
+ error_lines = error_line.split("#012")
+ self.message = self.clean_log_line(error_lines.pop(0))
+ self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
+
+ def add_environment_line(self, env_line):
+ self.env = self.clean_env_line(env_line)
+
+ def clean_log_line(self, line):
+ """Remove log format for time, level, etc: split after context"""
+ return line.split('] ')[-1]
+
+ def clean_env_line(self, line):
+ """Also has an 'Environment: ' string in the message"""
+ return re.sub(r'^Environment: ', '', self.clean_log_line(line))
+
+ def clean_trace(self, line):
+ """trace has a different format, so split on TRACE:"""
+ return line.split('TRACE: ')[-1]
+
+ def to_dict(self):
+ return {'traceback': self.trace, 'message': self.message,
+ 'host': self.host, 'env': self.env, 'logger': self.logger,
+ 'request_id': self.request_id}
+
+class LogReader(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self._errors = {}
+
+ def process(self, spooldir):
+ with open(self.filename) as f:
+ line = f.readline()
+ while len(line) > 0:
+ parts = line.split(" ")
+ level = (len(parts) < 6) or parts[5]
+ if level == 'ERROR':
+ self.handle_logged_error(line)
+ elif level == '[-]' and self.last_error:
+ # twisted stack trace line
+ clean_line = " ".join(line.split(" ")[6:])
+ self.last_error.trace = self.last_error.trace + clean_line
+ else:
+ self.last_error = None
+ line = f.readline()
+ self.update_spool(spooldir)
+
+ def handle_logged_error(self, line):
+ request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
+ if not request_id:
+ raise Exception("Unable to parse request id from %s" % line)
+ request_id = request_id.group(1)
+ data = self._errors.get(request_id, Request())
+ if self.is_env_line(line):
+ data.add_environment_line(line)
+ elif self.is_error_line(line):
+ data.add_error_line(line)
+ else:
+ # possibly error from twsited
+ data.add_error_line(line)
+ self.last_error = data
+ self._errors[request_id] = data
+
+ def is_env_line(self, line):
+ return re.search('Environment: ', line)
+
+ def is_error_line(self, line):
+ return re.search('raised', line)
+
+ def update_spool(self, directory):
+ processed_dir = "%s/processed" % directory
+ self._ensure_dir_exists(processed_dir)
+ for rid, value in self._errors.iteritems():
+ if not self.has_been_processed(processed_dir, rid):
+ with open("%s/%s" % (directory, rid), "w") as spool:
+ spool.write(json.dumps(value.to_dict()))
+ self.flush_old_processed_spool(processed_dir)
+
+ def _ensure_dir_exists(self, d):
+ mkdir = False
+ try:
+ os.stat(d)
+ except:
+ mkdir = True
+ if mkdir:
+ os.mkdir(d)
+
+ def has_been_processed(self, processed_dir, rid):
+ rv = False
+ try:
+ os.stat("%s/%s" % (processed_dir, rid))
+ rv = True
+ except:
+ pass
+ return rv
+
+ def flush_old_processed_spool(self, processed_dir):
+ keys = self._errors.keys()
+ procs = os.listdir(processed_dir)
+ for p in procs:
+ if p not in keys:
+ # log has rotated and the old error won't be seen again
+ os.unlink("%s/%s" % (processed_dir, p))
+
+if __name__ == '__main__':
+ filename = '/var/log/nova.log'
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ if len(sys.argv) > 2:
+ spooldir = sys.argv[2]
+ LogReader(filename).process(spooldir)
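
As the __main__ block above shows, the spooler defaults to reading /var/log/nova.log and writing into /var/spool/nova, with both paths overridable as positional arguments. Each unprocessed error is written into the spool directory as a JSON file named after its request id, with the fields produced by Request.to_dict(). Driving it from Python directly is roughly (a sketch using the script's own defaults):

    # equivalent to running the script with no arguments
    reader = LogReader('/var/log/nova.log')
    reader.process('/var/spool/nova')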
diff --git a/bin/nova-manage b/bin/nova-manage
index 3416c1a52..169ab5f62 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -55,8 +55,8 @@
import datetime
import gettext
-import logging
import os
+import re
import sys
import time
@@ -77,6 +77,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import quota
from nova import utils
from nova.auth import manager
@@ -499,6 +500,15 @@ class ServiceCommands(object):
db.service_update(ctxt, svc['id'], {'disabled': True})
+class LogCommands(object):
+ def request(self, request_id, logfile='/var/log/nova.log'):
+ """Show all fields in the log for the given request. Assumes you
+ haven't changed the log format too much.
+ ARGS: request_id [logfile]"""
+ lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
+ print re.sub('#012', "\n", "\n".join(lines))
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -507,7 +517,8 @@ CATEGORIES = [
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
- ('service', ServiceCommands)]
+ ('service', ServiceCommands),
+ ('log', LogCommands)]
def lazy_match(name, key_value_tuples):
@@ -546,9 +557,7 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
-
+ logging._set_log_levels()
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry
new file mode 100644
index 000000000..ab20268a9
--- /dev/null
+++ b/bin/nova-spoolsentry
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import base64
+import json
+import logging
+import os
+import shutil
+import sys
+import urllib
+import urllib2
+try:
+ import cPickle as pickle
+except:
+ import pickle
+
+
+class SpoolSentry(object):
+ def __init__(self, spool_dir, sentry_url, key=None):
+ self.spool_dir = spool_dir
+ self.sentry_url = sentry_url
+ self.key = key
+
+ def process(self):
+ for fname in os.listdir(self.spool_dir):
+ if fname == "processed":
+ continue
+ try:
+ sourcefile = "%s/%s" % (self.spool_dir, fname)
+ with open(sourcefile) as f:
+ fdata = f.read()
+ data_from_json = json.loads(fdata)
+ data = self.build_data(data_from_json)
+ self.send_data(data)
+ destfile = "%s/processed/%s" % (self.spool_dir, fname)
+ shutil.move(sourcefile, destfile)
+ except:
+ logging.exception("Unable to upload record %s", fname)
+ raise
+
+ def build_data(self, filejson):
+ env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
+ 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
+ if filejson['env']:
+ env = json.loads(filejson['env'])
+ url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
+ env['SCRIPT_NAME'], env['PATH_INFO'])
+ rv = {'logger': filejson['logger'], 'level': logging.ERROR,
+ 'server_name': filejson['host'], 'url': url,
+ 'message': filejson['message'],
+ 'traceback': filejson['traceback']}
+ rv['data'] = {}
+ if filejson['env']:
+ rv['data']['META'] = env
+ if filejson['request_id']:
+ rv['data']['request_id'] = filejson['request_id']
+ return rv
+
+ def send_data(self, data):
+ data = {
+ 'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
+ 'key': self.key
+ }
+ req = urllib2.Request(self.sentry_url)
+ res = urllib2.urlopen(req, urllib.urlencode(data))
+ if res.getcode() != 200:
+ raise Exception("Bad HTTP code: %s" % res.getcode())
+ txt = res.read()
+
+if __name__ == '__main__':
+ sentryurl = 'http://127.0.0.1/sentry/store/'
+ key = ''
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ sentryurl = sys.argv[1]
+ if len(sys.argv) > 2:
+ key = sys.argv[2]
+ if len(sys.argv) > 3:
+ spooldir = sys.argv[3]
+ SpoolSentry(spooldir, sentryurl, key).process()
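
This uploader is the other half of the pipeline: it walks the spool directory written by nova-logspool, wraps each JSON record the way a Sentry store endpoint expects (a zlib-compressed, base64-encoded pickle under 'data' plus the shared 'key'), POSTs it, and moves the file into the processed/ subdirectory. A sketch of driving it directly, keeping the script's default store URL; the key value is a placeholder:

    # key below is a placeholder; real values depend on the Sentry deployment
    uploader = SpoolSentry('/var/spool/nova',
                           'http://127.0.0.1/sentry/store/',
                           key='changeme')
    uploader.process()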
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index aa3bfaeb4..4dd2e55cd 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,7 @@ Starting point for routing EC2 requests.
"""
-import logging
+import datetime
import routes
import webob
import webob.dec
@@ -29,6 +29,7 @@ import webob.exc
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import wsgi
from nova.api.ec2 import apirequest
from nova.api.ec2 import admin
@@ -37,6 +38,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.api")
flags.DEFINE_boolean('use_forwarded_for', False,
'Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
@@ -52,10 +54,6 @@ flags.DEFINE_list('lockout_memcached_servers', None,
'Memcached servers or None for in process cache.')
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
-
-
class API(wsgi.Middleware):
"""Routing for all EC2 API requests."""
@@ -64,6 +62,40 @@ class API(wsgi.Middleware):
if FLAGS.use_lockout:
self.application = Lockout(self.application)
+ @webob.dec.wsgify
+ def __call__(self, req):
+ rv = req.get_response(self.application)
+ self.log_request_completion(rv, req)
+ return rv
+
+ def log_request_completion(self, response, request):
+ controller = request.environ.get('ec2.controller', None)
+ if controller:
+ controller = controller.__class__.__name__
+ action = request.environ.get('ec2.action', None)
+ ctxt = request.environ.get('ec2.context', None)
+ seconds = 'X'
+ microseconds = 'X'
+ if ctxt:
+ delta = datetime.datetime.utcnow() - \
+ ctxt.timestamp
+ seconds = delta.seconds
+ microseconds = delta.microseconds
+ LOG.info(
+ "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
+ seconds,
+ microseconds,
+ request.remote_addr,
+ request.method,
+ request.path_info,
+ controller,
+ action,
+ response.status_int,
+ request.user_agent,
+ request.content_type,
+ response.content_type,
+ context=ctxt)
+
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
@@ -107,14 +139,13 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- _log.warn('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.' %
- (access_key, failures, FLAGS.lockout_minutes))
+ LOG.warn('Access key %s has had %d failed authentications'
+ ' and will be locked out for %d minutes.',
+ access_key, failures, FLAGS.lockout_minutes)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
-
class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
@@ -142,8 +173,9 @@ class Authenticate(wsgi.Middleware):
req.method,
req.host,
req.path)
- except exception.Error, ex:
- logging.debug(_("Authentication Failure: %s") % ex)
+ # Be explicit for what exceptions are 403, the rest bubble as 500
+ except (exception.NotFound, exception.NotAuthorized) as ex:
+ LOG.audit(_("Authentication Failure: %s"), str(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -154,6 +186,8 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
+ LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
+ project.name, context=req.environ['ec2.context'])
return self.application
@@ -189,9 +223,9 @@ class Router(wsgi.Middleware):
except:
raise webob.exc.HTTPBadRequest()
- _log.debug(_('action: %s') % action)
+ LOG.debug(_('action: %s'), action)
for key, value in args.items():
- _log.debug(_('arg: %s\t\tval: %s') % (key, value))
+ LOG.debug(_('arg: %s\t\tval: %s'), key, value)
# Success!
req.environ['ec2.controller'] = controller
@@ -263,6 +297,8 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
+ LOG.audit("Unauthorized request for controller=%s and action=%s",
+ controller_name, action, context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@@ -297,15 +333,24 @@ class Executor(wsgi.Application):
result = None
try:
result = api_request.send(context, **args)
+ except exception.NotFound as ex:
+ LOG.info(_('NotFound raised: %s'), str(ex), context=context)
+ return self._error(req, context, type(ex).__name__, str(ex))
except exception.ApiError as ex:
-
+ LOG.exception('ApiError raised', context=context)
if ex.code:
- return self._error(req, ex.code, ex.message)
+ return self._error(req, context, ex.code, str(ex))
else:
- return self._error(req, type(ex).__name__, ex.message)
- # TODO(vish): do something more useful with unknown exceptions
+ return self._error(req, context, type(ex).__name__, str(ex))
except Exception as ex:
- return self._error(req, type(ex).__name__, str(ex))
+ extra = {'environment': req.environ}
+ LOG.exception(_('Unexpected error raised: %s'), str(ex),
+ extra=extra, context=context)
+ return self._error(req,
+ context,
+ 'UnknownError',
+ _('An unknown error has occurred. '
+ 'Please try your request again.'))
else:
resp = webob.Response()
resp.status = 200
@@ -313,15 +358,16 @@ class Executor(wsgi.Application):
resp.body = str(result)
return resp
- def _error(self, req, code, message):
- logging.error("%s: %s", code, message)
+ def _error(self, req, context, code, message):
+ LOG.error("%s: %s", code, message, context=context)
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
resp.body = str('<?xml version="1.0"?>\n'
- '<Response><Errors><Error><Code>%s</Code>'
- '<Message>%s</Message></Error></Errors>'
- '<RequestID>?</RequestID></Response>' % (code, message))
+ '<Response><Errors><Error><Code>%s</Code>'
+ '<Message>%s</Message></Error></Errors>'
+ '<RequestID>%s</RequestID></Response>' %
+ (code, message, context.request_id))
return resp
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index fac01369e..758b612e8 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -24,9 +24,13 @@ import base64
from nova import db
from nova import exception
+from nova import log as logging
from nova.auth import manager
+LOG = logging.getLogger('nova.api.ec2.admin')
+
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -75,17 +79,18 @@ class AdminController(object):
return {'userSet':
[user_dict(u) for u in manager.AuthManager().get_users()]}
- def register_user(self, _context, name, **_kwargs):
+ def register_user(self, context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
+ LOG.audit(_("Creating new user: %s"), name, context=context)
return user_dict(manager.AuthManager().create_user(name))
- def deregister_user(self, _context, name, **_kwargs):
+ def deregister_user(self, context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
+ LOG.audit(_("Deleting user: %s"), name, context=context)
manager.AuthManager().delete_user(name)
-
return True
def describe_roles(self, context, project_roles=True, **kwargs):
@@ -105,15 +110,27 @@ class AdminController(object):
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
+ if project:
+ LOG.audit(_("Adding role %s to user %s for project %s"), role,
+ user, project, context=context)
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
+ context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
+ if project:
+ LOG.audit(_("Removing role %s from user %s for project %s"),
+ role, user, project, context=context)
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ user, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
- def generate_x509_for_user(self, _context, name, project=None, **kwargs):
+ def generate_x509_for_user(self, context, name, project=None, **kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
@@ -122,6 +139,8 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
+ LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
+ project, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -137,6 +156,8 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
+ LOG.audit(_("Create project %s managed by %s"), name, manager_user,
+ context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -146,6 +167,7 @@ class AdminController(object):
def deregister_project(self, context, name):
"""Permanently deletes a project."""
+ LOG.audit(_("Delete project: %s"), name, context=context)
manager.AuthManager().delete_project(name)
return True
@@ -159,11 +181,15 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
+ LOG.audit(_("Adding user %s to project %s"), user, project,
+ context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
+ LOG.audit(_("Removing user %s from project %s"), user, project,
+ context=context)
manager.AuthManager().remove_from_project(user, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
# FIXME(vish): these host commands don't work yet, perhaps some of the
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index a90fbeb0c..d0b417db1 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -20,13 +20,13 @@
APIRequest class
"""
-import logging
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
+from nova import log as logging
+
+LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@@ -94,7 +94,7 @@ class APIRequest(object):
except AttributeError:
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
- _log.warning(_error)
+ LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
@@ -142,7 +142,7 @@ class APIRequest(object):
response = xml.toxml()
xml.unlink()
- _log.debug(response)
+ LOG.debug(response)
return response
def _render_dict(self, xml, el, data):
@@ -151,7 +151,7 @@ class APIRequest(object):
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
- _log.debug(data)
+ LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9fb6307a8..d0db08db7 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -24,17 +24,16 @@ datastore.
import base64
import datetime
-import logging
+import IPy
import re
import os
from nova import context
-import IPy
-
from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
@@ -45,6 +44,8 @@ from nova.compute import instance_types
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+LOG = logging.getLogger("nova.api.cloud")
+
InvalidInputException = exception.InvalidInputException
@@ -280,6 +281,7 @@ class CloudController(object):
return {'keypairsSet': result}
def create_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user.id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
@@ -287,6 +289,7 @@ class CloudController(object):
# TODO(vish): when context is no longer an object, pass it here
def delete_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
db.key_pair_destroy(context, context.user.id, key_name)
except exception.NotFound:
@@ -393,6 +396,8 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Revoke security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
@@ -419,6 +424,8 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Authorize security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
@@ -455,6 +462,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
+ LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
@@ -469,6 +477,7 @@ class CloudController(object):
group_ref)]}
def delete_security_group(self, context, group_name, **kwargs):
+ LOG.audit(_("Delete security group %s"), group_name, context=context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -476,6 +485,8 @@ class CloudController(object):
return True
def get_console_output(self, context, instance_id, **kwargs):
+ LOG.audit(_("Get console output for instance %s"), instance_id,
+ context=context)
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
internal_id = ec2_id_to_internal_id(ec2_id)
@@ -539,10 +550,12 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
# check quota
if quota.allowed_volumes(context, 1, size) < 1:
- logging.warn("Quota exceeeded for %s, tried to create %sG volume",
- context.project_id, size)
+ LOG.warn(_("Quota exceeeded for project %s, tried to create "
+ "%sG volume"), context.project_id, size,
+ context=context)
raise quota.QuotaError("Volume quota exceeded. You cannot "
"create a volume of size %s" % size)
vol = {}
@@ -568,6 +581,8 @@ class CloudController(object):
return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
+ LOG.audit(_("Attach volume %s to instacne %s at %s"), volume_id,
+ instance_id, device, context=context)
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
@@ -594,6 +609,7 @@ class CloudController(object):
'volumeId': volume_ref['id']}
def detach_volume(self, context, volume_id, **kwargs):
+ LOG.audit("Detach volume %s", volume_id, context=context)
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
instance_ref = db.volume_get_instance(context.elevated(),
volume_ref['id'])
@@ -728,11 +744,11 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
+ LOG.audit(_("Allocate address"), context=context)
# check quota
if quota.allowed_floating_ips(context, 1) < 1:
- logging.warn(_("Quota exceeeded for %s, tried to allocate "
- "address"),
- context.project_id)
+ LOG.warn(_("Quota exceeeded for %s, tried to allocate address"),
+ context.project_id, context=context)
raise quota.QuotaError(_("Address quota exceeded. You cannot "
"allocate any more addresses"))
# NOTE(vish): We don't know which network host should get the ip
@@ -746,6 +762,7 @@ class CloudController(object):
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
+ LOG.audit(_("Release address %s"), public_ip, context=context)
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
# NOTE(vish): We don't know which network host should get the ip
# when we deallocate, so just send it to any one. This
@@ -758,6 +775,8 @@ class CloudController(object):
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
+ LOG.audit(_("Associate address %s to instance %s"), public_ip,
+ instance_id, context=context)
internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = self.compute_api.get_instance(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
@@ -775,6 +794,7 @@ class CloudController(object):
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
+ LOG.audit(_("Disassociate address %s"), public_ip, context=context)
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
# NOTE(vish): Get the topic from the host name of the network of
# the associated fixed ip.
@@ -811,7 +831,7 @@ class CloudController(object):
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
- logging.debug("Going to start terminating instances")
+ LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
internal_id = ec2_id_to_internal_id(ec2_id)
self.compute_api.delete_instance(context, internal_id)
@@ -819,6 +839,7 @@ class CloudController(object):
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
+ LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
internal_id = ec2_id_to_internal_id(ec2_id)
self.compute_api.reboot(context, internal_id)
@@ -850,6 +871,7 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
+ LOG.audit(_("Deleting volume %s"), volume_id, context=context)
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if volume_ref['status'] != "available":
raise exception.ApiError(_("Volume status must be available"))
@@ -871,6 +893,7 @@ class CloudController(object):
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
+ LOG.audit("De-registering image %s", image_id, context=context)
self.image_service.deregister(context, image_id)
return {'imageId': image_id}
@@ -878,7 +901,8 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- logging.debug("Registered %s as %s" % (image_location, image_id))
+ LOG.audit(_("Registered image %s with id %s"), image_location,
+ image_id, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
@@ -906,6 +930,7 @@ class CloudController(object):
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
+ LOG.audit(_("Updating image %s publicity"), image_id, context=context)
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index a57a6698a..848f0b034 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -18,15 +18,15 @@
"""Metadata request handler."""
-import logging
-
import webob.dec
import webob.exc
+from nova import log as logging
from nova import flags
from nova.api.ec2 import cloud
+LOG = logging.getLogger('nova.api.ec2.metadata')
FLAGS = flags.FLAGS
@@ -72,8 +72,7 @@ class MetadataRequestHandler(object):
remote_address = req.headers.get('X-Forwarded-For', remote_address)
meta_data = cc.get_metadata(remote_address)
if meta_data is None:
- logging.error(_('Failed to get metadata for ip: %s') %
- remote_address)
+ LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
if data is None:
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index a1430caed..7e1c03d9f 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -20,28 +20,24 @@
WSGI middleware for OpenStack API controllers.
"""
-import time
-
-import logging
import routes
-import traceback
import webob.dec
import webob.exc
import webob
-from nova import context
from nova import flags
+from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import flavors
from nova.api.openstack import images
-from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
from nova.api.openstack import sharedipgroups
+LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
flags.DEFINE_string('os_api_auth',
'nova.api.openstack.auth.AuthMiddleware',
@@ -71,8 +67,7 @@ class API(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- logging.warn(_("Caught error: %s") % str(ex))
- logging.error(traceback.format_exc())
+ LOG.exception(_("Caught error: %s"), str(ex))
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@@ -88,7 +83,7 @@ class APIRouter(wsgi.Router):
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
- logging.debug("Including admin operations in API.")
+ LOG.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 7616ff112..3e0837ba8 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
public methods.
"""
-import logging
import sys
from nova import exception
from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
@@ -66,6 +66,9 @@ flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+LOG = logging.getLogger("nova.ldapdriver")
+
+
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
@@ -502,7 +505,7 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
- logging.debug(_("Attempted to remove the last member of a group. "
+ LOG.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d3e266952..bfac7fc2a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -20,7 +20,6 @@
Nova authentication management
"""
-import logging
import os
import shutil
import string # pylint: disable-msg=W0402
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import signer
@@ -71,6 +71,9 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
+LOG = logging.getLogger('nova.authmanager')
+
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -254,43 +257,51 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
- logging.info(_('Looking up user: %r'), access_key)
+ LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
- logging.info('user: %r', user)
+ LOG.debug('user: %r', user)
if user == None:
+ LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
+ LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project == None:
+ LOG.audit(_("failed authorization: no project named %s (user=%s)"),
+ project_id, user.name)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
+ LOG.audit(_("Failed authorization: user %s not admin and not "
+ "member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
@@ -398,6 +409,12 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ if project:
+ LOG.audit(_("Adding role %s to user %s in project %s"), role,
+ User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -418,6 +435,12 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ if project:
+ LOG.audit(_("Removing role %s from user %s on project %s"),
+ role, User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
@@ -480,6 +503,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
+ LOG.audit(_("Created project %s with manager %s"), name,
+ manager_user)
project = Project(**project_dict)
return project
@@ -496,6 +521,7 @@ class AuthManager(object):
@param project: This will be the new description of the project.
"""
+ LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
@@ -505,6 +531,8 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
+ LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
@@ -523,6 +551,8 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
+ LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
@@ -549,6 +579,7 @@ class AuthManager(object):
def delete_project(self, project):
"""Deletes a project"""
+ LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
@@ -603,13 +634,16 @@ class AuthManager(object):
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
- return User(**user_dict)
+ rv = User(**user_dict)
+ LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
+ LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
@@ -618,6 +652,12 @@ class AuthManager(object):
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
+ if access_key:
+ LOG.audit(_("Access Key change for user %s"), uid)
+ if secret_key:
+ LOG.audit(_("Secret Key change for user %s"), uid)
+ if admin is not None:
+ LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@@ -666,7 +706,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
- logging.warn(_("No vpn data for project %s"), pid)
+ LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index f7d29f534..744e315d4 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
import base64
import hashlib
import hmac
-import logging
import urllib
# NOTE(vish): for new boto
@@ -54,9 +53,13 @@ import boto
# NOTE(vish): for old boto
import boto.utils
+from nova import log as logging
from nova.exception import Error
+LOG = logging.getLogger('nova.signer')
+
+
class Signer(object):
"""Hacked up code from boto/connection.py"""
@@ -120,7 +123,7 @@ class Signer(object):
def _calc_signature_2(self, params, verb, server_string, path):
"""Generate AWS signature version 2 string."""
- logging.debug('using _calc_signature_2')
+ LOG.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
current_hmac = self.hmac_256
@@ -136,13 +139,13 @@ class Signer(object):
val = urllib.quote(val, safe='-_~')
pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
- logging.debug('query string: %s', qs)
+ LOG.debug('query string: %s', qs)
string_to_sign += qs
- logging.debug('string_to_sign: %s', string_to_sign)
+ LOG.debug('string_to_sign: %s', string_to_sign)
current_hmac.update(string_to_sign)
b64 = base64.b64encode(current_hmac.digest())
- logging.debug('len(b64)=%d', len(b64))
- logging.debug('base64 encoded digest: %s', b64)
+ LOG.debug('len(b64)=%d', len(b64))
+ LOG.debug('base64 encoded digest: %s', b64)
return b64
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 07c69bd31..c0141e569 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,12 +21,12 @@ Handles all API requests relating to instances (guest vms).
"""
import datetime
-import logging
import time
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
@@ -34,6 +34,7 @@ from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
def generate_default_hostname(internal_id):
@@ -58,13 +59,13 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found in get_network_topic",
- instance_id)
+ LOG.warning(_("Instance %d was not found in get_network_topic"),
+ instance_id)
raise e
host = instance['host']
if not host:
- raise exception.Error("Instance %d has no host" % instance_id)
+ raise exception.Error(_("Instance %d has no host") % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
@@ -83,10 +84,10 @@ class ComputeAPI(base.Base):
num_instances = quota.allowed_instances(context, max_count,
instance_type)
if num_instances < min_count:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_count)
- raise quota.QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
+ LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
+ context.project_id, min_count)
+ raise quota.QuotaError(_("Instance quota exceeded. You can only "
+ "run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id
@@ -100,7 +101,7 @@ class ComputeAPI(base.Base):
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
- logging.debug("Creating a raw instance")
+ LOG.debug(_("Creating a raw instance"))
# Make sure we have access to kernel and ramdisk (if not raw)
if kernel_id:
self.image_service.show(context, kernel_id)
@@ -147,7 +148,7 @@ class ComputeAPI(base.Base):
elevated = context.elevated()
instances = []
- logging.debug(_("Going to run %s instances..."), num_instances)
+ LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@@ -172,7 +173,7 @@ class ComputeAPI(base.Base):
instance = self.update_instance(context, instance_id, **updates)
instances.append(instance)
- logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+ LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@@ -214,18 +215,18 @@ class ComputeAPI(base.Base):
return self.db.instance_update(context, instance_id, kwargs)
def delete_instance(self, context, instance_id):
- logging.debug("Going to try and terminate %d" % instance_id)
+ LOG.debug(_("Going to try and terminate %d"), instance_id)
try:
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
- logging.warning(_("Instance %d was not found during terminate"),
- instance_id)
+ LOG.warning(_("Instance %d was not found during terminate"),
+ instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning(_("Instance %d is already being terminated"),
- instance_id)
+ LOG.warning(_("Instance %d is already being terminated"),
+ instance_id)
return
self.update_instance(context,
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 814a258cd..741499294 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file.
"""
-import logging
import os
import tempfile
from nova import exception
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger('nova.compute.disk')
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
@@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn(_("Input partition size not evenly divisible by"
- " sector size: %d / %d"), file_size, sector_size)
+ LOG.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn(_("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d"), local_bytes, sector_size)
+ LOG.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6e8f34347..cc5724346 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,10 +35,10 @@ terminating it.
"""
import datetime
-import logging
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
@@ -53,6 +53,9 @@ flags.DEFINE_string('stub_network', False,
'Stub network related code')
+LOG = logging.getLogger('nova.computemanager')
+
+
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
@@ -111,7 +114,7 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
- logging.debug(_("instance %s: starting..."), instance_id)
+ LOG.debug(_("instance %s: starting..."), instance_id)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@@ -149,8 +152,8 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception(_("instance %s: Failed to spawn"),
- instance_ref['name'])
+ LOG.exception(_("instance %s: Failed to spawn"),
+ instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@@ -161,14 +164,16 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
-
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Terminating instance %s//%s"),
+ instance_ref['internal_id'], instance_id, context=context)
if not FLAGS.stub_network:
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Disassociating address %s") % address)
+ LOG.debug(_("Disassociating address %s"), address,
+ context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
@@ -180,14 +185,15 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Deallocating address %s") % address)
+ LOG.debug(_("Deallocating address %s"), address,
+ context=context)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- logging.debug(_("instance %s: terminating"), instance_id)
+ LOG.debug(_("instance %s: terminating"), instance_id, context=context)
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
@@ -207,15 +213,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Rebooting instance %s//%s"), instance_ref['internal_id'],
+ instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_ref['internal_id'],
- instance_ref['state'],
- power_state.RUNNING)
-
- logging.debug(_('instance %s: rebooting'), instance_ref['name'])
+ LOG.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s excepted: %s)'),
+ instance_ref['internal_id'],
+ instance_ref['state'],
+ power_state.RUNNING,
+ context=context)
+
+ LOG.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -251,8 +260,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: rescuing'),
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'],
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -267,8 +276,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: unrescuing'),
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'],
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -287,8 +296,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: pausing',
- instance_ref['internal_id'])
+ LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'],
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -305,8 +314,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: unpausing',
- instance_ref['internal_id'])
+ LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'],
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -364,8 +373,9 @@ class ComputeManager(manager.Manager):
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Get console output instance %s//%s"),
+ instance_ref['internal_id'], instance_id, context=context)
return self.driver.get_console_output(instance_ref)
@@ -373,8 +383,8 @@ class ComputeManager(manager.Manager):
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint)
+ LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+ volume_id, mountpoint, context=context)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
@@ -390,8 +400,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- logging.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint)
+ LOG.exception(_("instance %s: attach failed %s, removing"),
+ instance_id, mountpoint, context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -402,14 +412,14 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug(_("instance %s: detaching volume %s"),
- instance_id,
- volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
+ LOG.audit(_("Detach volume %s from mountpoint %s on instance %s//%s"),
+ volume_id, volume_ref['mountpoint'],
+ instance_ref['internal_id'], instance_id, context=context)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn(_("Detaching volume from unknown instance %s"),
- instance_ref['name'])
+ LOG.warn(_("Detaching volume from unknown instance %s"),
+ instance_ref['name'], context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 60c347a5e..cc94e44f4 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -25,19 +25,17 @@ Instance Monitoring:
"""
import datetime
-import logging
import os
-import sys
import time
import boto
import boto.s3
import rrdtool
-from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import flags
+from nova import log as logging
from nova.virt import connection as virt_connection
@@ -91,6 +89,9 @@ RRD_VALUES = {
utcnow = datetime.datetime.utcnow
+LOG = logging.getLogger('nova.instancemonitor')
+
+
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -255,20 +256,20 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug(_('updating %s...'), self.instance_id)
+ LOG.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
if data != None:
- logging.debug('CPU: %s', data)
+ LOG.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
data = self.fetch_net_stats()
- logging.debug('NET: %s', data)
+ LOG.debug('NET: %s', data)
update_rrd(self, 'net', data)
data = self.fetch_disk_stats()
- logging.debug('DISK: %s', data)
+ LOG.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@@ -285,7 +286,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception(_('unexpected error during update'))
+ LOG.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -309,7 +310,7 @@ class Instance(object):
self.cputime = float(info['cpu_time'])
self.cputime_last_updated = utcnow()
- logging.debug('CPU: %d', self.cputime)
+ LOG.debug('CPU: %d', self.cputime)
# Skip calculation on first pass. Need delta to get a meaningful value.
if cputime_last_updated == None:
@@ -319,17 +320,17 @@ class Instance(object):
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
- logging.debug('t = %d', t)
+ LOG.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
- logging.debug('cputime_delta = %s', cputime_delta)
+ LOG.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
- logging.debug('vcpus = %d', vcpus)
+ LOG.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
@@ -351,8 +352,8 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
+ disk, self.instance_id)
raise
return '%d:%d' % (rd, wr)
@@ -373,8 +374,8 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
+ interface, self.instance_id)
raise
return '%d:%d' % (rx, tx)
@@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception(_('unexpected exception getting connection'))
+ LOG.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service):
try:
self.updateInstances_(conn, domain_ids)
except Exception, exn:
- logging.exception('updateInstances_')
+ LOG.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug(_('Found instance: %s'), domain_id)
+ LOG.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]
diff --git a/nova/crypto.py b/nova/crypto.py
index b8405552d..a34b940f5 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
import base64
import gettext
import hashlib
-import logging
import os
import shutil
import struct
@@ -39,8 +38,10 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger("nova.crypto")
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file',
@@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open(inbound, "w")
csrfile.write(csr_text)
csrfile.close()
- logging.debug(_("Flags path: %s") % ca_folder)
+ LOG.debug(_("Flags path: %s"), ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
diff --git a/nova/exception.py b/nova/exception.py
index 277033e0f..7c6675e62 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -21,10 +21,8 @@ Nova base exception handling, including decorator for re-raising
Nova-type exceptions. SHOULD include dedicated exception logging.
"""
-import logging
-import sys
-import traceback
-
+from nova import log as logging
+LOG = logging.getLogger('nova.exception')
class ProcessExecutionError(IOError):
@@ -84,7 +82,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
- logging.exception(_('Uncaught exception'))
+ LOG.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 79d8b894d..7c2d7177b 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -18,12 +18,16 @@
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
-import logging
import Queue as queue
from carrot.backends import base
from eventlet import greenthread
+from nova import log as logging
+
+
+LOG = logging.getLogger("nova.fakerabbit")
+
EXCHANGES = {}
QUEUES = {}
@@ -41,12 +45,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- logging.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ LOG.debug(_('(%s) publish (key: %s) %s'),
+ self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
- logging.debug(_('Publishing to route %s'), f)
+ LOG.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@@ -76,19 +80,19 @@ class Backend(base.BaseBackend):
def queue_declare(self, queue, **kwargs):
global QUEUES
if queue not in QUEUES:
- logging.debug(_('Declaring queue %s'), queue)
+ LOG.debug(_('Declaring queue %s'), queue)
QUEUES[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
global EXCHANGES
if exchange not in EXCHANGES:
- logging.debug(_('Declaring exchange %s'), exchange)
+ LOG.debug(_('Declaring exchange %s'), exchange)
EXCHANGES[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- logging.debug(_('Binding %s to %s with key %s'),
+ LOG.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
@@ -113,7 +117,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- logging.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 4b7334927..f5c2d4233 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -29,8 +29,6 @@ import sys
import gflags
-from nova import utils
-
class FlagValues(gflags.FlagValues):
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -213,10 +211,10 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
-DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
+DEFINE_string('glance_host', '127.0.0.1', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
-DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
-DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
+DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)')
+DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
@@ -236,8 +234,8 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
-DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
-DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
+DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
+DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
diff --git a/nova/image/glance.py b/nova/image/glance.py
index cc3192e7c..a3a2f4308 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -19,20 +19,17 @@
import httplib
import json
-import logging
import urlparse
-import webob.exc
-
-from nova.compute import api as compute_api
-from nova import utils
-from nova import flags
from nova import exception
-import nova.image.service
+from nova import flags
+from nova import log as logging
+from nova.image import service
-FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.image.glance')
+FLAGS = flags.FLAGS
flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
'IP address or URL where Glance\'s Teller service resides')
flags.DEFINE_string('glance_teller_port', '9191',
@@ -78,8 +75,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images"), res.status_int)
return []
finally:
c.close()
@@ -97,8 +94,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images/detail"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images/detail"), res.status_int)
return []
finally:
c.close()
@@ -166,7 +163,7 @@ class ParallaxClient(object):
c.close()
-class GlanceImageService(nova.image.service.BaseImageService):
+class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self):
diff --git a/nova/log.py b/nova/log.py
new file mode 100644
index 000000000..88a961e13
--- /dev/null
+++ b/nova/log.py
@@ -0,0 +1,261 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Nova logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used.
+
+It also allows setting of formatting information through flags.
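+
+A minimal usage sketch (illustrative only; it assumes FLAGS have already been
+parsed and that the gettext _() function is installed):
+
+    from nova import log as logging
+
+    LOG = logging.getLogger('nova.example')
+    LOG.debug(_('plain message'))                    # default format string
+    LOG.audit(_('privileged action'), context=ctxt)  # context-aware format
+
+Here 'nova.example' and ctxt (a nova.context.RequestContext) are placeholder
+names used only for illustration.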
+"""
+
+
+import cStringIO
+import json
+import logging
+import logging.handlers
+import traceback
+
+from nova import flags
+# TODO(todd): fix after version.py merge
+# from nova import version
+
+
+FLAGS = flags.FLAGS
+
+# TODO(todd): fix after version.py merge
+# '(%(name)s %(nova_version)s): %(levelname)s '
+flags.DEFINE_string('logging_context_format_string',
+ '(%(name)s): %(levelname)s '
+ '[%(request_id)s %(user)s '
+ '%(project)s] %(message)s',
+                    'format string to use for log messages with context')
+
+# TODO(todd): fix after version.py merge
+# '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+flags.DEFINE_string('logging_default_format_string',
+ '(%(name)s): %(levelname)s [N/A] '
+ '%(message)s',
+                    'format string to use for log messages without context')
+
+flags.DEFINE_string('logging_debug_format_suffix',
+ 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ ' %(pathname)s:%(lineno)d',
+ 'data to append to log format when level is DEBUG')
+
+flags.DEFINE_string('logging_exception_prefix',
+ '(%(name)s): TRACE: ',
+ 'prefix each line of exception output with this format')
+
+flags.DEFINE_list('default_log_levels',
+ ['amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'audit=INFO'],
+ 'list of logger=LEVEL pairs')
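+# NOTE: a pair such as 'nova.compute=DEBUG' (an illustrative entry, not a
+# default) sets the level for that logger and for its children, e.g.
+# 'nova.compute.manager', but not for an unrelated 'nova.computexyz';
+# see NovaLogger._get_level_from_flags below.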
+
+flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_string('logfile', None, 'output to named file')
+
+
+
+# A list of things we want to replicate from logging.
+# levels
+CRITICAL = logging.CRITICAL
+FATAL = logging.FATAL
+ERROR = logging.ERROR
+WARNING = logging.WARNING
+WARN = logging.WARN
+INFO = logging.INFO
+DEBUG = logging.DEBUG
+NOTSET = logging.NOTSET
+# methods
+getLogger = logging.getLogger
+debug = logging.debug
+info = logging.info
+warning = logging.warning
+warn = logging.warn
+error = logging.error
+exception = logging.exception
+critical = logging.critical
+log = logging.log
+# handlers
+StreamHandler = logging.StreamHandler
+FileHandler = logging.FileHandler
+# log.SysLogHandler is nicer to use than logging.handlers.SysLogHandler.
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+ if context == None:
+ return None
+ if not isinstance(context, dict) \
+ and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def basicConfig():
+ logging.basicConfig()
+ for handler in logging.root.handlers:
+ handler.setFormatter(_formatter)
+ if FLAGS.verbose:
+ logging.root.setLevel(logging.DEBUG)
+ if FLAGS.use_syslog:
+ syslog = SysLogHandler(address='/dev/log')
+ syslog.setFormatter(_formatter)
+ logging.root.addHandler(syslog)
+ if FLAGS.logfile:
+ logfile = FileHandler(FLAGS.logfile)
+ logfile.setFormatter(_formatter)
+ logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+ """
+ NovaLogger manages request context and formatting.
+
+    This becomes the class that is instantiated by logging.getLogger.
+ """
+ def __init__(self, name, level=NOTSET):
+ level_name = self._get_level_from_flags(name, FLAGS)
+ level = globals()[level_name]
+ logging.Logger.__init__(self, name, level)
+
+ def _get_level_from_flags(self, name, FLAGS):
+ # if exactly "nova", or a child logger, honor the verbose flag
+ if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+ return 'DEBUG'
+ for pair in FLAGS.default_log_levels:
+ logger, _sep, level = pair.partition('=')
+ # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+ # (but not a.bc, so we check the dot)
+ if name == logger:
+ return level
+ if name.startswith(logger) and name[len(logger)] == '.':
+ return level
+ return 'INFO'
+
+ def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+ """Extract context from any log call"""
+ if not extra:
+ extra = {}
+ if context:
+ extra.update(_dictify_context(context))
+ # TODO(todd): fix after version.py merge
+ #extra.update({"nova_version": version.string_with_vcs()})
+ logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+ def addHandler(self, handler):
+ """Each handler gets our custom formatter"""
+ handler.setFormatter(_formatter)
+ logging.Logger.addHandler(self, handler)
+
+ def audit(self, msg, *args, **kwargs):
+ """Shortcut for our AUDIT level"""
+ if self.isEnabledFor(AUDIT):
+ self._log(AUDIT, msg, args, **kwargs)
+
+ def exception(self, msg, *args, **kwargs):
+ """Logging.exception doesn't handle kwargs, so breaks context"""
+ if not kwargs.get('exc_info'):
+ kwargs['exc_info'] = 1
+ self.error(msg, *args, **kwargs)
+ # NOTE(todd): does this really go here, or in _log ?
+ extra = kwargs.get('extra')
+ if not extra:
+ return
+ env = extra.get('environment')
+ if env:
+ env = env.copy()
+ for k in env.keys():
+ if not isinstance(env[k], str):
+ env.pop(k)
+ message = "Environment: %s" % json.dumps(env)
+ kwargs.pop('exc_info')
+ self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+ pass
+
+if not isinstance(logging.root, NovaRootLogger):
+ logging.root = NovaRootLogger("nova.root", WARNING)
+ NovaLogger.root = logging.root
+ NovaLogger.manager.root = logging.root
+
+
+class NovaFormatter(logging.Formatter):
+ """
+ A nova.context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
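+
+    For example (an illustrative configuration, mirroring
+    nova/tests/test_log.py): with logging_context_format_string set to
+    'HAS CONTEXT [%(request_id)s]: %(message)s' and
+    logging_default_format_string set to 'NOCTXT: %(message)s', a record
+    logged with a request context renders as
+    'HAS CONTEXT [<request_id>]: <message>', while the same record without
+    a context renders as 'NOCTXT: <message>'.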
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default"""
+ if record.__dict__.get('request_id', None):
+ self._fmt = FLAGS.logging_context_format_string
+ else:
+ self._fmt = FLAGS.logging_default_format_string
+ if record.levelno == logging.DEBUG \
+ and FLAGS.logging_debug_format_suffix:
+ self._fmt += " " + FLAGS.logging_debug_format_suffix
+        # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with FLAGS.logging_exception_prefix"""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+ stringbuffer = cStringIO.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split("\n")
+ stringbuffer.close()
+ formatted_lines = []
+ for line in lines:
+ pl = FLAGS.logging_exception_prefix % record.__dict__
+ fl = "%s%s" % (pl, line)
+ formatted_lines.append(fl)
+ return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+    """Shortcut for logging to root log with severity 'AUDIT'."""
+ if len(logging.root.handlers) == 0:
+ basicConfig()
+ logging.root.log(AUDIT, msg, *args, **kwargs)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 931a89554..c525d5dc8 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -17,16 +17,17 @@
Implements vlans, bridges, and iptables rules using linux utilities.
"""
-import logging
import os
-# TODO(ja): does the definition of network_path belong here?
-
from nova import db
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.linux_net")
+
+
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -172,7 +173,7 @@ def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
- logging.debug(_("Starting VLAN inteface %s"), interface)
+        LOG.debug(_("Starting VLAN interface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
@@ -182,7 +183,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
- logging.debug(_("Starting Bridge interface for %s"), interface)
+ LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
@@ -248,9 +249,9 @@ def update_dhcp(context, network_id):
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Hupping dnsmasq threw %s"), exc)
+ LOG.debug(_("Hupping dnsmasq threw %s"), exc)
else:
- logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+ LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -270,7 +271,7 @@ def _host_dhcp(fixed_ip_ref):
def _execute(cmd, *args, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
- logging.debug("FAKE NET: %s", cmd)
+ LOG.debug("FAKE NET: %s", cmd)
return "fake", 0
else:
return utils.execute(cmd, *args, **kwargs)
@@ -328,7 +329,7 @@ def _stop_dnsmasq(network):
try:
_execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Killing dnsmasq threw %s"), exc)
+ LOG.debug(_("Killing dnsmasq threw %s"), exc)
def _dhcp_file(bridge, kind):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 16aa8f895..2b7325fd0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,7 +45,6 @@ topologies. All of the network commands are issued to a subclass of
"""
import datetime
-import logging
import math
import socket
@@ -55,11 +54,13 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
from nova import rpc
+LOG = logging.getLogger("nova.networkmanager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@@ -131,7 +132,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- logging.debug(_("setting network host"))
+ LOG.debug(_("setting network host"))
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -186,7 +187,7 @@ class NetworkManager(manager.Manager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- logging.debug("Leasing IP %s", address)
+ LOG.debug(_("Leasing IP %s"), address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -201,12 +202,11 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- logging.warn(_("IP %s leased that was already deallocated"),
- address)
+ LOG.warn(_("IP %s leased that was already deallocated"), address)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- logging.debug("Releasing IP %s", address)
+        LOG.debug(_("Releasing IP %s"), address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -216,7 +216,7 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
- logging.warn(_("IP %s released that was not leased"), address)
+ LOG.warn(_("IP %s released that was not leased"), address)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
@@ -437,7 +437,7 @@ class VlanManager(NetworkManager):
self.host,
time)
if num:
- logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+            LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
def init_host(self):
"""Do any initialization that needs to be run if this is a
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 52257f69f..8e9235da4 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -39,7 +39,6 @@ S3 client with this module::
import datetime
import json
-import logging
import multiprocessing
import os
import urllib
@@ -54,12 +53,14 @@ from twisted.web import static
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.objectstore import bucket
from nova.objectstore import image
+LOG = logging.getLogger('nova.objectstore.handler')
FLAGS = flags.FLAGS
flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
@@ -132,9 +133,11 @@ def get_context(request):
request.uri,
headers=request.getAllHeaders(),
check_type='s3')
- return context.RequestContext(user, project)
+ rv = context.RequestContext(user, project)
+ LOG.audit("Authenticated request", context=rv)
+ return rv
except exception.Error as ex:
- logging.debug(_("Authentication Failure: %s"), ex)
+ LOG.debug(_("Authentication Failure: %s"), ex)
raise exception.NotAuthorized()
@@ -176,7 +179,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
- logging.debug('List of buckets requested')
+ LOG.debug(_('List of buckets requested'), context=request.context)
buckets = [b for b in bucket.Bucket.all() \
if b.is_authorized(request.context)]
@@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource):
def render_GET(self, request):
"Returns the keys for the bucket resource"""
- logging.debug("List keys for bucket %s", self.name)
+ LOG.debug(_("List keys for bucket %s"), self.name)
try:
bucket_object = bucket.Bucket(self.name)
@@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource):
return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to access bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
prefix = get_argument(request, "prefix", u"")
@@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
"Creates the bucket resource"""
- logging.debug(_("Creating bucket %s"), self.name)
- logging.debug("calling bucket.Bucket.create(%r, %r)",
+ LOG.debug(_("Creating bucket %s"), self.name)
+ LOG.debug("calling bucket.Bucket.create(%r, %r)",
self.name,
request.context)
bucket.Bucket.create(self.name, request.context)
@@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the bucket resource"""
- logging.debug(_("Deleting bucket %s"), self.name)
+ LOG.debug(_("Deleting bucket %s"), self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
bucket_object.delete()
@@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Getting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to get object %s from bucket "
+ "%s"), self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Putting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
+ "%s"),
+ self.name, self.bucket.name, context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource):
authorized to delete the object.
"""
- logging.debug(_("Deleting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ context=request.context)
if not self.bucket.is_authorized(request.context):
+            LOG.audit(_("Unauthorized attempt to delete object %s from "
+                      "bucket %s"), self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -379,13 +390,21 @@ class ImagesResource(resource.Resource):
image_path = os.path.join(FLAGS.images_path, image_id)
if not image_path.startswith(FLAGS.images_path) or \
os.path.exists(image_path):
+ LOG.audit(_("Not authorized to upload image: invalid directory "
+ "%s"),
+ image_path, context=request.context)
raise exception.NotAuthorized()
bucket_object = bucket.Bucket(image_location.split("/")[0])
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Not authorized to upload image: unauthorized "
+ "bucket %s"), bucket_object.name,
+ context=request.context)
raise exception.NotAuthorized()
+ LOG.audit(_("Starting image upload: %s"), image_id,
+ context=request.context)
p = multiprocessing.Process(target=image.Image.register_aws_image,
args=(image_id, image_location, request.context))
p.start()
@@ -398,17 +417,21 @@ class ImagesResource(resource.Resource):
image_id = get_argument(request, 'image_id', u'')
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
- logging.debug(_("not authorized for render_POST in images"))
+ LOG.audit(_("Not authorized to update attributes of image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
- logging.debug(_("handling publicity toggle"))
- image_object.set_public(operation == 'add')
+ newstatus = (operation == 'add')
+ LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
+ newstatus, context=request.context)
+ image_object.set_public(newstatus)
else:
# other attributes imply update
- logging.debug(_("update user fields"))
+ LOG.audit(_("Updating user fields on image %s"), image_id,
+ context=request.context)
clean_args = {}
for arg in request.args.keys():
clean_args[arg] = request.args[arg][0]
@@ -421,9 +444,12 @@ class ImagesResource(resource.Resource):
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
image_object.delete()
+ LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
request.setResponseCode(204)
return ''
diff --git a/nova/rpc.py b/nova/rpc.py
index 844088348..02052485d 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -22,7 +22,6 @@ No fan-out support yet.
"""
import json
-import logging
import sys
import time
import traceback
@@ -36,13 +35,12 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
+from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
-
-LOG = logging.getLogger('amqplib')
-LOG.setLevel(logging.DEBUG)
+LOG = logging.getLogger('nova.rpc')
class Connection(carrot_connection.BrokerConnection):
@@ -91,14 +89,14 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- logging.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ LOG.exception(_("AMQP server on %s:%d is unreachable."
+ " Trying again in %d seconds.") % (
+ FLAGS.rabbit_host,
+ FLAGS.rabbit_port,
+ FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
- logging.exception(_("Unable to connect to AMQP server"
+ LOG.exception(_("Unable to connect to AMQP server"
" after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
sys.exit(1)
@@ -116,14 +114,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
- logging.error(_("Reconnected to queue"))
+ LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
- logging.exception(_("Failed to fetch message from queue"))
+ LOG.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -242,8 +240,8 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = str(failure[1])
tb = traceback.format_exception(*failure)
- logging.error(_("Returning exception %s to caller"), message)
- logging.error(tb)
+ LOG.error(_("Returning exception %s to caller"), message)
+ LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
conn = Connection.instance(True)
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 44e21f2fd..a4d6dd574 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -21,15 +21,16 @@
Scheduler Service
"""
-import logging
import functools
from nova import db
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
@@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- logging.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
diff --git a/nova/service.py b/nova/service.py
index f1f90742f..fef7f0593 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -21,7 +21,6 @@ Generic Node baseclass for all workers that run on hosts
"""
import inspect
-import logging
import os
import sys
@@ -32,6 +31,7 @@ from eventlet import greenpool
from nova import context
from nova import db
from nova import exception
+from nova import log as logging
from nova import flags
from nova import rpc
from nova import utils
@@ -151,7 +151,7 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.warn(_("Starting %s node"), topic)
+ logging.audit(_("Starting %s node"), topic)
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -206,20 +206,17 @@ class Service(object):
def serve(*services):
- argv = FLAGS(sys.argv)
+ FLAGS(sys.argv)
+ logging.basicConfig()
+    # TODO(todd): make this piggyback the flag-based level override method
+ # TODO(todd): make this pigggyback the flag-based level override method
+ logging.getLogger('amqplib').setLevel(logging.WARN)
if not services:
services = [Service.create()]
name = '_'.join(x.binary for x in services)
- logging.debug("Serving %s" % name)
-
- logging.getLogger('amqplib').setLevel(logging.WARN)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
+ logging.debug(_("Serving %s"), name)
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 0f274bd15..f5be9c94f 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import json
import datetime
-import logging
import unittest
import stubout
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index ceac17adb..da86e6e11 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -23,7 +23,6 @@ Unittets for S3 objectstore clone.
import boto
import glob
import hashlib
-import logging
import os
import shutil
import tempfile
@@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- logging.getLogger().setLevel(logging.DEBUG)
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')
diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py
index 58fdea3b5..0929903cf 100644
--- a/nova/tests/test_access.py
+++ b/nova/tests/test_access.py
@@ -17,7 +17,6 @@
# under the License.
import unittest
-import logging
import webob
from nova import context
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 15d40bc53..35ffffb67 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -16,17 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from M2Crypto import X509
import unittest
from nova import crypto
from nova import flags
+from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.auth_unittest')
class user_generator(object):
@@ -211,12 +212,12 @@ class AuthManagerTestCase(object):
# NOTE(vish): Setup runs genroot.sh if it hasn't been run
cloud.CloudController().setup()
_key, cert_str = crypto.generate_x509_cert(user.id, project.id)
- logging.debug(cert_str)
+ LOG.debug(cert_str)
full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
cloud_cert = crypto.fetch_ca()
- logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
+ LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain)
signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
@@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
test.TestCase.__init__(self, *args, **kwargs)
import nova.auth.fakeldap as fakeldap
if FLAGS.flush_db:
- logging.info("Flushing datastore")
+ LOG.info("Flushing datastore")
r = fakeldap.Store.instance()
r.flushdb()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 70d2c44da..e6ad2432b 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -18,7 +18,6 @@
from base64 import b64decode
import json
-import logging
from M2Crypto import BIO
from M2Crypto import RSA
import os
@@ -31,6 +30,7 @@ from nova import context
from nova import crypto
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import service
from nova import test
@@ -41,6 +41,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.cloud')
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
@@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase):
images_path=IMAGES_PATH)
self.conn = rpc.Connection.instance()
- logging.getLogger().setLevel(logging.DEBUG)
# set up our cloud
self.cloud = cloud.CloudController()
@@ -178,7 +178,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
- logging.debug("Can't test instances without a real virtual env.")
+ LOG.debug(_("Can't test instances without a real virtual env."))
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -190,25 +190,25 @@ class CloudTestCase(test.TestCase):
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
- logging.debug("Need to watch instance %s until it's running..." %
- instance['instance_id'])
+ LOG.debug(_("Need to watch instance %s until it's running..."),
+ instance['instance_id'])
while True:
greenthread.sleep(1)
info = self.cloud._get_instance(instance['instance_id'])
- logging.debug(info['state'])
+ LOG.debug(info['state'])
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)
- if connection_type != 'fake':
+ if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
- # logging.debug(reservations[res_id])
+ # LOG.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
- logging.debug("Terminating instance %s" % instance_id)
+ LOG.debug(_("Terminating instance %s"), instance_id)
rv = yield self.compute.terminate_instance(instance_id)
def test_instance_update_state(self):
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 1fb9143f1..889ffe99f 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,12 +20,12 @@ Tests For Compute
"""
import datetime
-import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
@@ -33,12 +33,12 @@ from nova.compute import api as compute_api
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.compute')
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("Running instances: %s"), instances)
+ LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("After terminating instances: %s"), instances)
+ LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
new file mode 100644
index 000000000..d8dd38708
--- /dev/null
+++ b/nova/tests/test_log.py
@@ -0,0 +1,107 @@
+import cStringIO
+
+from nova import context
+from nova import log
+from nova import test
+
+
+def _fake_context():
+ return context.RequestContext(1, 1)
+
+
+class RootLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(RootLoggerTestCase, self).setUp()
+ self.log = log.logging.root
+
+ def tearDown(self):
+ super(RootLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_is_nova_instance(self):
+ self.assert_(isinstance(self.log, log.NovaLogger))
+
+ def test_name_is_nova_root(self):
+ self.assertEqual("nova.root", self.log.name)
+
+ def test_handlers_have_nova_formatter(self):
+ formatters = []
+ for h in self.log.handlers:
+ f = h.formatter
+ if isinstance(f, log.NovaFormatter):
+ formatters.append(f)
+ self.assert_(formatters)
+ self.assertEqual(len(formatters), len(self.log.handlers))
+
+ def test_handles_context_kwarg(self):
+ self.log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_methods_handle_context_arg(self):
+ log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_audit_handles_context_arg(self):
+ log.audit("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+class NovaFormatterTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaFormatterTestCase, self).setUp()
+ self.flags(logging_context_format_string="HAS CONTEXT "\
+ "[%(request_id)s]: %(message)s",
+ logging_default_format_string="NOCTXT: %(message)s",
+ logging_debug_format_suffix="--DBG")
+ self.log = log.logging.root
+ self.stream = cStringIO.StringIO()
+ handler = log.StreamHandler(self.stream)
+ self.log.addHandler(handler)
+ self.log.setLevel(log.DEBUG)
+
+ def tearDown(self):
+ super(NovaFormatterTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_uncontextualized_log(self):
+ self.log.info("foo")
+ self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
+
+ def test_contextualized_log(self):
+ ctxt = _fake_context()
+ self.log.info("bar", context=ctxt)
+ expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
+ self.assertEqual(expected, self.stream.getvalue())
+
+ def test_debugging_log(self):
+ self.log.debug("baz")
+ self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
+
+class NovaLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+ self.log = log.getLogger('nova-test')
+
+ def tearDown(self):
+ super(NovaLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_has_level_from_flags(self):
+ self.assertEqual(log.AUDIT, self.log.level)
+
+ def test_child_log_has_level_of_parent_flag(self):
+ l = log.getLogger('nova-test.foo')
+ self.assertEqual(log.AUDIT, l.level)
+
+class VerboseLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(VerboseLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
+ self.log = log.getLogger('nova.test')
+
+ def tearDown(self):
+ super(VerboseLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
+ self.assertEqual(log.DEBUG, self.log.level)
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 96473ac7c..349e20f84 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,18 +20,18 @@ Unit Tests for network code
"""
import IPy
import os
-import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova import service
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
class NetworkTestCase(test.TestCase):
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
fake_network=True,
network_size=16,
num_networks=5)
- logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
@@ -328,7 +327,7 @@ def lease_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("ISSUE_IP: %s, %s ", out, err)
+ LOG.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(private_ip):
@@ -344,4 +343,4 @@ def release_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("RELEASE_IP: %s, %s ", out, err)
+ LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 8cf2a5e54..ea7a006e2 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,11 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from nova import context
from nova import db
-from nova import exception
from nova import flags
from nova import quota
from nova import test
@@ -34,7 +31,6 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
quota_instances=2,
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 6ea2edcab..0e72b3eba 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -18,15 +18,16 @@
"""
Unit Tests for remote procedure calls using queue
"""
-import logging
from nova import context
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
@@ -115,13 +116,13 @@ class TestReceiver(object):
@staticmethod
def echo(context, value):
"""Simply returns whatever value is sent in"""
- logging.debug("Received %s", value)
+ LOG.debug(_("Received %s"), value)
return value
@staticmethod
def context(context, value):
"""Returns dictionary version of context"""
- logging.debug("Received %s", context)
+ LOG.debug(_("Received %s"), context)
return context.to_dict()
@staticmethod
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b13455fb0..b40ca004b 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -19,23 +19,23 @@
Tests for Volume Code.
"""
-import logging
from nova import context
from nova import exception
from nova import db
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.volume')
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
- logging.debug("Target %s allocated", iscsi_target)
+ LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
diff --git a/nova/twistd.py b/nova/twistd.py
index 29be9c4e1..556271999 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import gflags
-import logging
import os
import signal
import sys
@@ -34,6 +33,7 @@ from twisted.python import runtime
from twisted.python import usage
from nova import flags
+from nova import log as logging
if runtime.platformType == "win32":
@@ -234,22 +234,12 @@ def serve(filename):
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
- logging.getLogger('amqplib').setLevel(logging.WARN)
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
- # NOTE(vish): if we're running nodaemon, redirect the log to stdout
- if FLAGS.nodaemon and not FLAGS.logfile:
- FLAGS.logfile = "-"
- if not FLAGS.logfile:
- FLAGS.logfile = '%s.log' % name
- elif FLAGS.logfile.endswith('twistd.log'):
- FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
- if FLAGS.logdir:
- FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
@@ -270,19 +260,10 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- formatter = logging.Formatter(
- '(%(name)s): %(levelname)s %(message)s')
- handler = logging.StreamHandler(log.StdioOnnaStick())
- handler.setFormatter(formatter)
- logging.getLogger().addHandler(handler)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
-
+ logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ logging.audit(_("Starting %s"), name)
twistd.runApp(options)
diff --git a/nova/utils.py b/nova/utils.py
index 15112faa2..cc632b835 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -22,7 +22,6 @@ System-level utilities and helper functions.
import datetime
import inspect
-import logging
import os
import random
import subprocess
@@ -37,8 +36,10 @@ from eventlet import greenthread
from nova import exception
from nova.exception import ProcessExecutionError
+from nova import log as logging
+LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@@ -109,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None):
def fetchfile(url, target):
- logging.debug(_("Fetching %s") % url)
+ LOG.debug(_("Fetching %s") % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
@@ -121,7 +122,7 @@ def fetchfile(url, target):
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- logging.debug(_("Running cmd (subprocess): %s"), cmd)
+ LOG.debug(_("Running cmd (subprocess): %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
@@ -134,7 +135,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- logging.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % (obj.returncode))
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -167,12 +168,12 @@ def default_flagfile(filename='nova.conf'):
def debug(arg):
- logging.debug('debug in callback: %s', arg)
+ LOG.debug(_('debug in callback: %s'), arg)
return arg
def runthis(prompt, cmd, check_exit_code=True):
- logging.debug(_("Running %s") % (cmd))
+ LOG.debug(_("Running %s"), (cmd))
rv, err = execute(cmd, check_exit_code=check_exit_code)
@@ -203,7 +204,7 @@ def get_my_ip():
csock.close()
return addr
except socket.gaierror as ex:
- logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
+ LOG.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
return "127.0.0.1"
@@ -296,7 +297,7 @@ class LazyPluggable(object):
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
- logging.info('backend %s', self.__backend)
+ LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 61e99944e..7602cbe50 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -19,15 +19,16 @@
"""Abstraction of the underlying virtualization API."""
-import logging
import sys
from nova import flags
+from nova import log as logging
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
+LOG = logging.getLogger("nova.virt.connection")
FLAGS = flags.FLAGS
@@ -66,6 +67,6 @@ def get_connection(read_only=False):
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
- logging.error(_('Failed to open connection to the hypervisor'))
+ LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 00edfbdc8..ac82fdadb 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,7 +36,6 @@ Supports KVM, QEMU, UML, and XEN.
"""
-import logging
import os
import shutil
@@ -50,6 +49,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -63,6 +63,9 @@ libxml2 = None
Template = None
+LOG = logging.getLogger('nova.virt.libvirt_conn')
+
+
FLAGS = flags.FLAGS
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
@@ -130,7 +133,7 @@ class LibvirtConnection(object):
@property
def _conn(self):
if not self._wrapped_conn or not self._test_connection():
- logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
+ LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
@@ -142,7 +145,7 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
- logging.debug(_('Connection to libvirt broke'))
+ LOG.debug(_('Connection to libvirt broke'))
return False
raise
@@ -214,8 +217,8 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- logging.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ LOG.info(_('instance %s: deleting instance files %s'),
+ instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
@@ -279,10 +282,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rebooted'), instance['name'])
+ LOG.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_reboot failed: %s'), exn)
+ LOG.exception(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -325,10 +328,10 @@ class LibvirtConnection(object):
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rescued'), instance['name'])
+ LOG.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_rescue failed: %s'), exn)
+ LOG.exception(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
@@ -353,7 +356,7 @@ class LibvirtConnection(object):
NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
- logging.debug(_("instance %s: is running"), instance['name'])
+ LOG.debug(_("instance %s: is running"), instance['name'])
timer = utils.LoopingCall(f=None)
@@ -363,11 +366,11 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: booted'), instance['name'])
+ LOG.debug(_('instance %s: booted'), instance['name'])
timer.stop()
except:
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -377,11 +380,11 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
- logging.info('virsh said: %r' % (virsh_output,))
+ LOG.info('virsh said: %r', virsh_output)
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- logging.info(_('cool, it\'s a device'))
+ LOG.info(_('cool, it\'s a device'))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -389,7 +392,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- logging.info(_('data: %r, fpath: %r') % (data, fpath))
+ LOG.info(_('data: %r, fpath: %r'), data, fpath)
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -397,7 +400,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- logging.info('Contents: %r' % (contents,))
+ LOG.info('Contents: %r', contents)
return contents
@exception.wrap_exception
@@ -431,7 +434,7 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info(_('instance %s: Creating image'), inst['name'])
+ LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@@ -487,10 +490,10 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
- logging.info(_('instance %s: injecting key into image %s'),
+ LOG.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
- logging.info(_('instance %s: injecting net into image %s'),
+ LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
@@ -498,9 +501,9 @@ class LibvirtConnection(object):
execute=execute)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- logging.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %s: ignoring error injecting data'
+ ' into image %s (%s)'),
+ inst['name'], inst.image_id, e)
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
@@ -526,8 +529,7 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
- logging.debug(_('instance %s: starting toXML method'),
- instance['name'])
+        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -569,7 +574,7 @@ class LibvirtConnection(object):
xml_info['disk'] = xml_info['basepath'] + "/disk"
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- logging.debug(_('instance %s: finished toXML method'),
+ LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
return xml
@@ -870,9 +875,9 @@ class NWFilterFirewall(object):
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
- logging.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r' %
- (rule.protocol, rule.from_port, rule.to_port))
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f97..f2c3a34f6 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
import datetime
-import logging
import uuid
from pprint import pformat
from nova import exception
+from nova import log as logging
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
def log_db_contents(msg=None):
- logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
- logging.warn('Raising NotImplemented')
+            LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s' %
+ _('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
@@ -278,12 +280,12 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- logging.warn('Calling %s %s', name, impl)
+ LOG.debug(_('Calling %s %s'), name, impl)
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
- logging.warn('Calling getter %s', name)
+ LOG.debug(_('Calling getter %s'), name)
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s or it has '
- 'been called with the wrong number of arguments' % name)
+ _('xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
@@ -351,7 +353,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
@@ -399,7 +401,7 @@ class SessionBase(object):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 9d1b51848..1e9448a26 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
-import logging
import pickle
import urllib
from xml.dom import minidom
@@ -27,6 +26,7 @@ from xml.dom import minidom
from eventlet import event
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
@@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -121,9 +122,9 @@ class VMHelper(HelperBase):
rec['HVM_boot_params'] = {'order': 'dc'}
rec['platform'] = {'acpi': 'true', 'apic': 'true',
'pae': 'true', 'viridian': 'true'}
- logging.debug('Created VM %s...', instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@classmethod
@@ -143,10 +144,9 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
- vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
+ LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
@@ -161,7 +161,7 @@ class VMHelper(HelperBase):
if vbd_rec['userdevice'] == str(number):
return vbd
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@classmethod
@@ -170,7 +170,7 @@ class VMHelper(HelperBase):
try:
vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
@@ -183,7 +183,7 @@ class VMHelper(HelperBase):
#with Josh Kearney
session.wait_for_task(0, task)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
@@ -199,11 +199,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
+ network_ref)
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
+ vm_ref, network_ref)
return vif_ref
@classmethod
@@ -213,8 +213,7 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- logging.debug(_("Snapshotting VM %s with label '%s'..."),
- vm_ref, label)
+ LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -227,8 +226,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -241,8 +240,7 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as '%s'"),
- vdi_uuids, image_name)
+ LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name)
params = {'vdi_uuids': vdi_uuids,
'image_name': image_name,
@@ -260,7 +258,7 @@ class VMHelper(HelperBase):
"""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s", url, access)
+ LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -278,7 +276,7 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, vdi_ref):
- logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
@@ -289,7 +287,7 @@ class VMHelper(HelperBase):
pv = True
elif pv_str.lower() == 'false':
pv = False
- logging.debug("PV Kernel in VDI:%d", pv)
+ LOG.debug(_("PV Kernel in VDI:%d"), pv)
return pv
@classmethod
@@ -317,10 +315,9 @@ class VMHelper(HelperBase):
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug(_('VDI %s is still available'),
- record['uuid'])
+ LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
@@ -331,10 +328,10 @@ class VMHelper(HelperBase):
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
- logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
- record['power_state'])
- logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
- XENAPI_POWER_STATE[record['power_state']])
+ LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -388,11 +385,9 @@ def get_vhd_parent(session, vdi_rec):
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
- #NOTE(sirp): changed xenapi -> get_xenapi()
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- #NOTE(sirp): changed log -> logging
- logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
return parent_ref, parent_rec
else:
return None
@@ -409,7 +404,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
- logging.debug(_("Re-scanning SR %s"), sr_ref)
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
@@ -433,10 +428,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- logging.debug(
- _("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."),
- parent_uuid, original_parent_uuid)
+ LOG.debug(_("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."), parent_uuid,
+ original_parent_uuid)
else:
done.send(parent_uuid)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 76f31635a..b35153f90 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -18,10 +18,9 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
-import logging
-
from nova import db
from nova import context
+from nova import log as logging
from nova import exception
from nova import utils
@@ -31,6 +30,8 @@ from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
+XenAPI = None
+LOG = logging.getLogger("nova.virt.xenapi.vmops")
class VMOps(object):
"""
@@ -92,10 +93,9 @@ class VMOps(object):
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
- logging.debug(_('Starting VM %s...'), vm_ref)
+ LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info(_('Spawning VM %s created %s.'), instance.name,
- vm_ref)
+ LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -106,12 +106,12 @@ class VMOps(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop()
except Exception, exc:
- logging.warn(exc)
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.warn(exc)
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -194,7 +194,7 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Disk clean-up
if vdis:
@@ -203,20 +203,20 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
except XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 1ca813bcf..d95859225 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -21,16 +21,17 @@ and storage repositories
import re
import string
-import logging
from nova import db
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.virt.xenapi import HelperBase
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.volume_utils")
class StorageError(Exception):
@@ -53,7 +54,7 @@ class VolumeHelper(HelperBase):
"""
sr_ref = session.get_xenapi().SR.get_by_name_label(label)
if len(sr_ref) == 0:
- logging.debug('Introducing %s...', label)
+ LOG.debug(_('Introducing %s...'), label)
record = {}
if 'chapuser' in info and 'chappassword' in info:
record = {'target': info['targetHost'],
@@ -70,10 +71,10 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- logging.debug('Introduced %s as %s.', label, sr_ref)
+ LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
return sr_ref
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
else:
return sr_ref[0]
@@ -85,32 +86,32 @@ class VolumeHelper(HelperBase):
vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@classmethod
def destroy_iscsi_storage(cls, session, sr_ref):
"""Forget the SR whilst preserving the state of the disk"""
- logging.debug("Forgetting SR %s ... ", sr_ref)
+ LOG.debug(_("Forgetting SR %s ... "), sr_ref)
pbds = []
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when getting PBDs for %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
+ exc, sr_ref)
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when unplugging PBD %s',
- exc, pbd)
+ LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
+ exc, pbd)
try:
session.get_xenapi().SR.forget(sr_ref)
- logging.debug("Forgetting SR %s done.", sr_ref)
+ LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when forgetting SR %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
+ sr_ref)
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -118,12 +119,12 @@ class VolumeHelper(HelperBase):
try:
vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to get record'
' of VDI %s on') % vdis[0])
else:
@@ -141,7 +142,7 @@ class VolumeHelper(HelperBase):
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
@@ -165,11 +166,8 @@ class VolumeHelper(HelperBase):
target_host = _get_target_host(iscsi_portal)
target_port = _get_target_port(iscsi_portal)
target_iqn = _get_iqn(iscsi_name, volume_id)
- logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- volume_id,
- target_host,
- target_port,
- target_iqn)
+    LOG.debug(_('(vol_id,host,port,iqn): (%s,%s,%s,%s)'),
+              volume_id, target_host, target_port, target_iqn)
if (device_number < 0) or \
(volume_id is None) or \
(target_host is None) or \
@@ -196,7 +194,7 @@ class VolumeHelper(HelperBase):
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
- logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
@@ -253,7 +251,7 @@ def _get_target(volume_id):
"sendtargets -p %s" %
volume_ref['host'])
except exception.ProcessExecutionError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
targets = r.splitlines()
if len(_e) == 0 and len(targets) == 1:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index fdeb2506c..189f968c6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,14 +17,17 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
-import logging
from nova import exception
+from nova import log as logging
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.volume_utils import VolumeHelper
from nova.virt.xenapi.volume_utils import StorageError
+LOG = logging.getLogger("nova.virt.xenapi.volumeops")
+
+
class VolumeOps(object):
"""
Management class for Volume-related tasks
@@ -45,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- logging.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -61,7 +64,7 @@ class VolumeOps(object):
try:
vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to create VDI on SR %s for instance %s')
% (sr_ref,
@@ -73,7 +76,7 @@ class VolumeOps(object):
vol_rec['deviceNumber'],
False)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to use SR %s for instance %s')
% (sr_ref,
@@ -84,13 +87,13 @@ class VolumeOps(object):
vbd_ref)
self._session.wait_for_task(vol_rec['deviceNumber'], task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- logging.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -100,13 +103,13 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
vm_ref, device_number)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
else:
try:
@@ -114,13 +117,13 @@ class VolumeOps(object):
vbd_ref)
VMHelper.unplug_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
VMHelper.destroy_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- logging.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index f17c8f39d..a798f9e33 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -50,7 +50,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
-import logging
import sys
import xmlrpclib
@@ -61,9 +60,14 @@ from nova import context
from nova import db
from nova import utils
from nova import flags
+from nova import log as logging
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
+
+LOG = logging.getLogger("nova.virt.xenapi")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
@@ -248,7 +252,7 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- logging.info(_("Task [%s] %s status: success %s") % (
+ LOG.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
@@ -256,7 +260,7 @@ class XenAPISession(object):
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- logging.warn(_("Task [%s] %s status: %s %s") % (
+ LOG.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
@@ -264,7 +268,7 @@ class XenAPISession(object):
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.warn(exc)
done.send_exception(*sys.exc_info())
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
@@ -272,7 +276,7 @@ class XenAPISession(object):
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@@ -285,7 +289,7 @@ class XenAPISession(object):
else:
raise
except xmlrpclib.ProtocolError, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
raise
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 8353b9712..477e0abf4 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -20,15 +20,16 @@ Drivers for volumes.
"""
-import logging
import os
import time
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
@@ -73,8 +74,8 @@ class VolumeDriver(object):
tries = tries + 1
if tries >= FLAGS.num_shell_tries:
raise
- logging.exception(_("Recovering from a failed execute."
- "Try number %s"), tries)
+ LOG.exception(_("Recovering from a failed execute. "
+ "Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
@@ -205,7 +206,7 @@ class FakeAOEDriver(AOEDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE AOE: %s"), cmd)
+ LOG.debug(_("FAKE AOE: %s"), cmd)
return (None, None)
@@ -310,5 +311,5 @@ class FakeISCSIDriver(ISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE ISCSI: %s"), cmd)
+ LOG.debug(_("FAKE ISCSI: %s"), cmd)
return (None, None)
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 966334c50..6348539c5 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,17 +42,18 @@ intact.
"""
-import logging
import datetime
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
+LOG = logging.getLogger('nova.volume.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone',
'nova',
@@ -81,7 +82,7 @@ class VolumeManager(manager.Manager):
self.driver.check_for_setup_error()
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
- logging.debug(_("Re-exporting %s volumes"), len(volumes))
+ LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
self.driver.ensure_export(ctxt, volume)
@@ -89,7 +90,7 @@ class VolumeManager(manager.Manager):
"""Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
- logging.info(_("volume %s: creating"), volume_ref['name'])
+ LOG.info(_("volume %s: creating"), volume_ref['name'])
self.db.volume_update(context,
volume_id,
@@ -98,18 +99,18 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
- logging.debug(_("volume %s: creating lv of size %sG"),
- volume_ref['name'], volume_ref['size'])
+ LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
+ volume_ref['size'])
self.driver.create_volume(volume_ref)
- logging.debug(_("volume %s: creating export"), volume_ref['name'])
+ LOG.debug(_("volume %s: creating export"), volume_ref['name'])
self.driver.create_export(context, volume_ref)
now = datetime.datetime.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
- logging.debug(_("volume %s: created successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
return volume_id
def delete_volume(self, context, volume_id):
@@ -120,12 +121,12 @@ class VolumeManager(manager.Manager):
raise exception.Error(_("Volume is still attached"))
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
- logging.debug(_("volume %s: removing export"), volume_ref['name'])
+ LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
- logging.debug(_("volume %s: deleting"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref)
self.db.volume_destroy(context, volume_id)
- logging.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
def setup_compute_volume(self, context, volume_id):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b5d6b96c1..6336912e4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -22,7 +22,6 @@ Utility methods for working with WSGI servers
"""
import json
-import logging
import sys
from xml.dom import minidom
@@ -35,18 +34,23 @@ import webob
import webob.dec
import webob.exc
+from nova import log as logging
-logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+# TODO(todd): should this just piggyback the handler for root logger
+# since we usually log to syslog, but that changes if not daemonized?
+logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
+ logging.basicConfig()
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
+ logging.audit("Starting %s on %s:%s", sys.argv[0], host, port)
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
diff --git a/setup.py b/setup.py
index 1abf4d9fe..e00911099 100644
--- a/setup.py
+++ b/setup.py
@@ -64,9 +64,11 @@ setup(name='nova',
'bin/nova-dhcpbridge',
'bin/nova-import-canonical-imagestore',
'bin/nova-instancemonitor',
+ 'bin/nova-logspool',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-objectstore',
'bin/nova-scheduler',
+ 'bin/nova-spoolsentry',
'bin/nova-volume',
'tools/nova-debug'])