path: root/nova/service.py
author     Vishvananda Ishaya <vishvananda@yahoo.com>  2010-09-15 21:23:26 +0000
committer  Tarmac <>                                   2010-09-15 21:23:26 +0000
commit     e21c310ced6992cf2eb33b372cd4e5e69a79d140 (patch)
tree       c2a49b39aaa957a37c9ba718c1c7cd9624011e21 /nova/service.py
parent     433d83a7e487b41ba4caa7aa5addfc7365975f0b (diff)
parent     fe78b3651c9064e527b8e3b74d7669d3d364daab (diff)
download   nova-e21c310ced6992cf2eb33b372cd4e5e69a79d140.tar.gz
           nova-e21c310ced6992cf2eb33b372cd4e5e69a79d140.tar.xz
           nova-e21c310ced6992cf2eb33b372cd4e5e69a79d140.zip
Proposing merge to get feedback on the ORM refactoring. I am very interested in feedback on all of these changes.
This is a huge set of changes that touches almost all of the files. I'm sure I have broken quite a bit, but better to take the plunge now than to postpone this until later. The idea is to allow for pluggable backends throughout the code.

Brief Overview

For compute/volume/network, there are multiple classes:

service - responsible for rpc. This currently uses the existing cast and call in rpc.py and a little bit of magic to call public methods on the manager class. Each service also reports its state into the database every 10 seconds.
manager - responsible for managing the respective object classes. All the business logic for the classes goes here.
db (db_driver) - responsible for abstracting database access.
driver (domain_driver) - responsible for executing the actual shell commands and implementation.

Compute hasn't been fully cleaned up, but to get an idea of how it works, take a look at volume and network (a small illustrative sketch of the delegation pattern follows this message).

Known issues/Things to be done:

* nova-api accesses db objects directly. It seems cleaner to have only the managers dealing with their respective objects. This would mean the code for 'run_instances' would move into the manager class, and it would do the initial setup and cast out to the remote service.
* db code uses flat methods to define its interface. In my mind this is a little prettier as an abstract base class, but the driver loading code can load a module or a class. It works, so I'm not sure it needs to be changed, but feel free to debate it.
* Service classes have no code in them. Not sure if this is a problem for people, but the magic of calling the manager's methods is done in the base class. We could remove the magic from the base class and explicitly wrap the methods that we want to make available via rpc if this seems nasty.
* AuthManager Projects/Users/Roles are not integrated into this system. In order for everything to live happily in the backend, we need some type of adaptor for LDAP.
* Context is not passed properly across rabbit. Context should probably be changed to a simple dictionary so that it can be passed properly through the queue.
* No authorization checks on access to objects. We need to decide at which layer auth checks should happen.
* Some of the methods in ComputeManager need to be moved into other layers/managers.
* The compute driver layer should be abstracted more cleanly.
* Flat networking is untested and may need to be reworked.
* Some of the api commands are not working yet.
* Nova Swift authentication needs to be refactored (Todd is working on this).
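For readers new to the layering described above, here is a minimal, self-contained sketch of the service-to-manager delegation pattern. It is not code from this commit: ToyService, EchoManager, and the local import_class helper are hypothetical stand-ins for nova.service.Service, the per-topic manager classes, and nova.utils.import_class in the diff below.

# Illustrative sketch only -- not part of this commit.
import importlib


def import_class(import_str):
    """Load a class from a dotted path like 'pkg.module.ClassName'."""
    module_name, _, class_name = import_str.rpartition('.')
    return getattr(importlib.import_module(module_name), class_name)


class EchoManager(object):
    """Hypothetical manager holding the business logic for one topic."""

    def __init__(self, host=None):
        self.host = host

    def echo(self, message):
        return '%s says: %s' % (self.host, message)


class ToyService(object):
    """Forwards unknown attribute lookups to its manager."""

    def __init__(self, host, manager_class_path):
        self.host = host
        self.manager = import_class(manager_class_path)(host=host)

    def __getattr__(self, key):
        # Only called when normal lookup fails, so rpc-invoked method
        # names fall through to the manager transparently.
        return getattr(self.manager, key)


if __name__ == '__main__':
    svc = ToyService('host1', '__main__.EchoManager')
    print(svc.echo('hello'))  # resolved on EchoManager via __getattr__

Because __getattr__ only fires when normal attribute lookup fails, anything not defined on the service object falls through to its manager; that is the "little bit of magic" the overview refers to, and it is what lets the Service classes stay nearly empty.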
Diffstat (limited to 'nova/service.py')
-rw-r--r--  nova/service.py  125
1 file changed, 91 insertions, 34 deletions
diff --git a/nova/service.py b/nova/service.py
index 96281bc6b..870dd6ceb 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -28,75 +28,132 @@ from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
-from nova import datastore
+from nova import db
+from nova import exception
from nova import flags
from nova import rpc
-from nova.compute import model
+from nova import utils
FLAGS = flags.FLAGS
-
flags.DEFINE_integer('report_interval', 10,
                     'seconds between nodes reporting state to cloud',
                     lower_bound=1)
+
class Service(object, service.Service):
-    """Base class for workers that run on hosts"""
+    """Base class for workers that run on hosts."""
+
+    def __init__(self, host, binary, topic, manager, *args, **kwargs):
+        self.host = host
+        self.binary = binary
+        self.topic = topic
+        manager_class = utils.import_class(manager)
+        self.manager = manager_class(host=host, *args, **kwargs)
+        self.model_disconnected = False
+        super(Service, self).__init__(*args, **kwargs)
+        try:
+            service_ref = db.service_get_by_args(None,
+                                                 self.host,
+                                                 self.binary)
+            self.service_id = service_ref['id']
+        except exception.NotFound:
+            self._create_service_ref()
+
+
+    def _create_service_ref(self):
+        service_ref = db.service_create(None, {'host': self.host,
+                                               'binary': self.binary,
+                                               'topic': self.topic,
+                                               'report_count': 0})
+        self.service_id = service_ref['id']
+
+    def __getattr__(self, key):
+        try:
+            return super(Service, self).__getattr__(key)
+        except AttributeError:
+            return getattr(self.manager, key)
    @classmethod
    def create(cls,
-               report_interval=None,  # defaults to flag
-               bin_name=None,  # defaults to basename of executable
-               topic=None):  # defaults to basename - "nova-" part
-        """Instantiates class and passes back application object"""
+               host=None,
+               binary=None,
+               topic=None,
+               manager=None,
+               report_interval=None):
+        """Instantiates class and passes back application object.
+
+        Args:
+            host, defaults to FLAGS.host
+            binary, defaults to basename of executable
+            topic, defaults to bin_name - "nova-" part
+            manager, defaults to FLAGS.<topic>_manager
+            report_interval, defaults to FLAGS.report_interval
+        """
+        if not host:
+            host = FLAGS.host
+        if not binary:
+            binary = os.path.basename(inspect.stack()[-1][1])
+        if not topic:
+            topic = binary.rpartition("nova-")[2]
+        if not manager:
+            manager = FLAGS.get('%s_manager' % topic, None)
        if not report_interval:
-            # NOTE(vish): set here because if it is set to flag in the
-            #             parameter list, it wrongly uses the default
            report_interval = FLAGS.report_interval
-        # NOTE(vish): magic to automatically determine bin_name and topic
-        if not bin_name:
-            bin_name = os.path.basename(inspect.stack()[-1][1])
-        if not topic:
-            topic = bin_name.rpartition("nova-")[2]
-        logging.warn("Starting %s node" % topic)
-        node_instance = cls()
-
+        logging.warn("Starting %s node", topic)
+        service_obj = cls(host, binary, topic, manager)
        conn = rpc.Connection.instance()
        consumer_all = rpc.AdapterConsumer(
                connection=conn,
-                topic='%s' % topic,
-                proxy=node_instance)
-
+                topic=topic,
+                proxy=service_obj)
        consumer_node = rpc.AdapterConsumer(
                connection=conn,
-                topic='%s.%s' % (topic, FLAGS.node_name),
-                proxy=node_instance)
+                topic='%s.%s' % (topic, host),
+                proxy=service_obj)
-        pulse = task.LoopingCall(node_instance.report_state,
-                                 FLAGS.node_name,
-                                 bin_name)
+        pulse = task.LoopingCall(service_obj.report_state)
        pulse.start(interval=report_interval, now=False)
        consumer_all.attach_to_twisted()
        consumer_node.attach_to_twisted()
        # This is the parent service that twistd will be looking for when it
-        # parses this file, return it so that we can get it into globals below
-        application = service.Application(bin_name)
-        node_instance.setServiceParent(application)
+        # parses this file, return it so that we can get it into globals.
+        application = service.Application(binary)
+        service_obj.setServiceParent(application)
        return application
+    def kill(self, context=None):
+        """Destroy the service object in the datastore"""
+        try:
+            db.service_destroy(context, self.service_id)
+        except exception.NotFound:
+            logging.warn("Service killed that has no database entry")
+
    @defer.inlineCallbacks
-    def report_state(self, nodename, daemon):
-        # TODO(termie): make this pattern be more elegant. -todd
+    def report_state(self, context=None):
+        """Update the state of this service in the datastore."""
        try:
-            record = model.Daemon(nodename, daemon)
-            record.heartbeat()
+            try:
+                service_ref = db.service_get(context, self.service_id)
+            except exception.NotFound:
+                logging.debug("The service database object disappeared, "
+                              "Recreating it.")
+                self._create_service_ref()
+                service_ref = db.service_get(context, self.service_id)
+
+            db.service_update(context,
+                              self.service_id,
+                              {'report_count': service_ref['report_count'] + 1})
+
+            # TODO(termie): make this pattern be more elegant.
            if getattr(self, "model_disconnected", False):
                self.model_disconnected = False
                logging.error("Recovered model server connection!")
-        except datastore.ConnectionError, ex:
+        # TODO(vish): this should probably only catch connection errors
+        except Exception:  # pylint: disable-msg=W0702
            if not getattr(self, "model_disconnected", False):
                self.model_disconnected = True
                logging.exception("model server went away")
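For context, a rough usage sketch (not part of the diff) of how a worker binary might obtain the twistd application object from the new create() signature. Every argument is optional, falling back to FLAGS.host, the executable's basename, the binary name minus "nova-", FLAGS.<topic>_manager, and FLAGS.report_interval; the manager class path shown is purely illustrative, and the real bin/ scripts may wire this up differently.

# Hypothetical usage sketch, assuming the post-commit API above.
from nova import service

application = service.Service.create(
        host='volume-host-1',        # defaults to FLAGS.host
        binary='nova-volume',        # defaults to the executable's basename
        topic='volume',              # defaults to binary minus the "nova-" prefix
        manager='nova.volume.manager.VolumeManager')  # illustrative; defaults to FLAGS.volume_manager

As the comment in create() notes, twistd looks for a module-level variable named 'application' when it parses the file, which is why create() returns the Application object instead of starting a reactor itself.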