summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@yahoo.com>2010-09-02 16:59:41 -0700
committerVishvananda Ishaya <vishvananda@yahoo.com>2010-09-02 16:59:41 -0700
commit9bab02f79e4f54ab909f699bcb86ff51c1a97e0a (patch)
treeded8438d295fc97679fe7615b3989ff66322fb17
parent68d8f54e00c153eccd426256a25c8a70ccce2dcc (diff)
parent98b6a25ea57c43ecd400eff49e23a202dc6f9869 (diff)
more scheduler tests
-rw-r--r--bzrplugins/novalog/__init__.py59
-rw-r--r--nova/datastore.old.py261
-rw-r--r--nova/db/api.py16
-rw-r--r--nova/db/sqlalchemy/api.py58
-rw-r--r--nova/scheduler/chance.py24
-rw-r--r--nova/scheduler/driver.py6
-rw-r--r--nova/scheduler/manager.py30
-rw-r--r--nova/scheduler/simple.py9
-rw-r--r--nova/service.py11
-rw-r--r--nova/tests/api/__init__.py (renamed from nova/api/test.py)12
-rw-r--r--nova/tests/api/rackspace/__init__.py0
-rw-r--r--nova/tests/api/rackspace/flavors.py34
-rw-r--r--nova/tests/api/rackspace/images.py39
-rw-r--r--nova/tests/api/rackspace/servers.py58
-rw-r--r--nova/tests/api/rackspace/sharedipgroups.py40
-rw-r--r--nova/tests/api/test_helper.py7
-rw-r--r--nova/tests/api/wsgi_test.py (renamed from nova/wsgi_test.py)0
-rw-r--r--nova/tests/model_unittest.py292
-rw-r--r--nova/tests/scheduler_unittest.py49
-rw-r--r--nova/tests/service_unittest.py26
-rw-r--r--nova/virt/libvirt_conn.py9
-rw-r--r--nova/wsgi.py31
-rw-r--r--run_tests.py1
-rw-r--r--setup.py6
24 files changed, 437 insertions, 641 deletions
diff --git a/bzrplugins/novalog/__init__.py b/bzrplugins/novalog/__init__.py
new file mode 100644
index 000000000..e16b2e00f
--- /dev/null
+++ b/bzrplugins/novalog/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Log format for Nova's changelog."""
+
+import bzrlib.log
+from bzrlib.osutils import format_date
+
+#
+# This is mostly stolen from bzrlib.log.GnuChangelogLogFormatter
+# The difference is that it logs the author rather than the committer
+# which for Nova always is Tarmac.
+#
+class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
+ preferred_levels = 1
+ def log_revision(self, revision):
+ """Log a revision, either merged or not."""
+ to_file = self.to_file
+
+ date_str = format_date(revision.rev.timestamp,
+ revision.rev.timezone or 0,
+ self.show_timezone,
+ date_fmt='%Y-%m-%d',
+ show_offset=False)
+
+ authors = revision.rev.get_apparent_authors()
+ to_file.write('%s %s\n\n' % (date_str, ", ".join(authors)))
+
+ if revision.delta is not None and revision.delta.has_changed():
+ for c in revision.delta.added + revision.delta.removed + revision.delta.modified:
+ path, = c[:1]
+ to_file.write('\t* %s:\n' % (path,))
+ for c in revision.delta.renamed:
+ oldpath,newpath = c[:2]
+ # For renamed files, show both the old and the new path
+ to_file.write('\t* %s:\n\t* %s:\n' % (oldpath,newpath))
+ to_file.write('\n')
+
+ if not revision.rev.message:
+ to_file.write('\tNo commit message\n')
+ else:
+ message = revision.rev.message.rstrip('\r\n')
+ for l in message.split('\n'):
+ to_file.write('\t%s\n' % (l.lstrip(),))
+ to_file.write('\n')
+
+bzrlib.log.register_formatter('novalog', NovaLogFormat)
+
diff --git a/nova/datastore.old.py b/nova/datastore.old.py
deleted file mode 100644
index 751c5eeeb..000000000
--- a/nova/datastore.old.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Datastore:
-
-MAKE Sure that ReDIS is running, and your flags are set properly,
-before trying to run this.
-"""
-
-import logging
-
-from nova import exception
-from nova import flags
-from nova import utils
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('redis_host', '127.0.0.1',
- 'Host that redis is running on.')
-flags.DEFINE_integer('redis_port', 6379,
- 'Port that redis is running on.')
-flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
-
-
-class Redis(object):
- def __init__(self):
- if hasattr(self.__class__, '_instance'):
- raise Exception('Attempted to instantiate singleton')
-
- @classmethod
- def instance(cls):
- if not hasattr(cls, '_instance'):
- inst = redis.Redis(host=FLAGS.redis_host,
- port=FLAGS.redis_port,
- db=FLAGS.redis_db)
- cls._instance = inst
- return cls._instance
-
-
-class ConnectionError(exception.Error):
- pass
-
-
-def absorb_connection_error(fn):
- def _wrapper(*args, **kwargs):
- try:
- return fn(*args, **kwargs)
- except redis.exceptions.ConnectionError, ce:
- raise ConnectionError(str(ce))
- return _wrapper
-
-
-class BasicModel(object):
- """
- All Redis-backed data derives from this class.
-
- You MUST specify an identifier() property that returns a unique string
- per instance.
-
- You MUST have an initializer that takes a single argument that is a value
- returned by identifier() to load a new class with.
-
- You may want to specify a dictionary for default_state().
-
- You may also specify override_type at the class left to use a key other
- than __class__.__name__.
-
- You override save and destroy calls to automatically build and destroy
- associations.
- """
-
- override_type = None
-
- @absorb_connection_error
- def __init__(self):
- state = Redis.instance().hgetall(self.__redis_key)
- if state:
- self.initial_state = state
- self.state = dict(self.initial_state)
- else:
- self.initial_state = {}
- self.state = self.default_state()
-
-
- def default_state(self):
- """You probably want to define this in your subclass"""
- return {}
-
- @classmethod
- def _redis_name(cls):
- return cls.override_type or cls.__name__.lower()
-
- @classmethod
- def lookup(cls, identifier):
- rv = cls(identifier)
- if rv.is_new_record():
- return None
- else:
- return rv
-
- @classmethod
- @absorb_connection_error
- def all(cls):
- """yields all objects in the store"""
- redis_set = cls._redis_set_name(cls.__name__)
- for identifier in Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- def associated_to(cls, foreign_type, foreign_id):
- for identifier in cls.associated_keys(foreign_type, foreign_id):
- yield cls(identifier)
-
- @classmethod
- @absorb_connection_error
- def associated_keys(cls, foreign_type, foreign_id):
- redis_set = cls._redis_association_name(foreign_type, foreign_id)
- return Redis.instance().smembers(redis_set) or []
-
- @classmethod
- def _redis_set_name(cls, kls_name):
- # stupidly pluralize (for compatiblity with previous codebase)
- return kls_name.lower() + "s"
-
- @classmethod
- def _redis_association_name(cls, foreign_type, foreign_id):
- return cls._redis_set_name("%s:%s:%s" %
- (foreign_type, foreign_id, cls._redis_name()))
-
- @property
- def identifier(self):
- """You DEFINITELY want to define this in your subclass"""
- raise NotImplementedError("Your subclass should define identifier")
-
- @property
- def __redis_key(self):
- return '%s:%s' % (self._redis_name(), self.identifier)
-
- def __repr__(self):
- return "<%s:%s>" % (self.__class__.__name__, self.identifier)
-
- def keys(self):
- return self.state.keys()
-
- def copy(self):
- copyDict = {}
- for item in self.keys():
- copyDict[item] = self[item]
- return copyDict
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def update(self, update_dict):
- return self.state.update(update_dict)
-
- def setdefault(self, item, default):
- return self.state.setdefault(item, default)
-
- def __contains__(self, item):
- return item in self.state
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
-
- def __delitem__(self, item):
- """We don't support this"""
- raise Exception("Silly monkey, models NEED all their properties.")
-
- def is_new_record(self):
- return self.initial_state == {}
-
- @absorb_connection_error
- def add_to_index(self):
- """Each insance of Foo has its id tracked int the set named Foos"""
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().sadd(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- """Remove id of this instance from the set tracking ids of this type"""
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def associate_with(self, foreign_type, foreign_id):
- """Add this class id into the set foreign_type:foreign_id:this_types"""
- # note the extra 's' on the end is for plurality
- # to match the old data without requiring a migration of any sort
- self.add_associated_model_to_its_set(foreign_type, foreign_id)
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- Redis.instance().sadd(redis_set, self.identifier)
-
- @absorb_connection_error
- def unassociate_with(self, foreign_type, foreign_id):
- """Delete from foreign_type:foreign_id:this_types set"""
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- Redis.instance().srem(redis_set, self.identifier)
-
- def add_associated_model_to_its_set(self, model_type, model_id):
- """
- When associating an X to a Y, save Y for newer timestamp, etc, and to
- make sure to save it if Y is a new record.
- If the model_type isn't found as a usable class, ignore it, this can
- happen when associating to things stored in LDAP (user, project, ...).
- """
- table = globals()
- klsname = model_type.capitalize()
- if table.has_key(klsname):
- model_class = table[klsname]
- model_inst = model_class(model_id)
- model_inst.save()
-
- @absorb_connection_error
- def save(self):
- """
- update the directory with the state from this model
- also add it to the index of items of the same type
- then set the initial_state = state so new changes are tracked
- """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- if self.is_new_record():
- self["create_time"] = utils.isotime()
- for key, val in self.state.iteritems():
- Redis.instance().hset(self.__redis_key, key, val)
- self.add_to_index()
- self.initial_state = dict(self.state)
- return True
-
- @absorb_connection_error
- def destroy(self):
- """deletes all related records from datastore."""
- logging.info("Destroying datamodel for %s %s",
- self.__class__.__name__, self.identifier)
- Redis.instance().delete(self.__redis_key)
- self.remove_from_index()
- return True
-
diff --git a/nova/db/api.py b/nova/db/api.py
index 7e889c872..872be6919 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -75,6 +75,22 @@ def service_get_all_compute_sorted(context):
return IMPL.service_get_all_compute_sorted(context)
+def service_get_all_network_sorted(context):
+ """Get all network services sorted by network count
+
+ Returns a list of (Service, network_count) tuples
+ """
+ return IMPL.service_get_all_network_sorted(context)
+
+
+def service_get_all_volume_sorted(context):
+ """Get all volume services sorted by volume count
+
+ Returns a list of (Service, volume_count) tuples
+ """
+ return IMPL.service_get_all_volume_sorted(context)
+
+
def service_get_by_args(context, host, binary):
"""Get the state of an service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e917a3959..4ae55eaf4 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -52,25 +52,67 @@ def service_get_all_by_topic(context, topic):
.all()
-def service_get_all_compute_sorted(_context):
+def _service_get_all_topic_subquery(_context, session, topic, subq, label):
+ sort_value = getattr(subq.c, label)
+ return session.query(models.Service, sort_value) \
+ .filter_by(topic=topic) \
+ .filter_by(deleted=False) \
+ .outerjoin((subq, models.Service.host == subq.c.host)) \
+ .order_by(sort_value) \
+ .all()
+
+
+def service_get_all_compute_sorted(context):
with managed_session() as session:
# NOTE(vish): The intended query is below
# SELECT services.*, inst_count.instance_count
# FROM services LEFT OUTER JOIN
# (SELECT host, count(*) AS instance_count
# FROM instances GROUP BY host) AS inst_count
+ topic = 'compute'
+ label = 'instance_count'
subq = session.query(models.Instance.host,
- func.count('*').label('instance_count')) \
+ func.count('*').label(label)) \
.filter_by(deleted=False) \
.group_by(models.Instance.host) \
.subquery()
- topic = 'compute'
- return session.query(models.Service, subq.c.instance_count) \
- .filter_by(topic=topic) \
+ return _service_get_all_topic_subquery(context,
+ session,
+ topic,
+ subq,
+ label)
+
+
+def service_get_all_network_sorted(context):
+ with managed_session() as session:
+ topic = 'network'
+ label = 'network_count'
+ subq = session.query(models.Network.host,
+ func.count('*').label(label)) \
.filter_by(deleted=False) \
- .outerjoin((subq, models.Service.host == subq.c.host)) \
- .order_by(subq.c.instance_count) \
- .all()
+ .group_by(models.Network.host) \
+ .subquery()
+ return _service_get_all_topic_subquery(context,
+ session,
+ topic,
+ subq,
+ label)
+
+
+def service_get_all_volume_sorted(context):
+ with managed_session() as session:
+ topic = 'volume'
+ label = 'volume_count'
+ subq = session.query(models.Volume.host,
+ func.count('*').label(label)) \
+ .filter_by(deleted=False) \
+ .group_by(models.Volume.host) \
+ .subquery()
+ return _service_get_all_topic_subquery(context,
+ session,
+ topic,
+ subq,
+ label)
def service_get_by_args(_context, host, binary):
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 12321cec1..1054cdbf5 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -31,32 +31,12 @@ class ChanceScheduler(driver.Scheduler):
Implements Scheduler as a random node selector
"""
- def pick_compute_host(self, context, instance_id, **_kwargs):
+ def schedule(self, context, topic, *_args, **_kwargs):
"""
Picks a host that is up at random
"""
- hosts = self.hosts_up(context, 'compute')
- if not hosts:
- raise driver.NoValidHost("No hosts found")
- return hosts[int(random.random() * len(hosts))]
-
- def pick_volume_host(self, context, volume_id, **_kwargs):
- """
- Picks a host that is up at random
- """
-
- hosts = self.hosts_up(context, 'volume')
- if not hosts:
- raise driver.NoValidHost("No hosts found")
- return hosts[int(random.random() * len(hosts))]
-
- def pick_network_host(self, context, network_id, **_kwargs):
- """
- Picks a host that is up at random
- """
-
- hosts = self.hosts_up(context, 'network')
+ hosts = self.hosts_up(context, topic)
if not hosts:
raise driver.NoValidHost("No hosts found")
return hosts[int(random.random() * len(hosts))]
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 830f05b13..f5872e9c8 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -61,3 +61,9 @@ class Scheduler(object):
return [service.host
for service in services
if self.service_is_up(service)]
+
+ def schedule(self, context, topic, *_args, **_kwargs):
+ """
+ Must override at least this method for scheduler to work
+ """
+ raise NotImplementedError("Must implement a fallback schedule")
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a75b4ac41..1755a6fef 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -22,6 +22,7 @@ Scheduler Service
"""
import logging
+import functools
from nova import db
from nova import flags
@@ -45,16 +46,23 @@ class SchedulerManager(manager.Manager):
self.driver = utils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
- def run_instance(self, context, instance_id, **_kwargs):
- """
- Picks a node for a running VM and casts the run_instance request
- """
+ def __getattr__(self, key):
+ """Converts all method calls to use the schedule method"""
+ return functools.partial(self._schedule, key)
- host = self.driver.pick_host(context, instance_id, **_kwargs)
+ def _schedule(self, method, context, topic, *args, **kwargs):
+ """Tries to call schedule_* method on the driver to retrieve host.
+
+ Falls back to schedule(context, topic) if method doesn't exist.
+ """
+ driver_method = 'schedule_%s' % method
+ try:
+ host = getattr(self.driver, driver_method)(context, *args, **kwargs)
+ except AttributeError:
+ host = self.driver.schedule(context, topic, *args, **kwargs)
- rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "run_instance",
- "args": {"context": context,
- "instance_id": instance_id}})
- logging.debug("Casting to compute %s for running instance %s",
- host, instance_id)
+ kwargs.update({"context": None})
+ rpc.cast(db.queue_get_for(context, topic, host),
+ {"method": method,
+ "args": kwargs})
+        logging.debug("Casting to %s %s for %s", topic, host, method)
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 832417208..d10ddabac 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -24,6 +24,7 @@ Simple Scheduler
from nova import db
from nova import flags
from nova.scheduler import driver
+from nova.scheduler import chance
FLAGS = flags.FLAGS
flags.DEFINE_integer("max_instances", 16,
@@ -33,12 +34,12 @@ flags.DEFINE_integer("max_volumes", 100,
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
-class SimpleScheduler(driver.Scheduler):
+class SimpleScheduler(chance.ChanceScheduler):
"""
Implements Naive Scheduler that tries to find least loaded host
"""
- def pick_compute_host(self, context, instance_id, **_kwargs):
+ def schedule_run_instance(self, context, _instance_id, *_args, **_kwargs):
"""
Picks a host that is up and has the fewest running instances
"""
@@ -52,7 +53,7 @@ class SimpleScheduler(driver.Scheduler):
return service['host']
raise driver.NoValidHost("No hosts found")
- def pick_volume_host(self, context, volume_id, **_kwargs):
+ def schedule_create_volume(self, context, _volume_id, *_args, **_kwargs):
"""
Picks a host that is up and has the fewest volumes
"""
@@ -66,7 +67,7 @@ class SimpleScheduler(driver.Scheduler):
return service['host']
raise driver.NoValidHost("No hosts found")
- def pick_network_host(self, context, network_id, **_kwargs):
+ def schedule_set_network_host(self, context, _network_id, *_args, **_kwargs):
"""
Picks a host that is up and has the fewest networks
"""
diff --git a/nova/service.py b/nova/service.py
index fc188be34..d7471f4c6 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -85,9 +85,6 @@ class Service(object, service.Service):
manager, defaults to FLAGS.<topic>_manager
report_interval, defaults to FLAGS.report_interval
"""
- if not report_interval:
- report_interval = FLAGS.report_interval
-
if not host:
host = FLAGS.host
if not binary:
@@ -96,16 +93,18 @@ class Service(object, service.Service):
topic = binary.rpartition("nova-")[2]
if not manager:
manager = FLAGS.get('%s_manager' % topic, None)
+ if not report_interval:
+ report_interval = FLAGS.report_interval
logging.warn("Starting %s node", topic)
- service_obj = cls(FLAGS.host, binary, topic, manager)
+ service_obj = cls(host, binary, topic, manager)
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
- topic='%s' % topic,
+ topic=topic,
proxy=service_obj)
consumer_node = rpc.AdapterConsumer(
connection=conn,
- topic='%s.%s' % (topic, FLAGS.host),
+ topic='%s.%s' % (topic, host),
proxy=service_obj)
pulse = task.LoopingCall(service_obj.report_state)
diff --git a/nova/api/test.py b/nova/tests/api/__init__.py
index 51b114b8e..59c4adc3d 100644
--- a/nova/api/test.py
+++ b/nova/tests/api/__init__.py
@@ -26,7 +26,7 @@ import webob
import webob.dec
from nova import api
-
+from nova.tests.api.test_helper import *
class Test(unittest.TestCase):
@@ -52,10 +52,8 @@ class Test(unittest.TestCase):
result = webob.Request.blank('/test/cloud').get_response(api.API())
self.assertNotEqual(result.body, "/cloud")
+ def test_query_api_version(self):
+ pass
-class APIStub(object):
- """Class to verify request and mark it was called."""
-
- @webob.dec.wsgify
- def __call__(self, req):
- return req.path_info
+if __name__ == '__main__':
+ unittest.main()
diff --git a/nova/tests/api/rackspace/__init__.py b/nova/tests/api/rackspace/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/api/rackspace/__init__.py
diff --git a/nova/tests/api/rackspace/flavors.py b/nova/tests/api/rackspace/flavors.py
new file mode 100644
index 000000000..fb8ba94a5
--- /dev/null
+++ b/nova/tests/api/rackspace/flavors.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova.api.rackspace import flavors
+from nova.tests.api.test_helper import *
+
+class FlavorsTest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+
+ def test_get_flavor_list(self):
+ pass
+
+ def test_get_flavor_by_id(self):
+ pass
diff --git a/nova/tests/api/rackspace/images.py b/nova/tests/api/rackspace/images.py
new file mode 100644
index 000000000..560d8c898
--- /dev/null
+++ b/nova/tests/api/rackspace/images.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova.api.rackspace import images
+from nova.tests.api.test_helper import *
+
+class ImagesTest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+
+ def test_get_image_list(self):
+ pass
+
+ def test_delete_image(self):
+ pass
+
+ def test_create_image(self):
+ pass
+
+
diff --git a/nova/tests/api/rackspace/servers.py b/nova/tests/api/rackspace/servers.py
new file mode 100644
index 000000000..6d628e78a
--- /dev/null
+++ b/nova/tests/api/rackspace/servers.py
@@ -0,0 +1,58 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova.api.rackspace import servers
+from nova.tests.api.test_helper import *
+
+class ServersTest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+
+ def test_get_server_list(self):
+ pass
+
+ def test_create_instance(self):
+ pass
+
+ def test_get_server_by_id(self):
+ pass
+
+ def test_get_backup_schedule(self):
+ pass
+
+ def test_get_server_details(self):
+ pass
+
+ def test_get_server_ips(self):
+ pass
+
+ def test_server_reboot(self):
+ pass
+
+ def test_server_rebuild(self):
+ pass
+
+ def test_server_resize(self):
+ pass
+
+ def test_delete_server_instance(self):
+ pass
diff --git a/nova/tests/api/rackspace/sharedipgroups.py b/nova/tests/api/rackspace/sharedipgroups.py
new file mode 100644
index 000000000..b4b281db7
--- /dev/null
+++ b/nova/tests/api/rackspace/sharedipgroups.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova.api.rackspace import sharedipgroups
+from nova.tests.api.test_helper import *
+
+class SharedIpGroupsTest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+
+ def test_get_shared_ip_groups(self):
+ pass
+
+ def test_create_shared_ip_group(self):
+ pass
+
+ def test_delete_shared_ip_group(self):
+ pass
+
+
+
diff --git a/nova/tests/api/test_helper.py b/nova/tests/api/test_helper.py
new file mode 100644
index 000000000..8151a4af6
--- /dev/null
+++ b/nova/tests/api/test_helper.py
@@ -0,0 +1,7 @@
+import webob.dec
+
+class APIStub(object):
+ """Class to verify request and mark it was called."""
+ @webob.dec.wsgify
+ def __call__(self, req):
+ return req.path_info
diff --git a/nova/wsgi_test.py b/nova/tests/api/wsgi_test.py
index 786dc1bce..786dc1bce 100644
--- a/nova/wsgi_test.py
+++ b/nova/tests/api/wsgi_test.py
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
deleted file mode 100644
index 130516c66..000000000
--- a/nova/tests/model_unittest.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime, timedelta
-import logging
-import time
-
-from nova import flags
-from nova import test
-from nova import utils
-from nova.compute import model
-
-
-FLAGS = flags.FLAGS
-
-
-class ModelTestCase(test.TrialTestCase):
- def setUp(self):
- super(ModelTestCase, self).setUp()
- self.flags(connection_type='fake',
- fake_storage=True)
-
- def tearDown(self):
- model.Instance('i-test').destroy()
- model.Host('testhost').destroy()
- model.Daemon('testhost', 'nova-testdaemon').destroy()
-
- def create_instance(self):
- inst = model.Instance('i-test')
- inst['reservation_id'] = 'r-test'
- inst['launch_time'] = '10'
- inst['user_id'] = 'fake'
- inst['project_id'] = 'fake'
- inst['instance_type'] = 'm1.tiny'
- inst['mac_address'] = utils.generate_mac()
- inst['ami_launch_index'] = 0
- inst['private_dns_name'] = '10.0.0.1'
- inst.save()
- return inst
-
- def create_host(self):
- host = model.Host('testhost')
- host.save()
- return host
-
- def create_daemon(self):
- daemon = model.Daemon('testhost', 'nova-testdaemon')
- daemon.save()
- return daemon
-
- def create_session_token(self):
- session_token = model.SessionToken('tk12341234')
- session_token['user'] = 'testuser'
- session_token.save()
- return session_token
-
- def test_create_instance(self):
- """store with create_instace, then test that a load finds it"""
- instance = self.create_instance()
- old = model.Instance(instance.identifier)
- self.assertFalse(old.is_new_record())
-
- def test_delete_instance(self):
- """create, then destroy, then make sure loads a new record"""
- instance = self.create_instance()
- instance.destroy()
- newinst = model.Instance('i-test')
- self.assertTrue(newinst.is_new_record())
-
- def test_instance_added_to_set(self):
- """create, then check that it is listed in global set"""
- instance = self.create_instance()
- found = False
- for x in model.InstanceDirectory().all:
- if x.identifier == 'i-test':
- found = True
- self.assert_(found)
-
- def test_instance_associates_project(self):
- """create, then check that it is listed for the project"""
- instance = self.create_instance()
- found = False
- for x in model.InstanceDirectory().by_project(instance.project):
- if x.identifier == 'i-test':
- found = True
- self.assert_(found)
-
- def test_instance_associates_ip(self):
- """create, then check that it is listed for the ip"""
- instance = self.create_instance()
- found = False
- x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
- self.assertEqual(x.identifier, 'i-test')
-
- def test_instance_associates_node(self):
- """create, then check that it is listed for the host"""
- instance = self.create_instance()
- found = False
- for x in model.InstanceDirectory().by_node(FLAGS.host):
- if x.identifier == 'i-test':
- found = True
- self.assertFalse(found)
- instance['host'] = 'test_node'
- instance.save()
- for x in model.InstanceDirectory().by_node('test_node'):
- if x.identifier == 'i-test':
- found = True
- self.assert_(found)
-
-
- def test_host_class_finds_hosts(self):
- host = self.create_host()
- self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
-
- def test_host_class_doesnt_find_missing_hosts(self):
- rv = model.Host.lookup('woahnelly')
- self.assertEqual(None, rv)
-
- def test_create_host(self):
- """store with create_host, then test that a load finds it"""
- host = self.create_host()
- old = model.Host(host.identifier)
- self.assertFalse(old.is_new_record())
-
- def test_delete_host(self):
- """create, then destroy, then make sure loads a new record"""
- instance = self.create_host()
- instance.destroy()
- newinst = model.Host('testhost')
- self.assertTrue(newinst.is_new_record())
-
- def test_host_added_to_set(self):
- """create, then check that it is included in list"""
- instance = self.create_host()
- found = False
- for x in model.Host.all():
- if x.identifier == 'testhost':
- found = True
- self.assert_(found)
-
- def test_create_daemon_two_args(self):
- """create a daemon with two arguments"""
- d = self.create_daemon()
- d = model.Daemon('testhost', 'nova-testdaemon')
- self.assertFalse(d.is_new_record())
-
- def test_create_daemon_single_arg(self):
- """Create a daemon using the combined host:bin format"""
- d = model.Daemon("testhost:nova-testdaemon")
- d.save()
- d = model.Daemon('testhost:nova-testdaemon')
- self.assertFalse(d.is_new_record())
-
- def test_equality_of_daemon_single_and_double_args(self):
- """Create a daemon using the combined host:bin arg, find with 2"""
- d = model.Daemon("testhost:nova-testdaemon")
- d.save()
- d = model.Daemon('testhost', 'nova-testdaemon')
- self.assertFalse(d.is_new_record())
-
- def test_equality_daemon_of_double_and_single_args(self):
- """Create a daemon using the combined host:bin arg, find with 2"""
- d = self.create_daemon()
- d = model.Daemon('testhost:nova-testdaemon')
- self.assertFalse(d.is_new_record())
-
- def test_delete_daemon(self):
- """create, then destroy, then make sure loads a new record"""
- instance = self.create_daemon()
- instance.destroy()
- newinst = model.Daemon('testhost', 'nova-testdaemon')
- self.assertTrue(newinst.is_new_record())
-
- def test_daemon_heartbeat(self):
- """Create a daemon, sleep, heartbeat, check for update"""
- d = self.create_daemon()
- ts = d['updated_at']
- time.sleep(2)
- d.heartbeat()
- d2 = model.Daemon('testhost', 'nova-testdaemon')
- ts2 = d2['updated_at']
- self.assert_(ts2 > ts)
-
- def test_daemon_added_to_set(self):
- """create, then check that it is included in list"""
- instance = self.create_daemon()
- found = False
- for x in model.Daemon.all():
- if x.identifier == 'testhost:nova-testdaemon':
- found = True
- self.assert_(found)
-
- def test_daemon_associates_host(self):
- """create, then check that it is listed for the host"""
- instance = self.create_daemon()
- found = False
- for x in model.Daemon.by_host('testhost'):
- if x.identifier == 'testhost:nova-testdaemon':
- found = True
- self.assertTrue(found)
-
- def test_create_session_token(self):
- """create"""
- d = self.create_session_token()
- d = model.SessionToken(d.token)
- self.assertFalse(d.is_new_record())
-
- def test_delete_session_token(self):
- """create, then destroy, then make sure loads a new record"""
- instance = self.create_session_token()
- instance.destroy()
- newinst = model.SessionToken(instance.token)
- self.assertTrue(newinst.is_new_record())
-
- def test_session_token_added_to_set(self):
- """create, then check that it is included in list"""
- instance = self.create_session_token()
- found = False
- for x in model.SessionToken.all():
- if x.identifier == instance.token:
- found = True
- self.assert_(found)
-
- def test_session_token_associates_user(self):
- """create, then check that it is listed for the user"""
- instance = self.create_session_token()
- found = False
- for x in model.SessionToken.associated_to('user', 'testuser'):
- if x.identifier == instance.identifier:
- found = True
- self.assertTrue(found)
-
- def test_session_token_generation(self):
- instance = model.SessionToken.generate('username', 'TokenType')
- self.assertFalse(instance.is_new_record())
-
- def test_find_generated_session_token(self):
- instance = model.SessionToken.generate('username', 'TokenType')
- found = model.SessionToken.lookup(instance.identifier)
- self.assert_(found)
-
- def test_update_session_token_expiry(self):
- instance = model.SessionToken('tk12341234')
- oldtime = datetime.utcnow()
- instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
- instance.update_expiry()
- expiry = utils.parse_isotime(instance['expiry'])
- self.assert_(expiry > datetime.utcnow())
-
- def test_session_token_lookup_when_expired(self):
- instance = model.SessionToken.generate("testuser")
- instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
- instance.save()
- inst = model.SessionToken.lookup(instance.identifier)
- self.assertFalse(inst)
-
- def test_session_token_lookup_when_not_expired(self):
- instance = model.SessionToken.generate("testuser")
- inst = model.SessionToken.lookup(instance.identifier)
- self.assert_(inst)
-
- def test_session_token_is_expired_when_expired(self):
- instance = model.SessionToken.generate("testuser")
- instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
- self.assert_(instance.is_expired())
-
- def test_session_token_is_expired_when_not_expired(self):
- instance = model.SessionToken.generate("testuser")
- self.assertFalse(instance.is_expired())
-
- def test_session_token_ttl(self):
- instance = model.SessionToken.generate("testuser")
- now = datetime.utcnow()
- delta = timedelta(hours=1)
- instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
- # give 5 seconds of fuzziness
- self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index bdd77713a..51b9aeaad 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -19,10 +19,13 @@
Tests For Scheduler
"""
+import mox
+
from nova import db
from nova import flags
from nova import service
from nova import test
+from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
@@ -32,11 +35,45 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_instances', 'nova.scheduler.simple')
+class TestDriver(driver.Scheduler):
+ """Scheduler Driver for Tests"""
+ def schedule(context, topic, *args, **kwargs):
+ return 'fallback_host'
+
+ def schedule_named_method(context, topic, num):
+ return 'named_host'
-class SimpleSchedulerTestCase(test.TrialTestCase):
+class SchedulerTestCase(test.TrialTestCase):
"""Test case for scheduler"""
+ def setUp(self): # pylint: disable=C0103
+ super(SchedulerTestCase, self).setUp()
+ self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')
+
+ def test_fallback(self):
+ scheduler = manager.SchedulerManager()
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ rpc.cast('topic.fallback_host',
+ {'method': 'noexist',
+ 'args': {'context': None,
+ 'num': 7}})
+ self.mox.ReplayAll()
+ scheduler.noexist(None, 'topic', num=7)
+
+ def test_named_method(self):
+ scheduler = manager.SchedulerManager()
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ rpc.cast('topic.named_host',
+ {'method': 'named_method',
+ 'args': {'context': None,
+ 'num': 7}})
+ self.mox.ReplayAll()
+ scheduler.named_method(None, 'topic', num=7)
+
+
+class SimpleDriverTestCase(test.TrialTestCase):
+ """Test case for simple driver"""
def setUp(self): # pylint: disable-msg=C0103
- super(SimpleSchedulerTestCase, self).setUp()
+ super(SimpleDriverTestCase, self).setUp()
self.flags(connection_type='fake',
max_instances=4,
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
@@ -83,8 +120,9 @@ class SimpleSchedulerTestCase(test.TrialTestCase):
def test_least_busy_host_gets_instance(self):
instance_id = self._create_instance()
self.service1.run_instance(self.context, instance_id)
- host = self.scheduler.driver.pick_compute_host(self.context,
- instance_id)
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ 'compute',
+ instance_id)
self.assertEqual(host, 'host2')
self.service1.terminate_instance(self.context, instance_id)
@@ -100,8 +138,9 @@ class SimpleSchedulerTestCase(test.TrialTestCase):
instance_ids2.append(instance_id)
instance_id = self._create_instance()
self.assertRaises(driver.NoValidHost,
- self.scheduler.driver.pick_compute_host,
+ self.scheduler.driver.schedule_run_instance,
self.context,
+ 'compute',
instance_id)
for instance_id in instance_ids1:
self.service1.terminate_instance(self.context, instance_id)
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 274e74b5b..590d760b9 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -47,34 +47,50 @@ class ServiceTestCase(test.BaseTestCase):
self.mox.StubOutWithMock(service, 'db')
def test_create(self):
+ host='foo'
+ binary='nova-fake'
+ topic='fake'
self.mox.StubOutWithMock(rpc,
'AdapterConsumer',
use_mock_anything=True)
self.mox.StubOutWithMock(
service.task, 'LoopingCall', use_mock_anything=True)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
- topic='fake',
+ topic=topic,
proxy=mox.IsA(service.Service)).AndReturn(
rpc.AdapterConsumer)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
- topic='fake.%s' % FLAGS.host,
+ topic='%s.%s' % (topic, host),
proxy=mox.IsA(service.Service)).AndReturn(
rpc.AdapterConsumer)
# Stub out looping call a bit needlessly since we don't have an easy
# way to cancel it (yet) when the tests finishes
- service.task.LoopingCall(
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
service.task.LoopingCall)
service.task.LoopingCall.start(interval=mox.IgnoreArg(),
now=mox.IgnoreArg())
rpc.AdapterConsumer.attach_to_twisted()
rpc.AdapterConsumer.attach_to_twisted()
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0}
+ service_ref = {'host': host,
+ 'binary': binary,
+ 'report_count': 0,
+ 'id': 1}
+
+ service.db.service_get_by_args(None,
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(None,
+ service_create).AndReturn(service_ref['id'])
self.mox.ReplayAll()
- app = service.Service.create(bin_name='nova-fake')
+ app = service.Service.create(host=host, binary=binary)
self.assert_(app)
# We're testing sort of weird behavior in how report_state decides
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 931355cbd..621b7d576 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -23,7 +23,7 @@ A connection to a hypervisor (e.g. KVM) through libvirt.
import json
import logging
-import os.path
+import os
import shutil
from twisted.internet import defer
@@ -232,8 +232,11 @@ class LibvirtConnection(object):
f.write(libvirt_xml)
f.close()
- user = manager.AuthManager().get_user(inst.user_id)
- project = manager.AuthManager().get_project(inst.project_id)
+ os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660))
+
+ user = manager.AuthManager().get_user(inst['user_id'])
+ project = manager.AuthManager().get_project(inst['project_id'])
+
if not os.path.exists(basepath('disk')):
yield images.fetch(inst.image_id, basepath('disk-raw'), user, project)
if not os.path.exists(basepath('kernel')):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index bec0a7b1c..8a4e2a9f4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -241,6 +241,9 @@ class Serializer(object):
"""
self.environ = environ
self.metadata = metadata or {}
+ self._methods = {
+ 'application/json': self._to_json,
+ 'application/xml': self._to_xml}
def to_content_type(self, data):
"""
@@ -250,20 +253,20 @@ class Serializer(object):
"""
mimetype = 'application/xml'
# TODO(gundlach): determine mimetype from request
-
- if mimetype == 'application/json':
- import json
- return json.dumps(data)
- elif mimetype == 'application/xml':
- metadata = self.metadata.get('application/xml', {})
- # We expect data to contain a single key which is the XML root.
- root_key = data.keys()[0]
- from xml.dom import minidom
- doc = minidom.Document()
- node = self._to_xml_node(doc, metadata, root_key, data[root_key])
- return node.toprettyxml(indent=' ')
- else:
- return repr(data)
+ return self._methods.get(mimetype, repr)(data)
+
+ def _to_json(self, data):
+ import json
+ return json.dumps(data)
+
+ def _to_xml(self, data):
+ metadata = self.metadata.get('application/xml', {})
+ # We expect data to contain a single key which is the XML root.
+ root_key = data.keys()[0]
+ from xml.dom import minidom
+ doc = minidom.Document()
+ node = self._to_xml_node(doc, metadata, root_key, data[root_key])
+ return node.toprettyxml(indent=' ')
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
diff --git a/run_tests.py b/run_tests.py
index 5d76a74ca..0068ba32c 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -55,7 +55,6 @@ from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
-#from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
diff --git a/setup.py b/setup.py
index 25252e8f4..1767b00f4 100644
--- a/setup.py
+++ b/setup.py
@@ -29,8 +29,10 @@ class local_sdist(sdist):
def run(self):
if os.path.isdir('.bzr'):
# We're in a bzr branch
- log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
- stdout=subprocess.PIPE)
+ env = os.environ.copy()
+ env['BZR_PLUGIN_PATH'] = os.path.abspath('./bzrplugins')
+ log_cmd = subprocess.Popen(["bzr", "log", "--novalog"],
+ stdout=subprocess.PIPE, env=env)
changelog = log_cmd.communicate()[0]
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(changelog)