From 68e34c790612f3250bd902cc87a0ab9d3d69abfb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 13 May 2011 15:36:42 -0500 Subject: first cut at weighted-sum tests --- nova/scheduler/zone_aware_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova') diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..38b395d52 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -89,6 +89,8 @@ class ZoneAwareScheduler(driver.Scheduler): # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... -- cgit From a4ea9ac61568ce5f8300a5ba138f0ac10c79b43c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 16 May 2011 15:59:01 -0700 Subject: fix for lp783705 - remove nwfilters when instance is terminated --- nova/tests/test_virt.py | 42 ++++++++++++++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 27 +++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..babb5de9b 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -1045,3 +1045,45 @@ class NWFilterTestCase(test.TestCase): network_info, "fake") self.assertEquals(len(result), 3) + + def test_unfilter_instance_undefines_nwfilters(self): + class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + fakefilter = FakeNWFilter() + + def _nwfilterLookupByName(ignore): + return fakefilter + + def _filterDefineXMLMock(xml): + return True + + admin_ctxt = context.get_admin_context() + + self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + + instance_ref = self._create_instance() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine 2 filters: instance and instance-secgroup + self.assertEquals(fakefilter.undefine_call_count, 2) + + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..706973176 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1835,8 +1835,30 @@ class NWFilterFirewall(FirewallDriver): tpool.execute(self._conn.nwfilterDefineXML, xml) def unfilter_instance(self, instance): - # Nothing to do - pass + """Clear out the nwfilter rules.""" + network_info = _get_network_info(instance) + instance_name = instance.name + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + + try: + self._conn.nwfilterLookupByName(instance_filter_name).\ + undefine() + except 
libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_filter_name)s) for ' + '%(instance_name)s is not found.') % locals()) + + instance_secgroup_filter_name =\ + '%s-secgroup' % (self._instance_filter_name(instance)) + + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\ + undefine() + except libvirt.libvirtError: + # This will happen if called by IptablesFirewallDriver + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for ' + '%(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2000,6 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() + self.nwfilter.unfilter_instance(instance) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 0e2aba9e9869a66a1c3a6ece0fb08be631daa5bf Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:38:44 +0400 Subject: Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. (should be per-api-request) --- nova/auth/ldapdriver.py | 93 ++++++++++++++++++++++++++++++++++++++++++++----- nova/auth/manager.py | 20 +++++++---- 2 files changed, 98 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3f8432851..7849d941e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,7 +24,9 @@ other backends by creating another class that exposes the same public methods. """ +import functools import sys +import threading from nova import exception from nova import flags @@ -85,6 +87,7 @@ def _clean(attr): def sanitize(fn): """Decorator to sanitize all args""" + @functools.wraps(fn) def _wrapped(self, *args, **kwargs): args = [_clean(x) for x in args] kwargs = dict((k, _clean(v)) for (k, v) in kwargs) @@ -103,29 +106,74 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' + __local = threading.local() def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') - self.conn = None if FLAGS.ldap_schema_version == 1: LdapDriver.project_pattern = '(objectclass=novaProject)' LdapDriver.isadmin_attribute = 'isAdmin' LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' + self.__cache = None def __enter__(self): """Creates the connection to LDAP""" - self.conn = self.ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + # TODO(yorik-sar): Should be per-request cache, not per-driver-request + self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" - self.conn.unbind_s() + self.__cache = None return False + def __local_cache(key_fmt): + """Wrap function to cache it's result in self.__cache. + Works only with functions with one fixed argument. 
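+        Entries live only as long as a single driver request: self.__cache is
+        created in __enter__ and dropped again in __exit__.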
+ """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + + @property + def conn(self): + try: + return self.__local.conn + except AttributeError: + conn = self.ldap.initialize(FLAGS.ldap_url) + conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + self.__local.conn = conn + return conn + + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +182,30 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s'%(access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +458,7 @@ class LdapDriver(object): """Check if project exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +490,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! 
scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +636,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +649,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,10 +677,11 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a7..c71f0f161 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,6 +23,7 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 +import threading import tempfile import uuid import zipfile @@ -206,6 +207,7 @@ class AuthManager(object): """ _instance = None + __local = threading.local() def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -223,12 +225,18 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From e4f8ef67065f1de36ceadf9dd97e07fbe9fc9d83 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:39:19 +0400 Subject: Fixed mistyped key, caused huge performance leak. --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 519793643..5c536f6d8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] -- cgit From 2fcc10656222bea6056742ef943c1b82724c0b56 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:45:48 +0400 Subject: PEP8 fixes. 
---
 nova/auth/ldapdriver.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'nova')

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 7849d941e..9fe0165a1 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -182,7 +182,7 @@ class LdapDriver(object):
     @sanitize
     def get_user_from_access_key(self, access):
         """Retrieve user by access key"""
-        cache_key = 'uak_dn_%s'%(access,)
+        cache_key = 'uak_dn_%s' % (access,)
         user_dn = self.mc.get(cache_key)
         if user_dn:
             user = self.__to_user(
@@ -205,7 +205,8 @@ class LdapDriver(object):
     def get_project(self, pid):
         """Retrieve project by id"""
         dn = self.__project_to_dn(pid, search=False)
-        attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE)
+        attr = self.__find_object(dn, LdapDriver.project_pattern,
+                                  scope=self.ldap.SCOPE_BASE)
         return self.__to_project(attr)
-- 
cgit

From 23bbbfcd3317859d44dba7da7996a978ad922543 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Tue, 17 May 2011 10:45:19 -0500
Subject: First cut at least cost scheduler

---
 nova/scheduler/least_cost.py            | 79 +++++++++++++++++++++++++++++++++
 nova/tests/test_least_cost_scheduler.py | 39 ++++++++++++++++
 2 files changed, 118 insertions(+)
 create mode 100644 nova/scheduler/least_cost.py
 create mode 100644 nova/tests/test_least_cost_scheduler.py

(limited to 'nova')

diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
new file mode 100644
index 000000000..75dde81ca
--- /dev/null
+++ b/nova/scheduler/least_cost.py
@@ -0,0 +1,79 @@
+import collections
+
+# TODO(sirp): this should be just `zone_aware` to match naming scheme
+# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware`
+# module
+from nova.scheduler import zone_aware_scheduler
+
+class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    def get_cost_fns(self):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        cost_fns = []
+
+        return cost_fns
+
+    def weigh_hosts(self, num, specs, hosts):
+        """
+        Returns a list of dictionaries of form:
+            [ {weight: weight, hostname: hostname} ]
+        """
+        # FIXME(sirp): weigh_hosts should handle more than just instances
+        cost_fns = []
+        hosts = []
+        cost_hosts = weighted_sum(domain=hosts, weighted_fns=self.get_cost_fns())
+
+        # TODO convert hosts back to hostnames
+        weight_hostnames = []
+        return weight_hostnames
+
+def normalize_list(L):
+    """Normalize an array of numbers such that each element satisfies:
+        0 <= e <= 1
+    """
+    if not L:
+        return L
+    max_ = max(L)
+    if max_ > 0:
+        return [(float(e) / max_) for e in L]
+    return L
+
+def weighted_sum(domain, weighted_fns, normalize=True):
+    """
+    Use the weighted-sum method to compute a score for an array of objects.
+    Normalize the results of the objective-functions so that the weights are
+    meaningful regardless of objective-function's range.
+
+    domain - input to be scored
+    weighted_fns - list of weights and functions like:
+        [(weight, objective-functions)]
+
+    Returns an unsorted list like: [(score, elem)]
+    """
+    # Table of form:
+    #   { domain1: [score1, score2, ..., scoreM]
+    #     ...
+    #     domainN: [score1, score2, ..., scoreM] }
+    score_table = collections.defaultdict(list)
+
+    for weight, fn in weighted_fns:
+        scores = [fn(elem) for elem in domain]
+
+        if normalize:
+            norm_scores = normalize_list(scores)
+        else:
+            norm_scores = scores
+
+        for idx, score in enumerate(norm_scores):
+            weighted_score = score * weight
+            score_table[idx].append(weighted_score)
+
+    # Sum rows in table to compute score for each element in domain
+    domain_scores = []
+    for idx in sorted(score_table):
+        elem_score = sum(score_table[idx])
+        elem = domain[idx]
+        domain_scores.append(elem_score)
+
+    return domain_scores
diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py
new file mode 100644
index 000000000..a3a18a09f
--- /dev/null
+++ b/nova/tests/test_least_cost_scheduler.py
@@ -0,0 +1,39 @@
+from nova import test
+from nova.scheduler import least_cost
+
+MB = 1024 * 1024
+
+class FakeHost(object):
+    def __init__(self, host_id, free_ram, io):
+        self.id = host_id
+        self.free_ram = free_ram
+        self.io = io
+
+class WeightedSumTest(test.TestCase):
+    def test_empty_domain(self):
+        domain = []
+        weighted_fns = []
+        result = least_cost.weighted_sum(domain, weighted_fns)
+        expected = []
+        self.assertEqual(expected, result)
+
+    def test_basic_costing(self):
+        hosts = [
+            FakeHost(1, 512 * MB, 100),
+            FakeHost(2, 256 * MB, 400),
+            FakeHost(3, 512 * MB, 100)
+        ]
+
+        weighted_fns = [
+            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
+            (2, lambda h: h.io),        # Avoid high I/O
+        ]
+
+        costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns)
+
+        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
+        #   cost = weight * (score/max_score) = 1 * (256/512) = 0.5
+        # Each 100 iops of IO adds 0.5 points by way of:
+        #   cost = 2 * (100/400) = 2 * 0.25 = 0.5
+        expected = [1.5, 2.5, 1.5]
+        self.assertEqual(expected, costs)
-- 
cgit

From a4035df4d031d3d90f3f7ce938ff0b8305be6773 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Tue, 17 May 2011 17:27:04 -0500
Subject: Adding fill first cost function

---
 nova/scheduler/least_cost.py            | 12 ++++++++++++
 nova/test.py                            | 16 +++++++++++++---
 nova/tests/test_least_cost_scheduler.py | 33 +++++++++++++++++++++++++++++++--
 3 files changed, 56 insertions(+), 5 deletions(-)

(limited to 'nova')

diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index e47951f17..79376c358 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -34,6 +34,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions',
         'Which cost functions the LeastCostScheduler should use.')
 
+# TODO(sirp): Once we have enough of these rules, we can break them out into a
+# cost_functions.py file (perhaps in a least_cost_scheduler directory)
 flags.DEFINE_integer('noop_cost_fn_weight', 1,
         'How much weight to give the noop cost function')
 
 def noop_cost_fn(host):
     """Return a pre-weight cost of 1 for each host"""
     return 1
 
+flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+        'How much weight to give the fill-first cost function')
+
+def fill_first_cost_fn(host):
+    """Prefer hosts that have less ram available, filter_hosts will exclude
+    hosts that don't have enough ram"""
+    hostname, caps = host
+    free_mem = caps['compute']['host_memory_free']
+    return free_mem
+
 
 class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
     def get_cost_fns(self):
         """Returns a list of tuples containing weights and cost functions to
diff --git a/nova/test.py b/nova/test.py
index 4deb2a175..401f82d38 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -181,7 +181,7 @@ class TestCase(unittest.TestCase):
         wsgi.Server.start = _wrapped_start
 
     # Useful assertions
-    def assertDictMatch(self, d1, d2):
+    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
         """Assert two dicts are equivalent.
 
         This is a 'deep' match in the sense that it handles nested
@@ -212,15 +212,24 @@ class TestCase(unittest.TestCase):
         for key in d1keys:
             d1value = d1[key]
             d2value = d2[key]
+
+            try:
+                within_tolerance = abs(float(d1value) - float(d2value)) < tolerance
+            except ValueError:
+                # If both values aren't convertable to float, just ignore
+                within_tolerance = False
+
             if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                 self.assertDictMatch(d1value, d2value)
             elif 'DONTCARE' in (d1value, d2value):
                 continue
+            elif approx_equal and within_tolerance:
+                continue
             elif d1value != d2value:
                 raise_assertion("d1['%(key)s']=%(d1value)s != "
                                 "d2['%(key)s']=%(d2value)s" % locals())
 
-    def assertDictListMatch(self, L1, L2):
+    def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
         """Assert a list of dicts are equivalent."""
         def raise_assertion(msg):
             L1str = str(L1)
@@ -236,4 +245,5 @@ class TestCase(unittest.TestCase):
                             'len(L2)=%(L2count)d' % locals())
 
         for d1, d2 in zip(L1, L2):
-            self.assertDictMatch(d1, d2)
+            self.assertDictMatch(d1, d2, approx_equal=approx_equal,
+                                 tolerance=tolerance)
diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py
index b2318a3bf..b7bcd2f02 100644
--- a/nova/tests/test_least_cost_scheduler.py
+++ b/nova/tests/test_least_cost_scheduler.py
@@ -119,7 +119,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
     def assertWeights(self, expected, num, request_spec, hosts):
         weighted = self.sched.weigh_hosts(num, request_spec, hosts)
-        self.assertDictListMatch(weighted, expected)
+        self.assertDictListMatch(weighted, expected, approx_equal=True)
 
     def test_no_hosts(self):
         num = 1
@@ -137,12 +137,41 @@ class LeastCostSchedulerTestCase(test.TestCase):
         num = 1
         request_spec = {}
-        hosts = self.sched.filter_hosts(num, request_spec)
         expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts]
         self.assertWeights(expected, num, request_spec, hosts)
 
+    def test_cost_fn_weights(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.noop_cost_fn'
+        ]
+        FLAGS.noop_cost_fn_weight = 2
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+        expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts]
+        self.assertWeights(expected, num, request_spec, hosts)
+
+    def test_fill_first_cost_fn(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.fill_first_cost_fn'
+        ]
+        FLAGS.fill_first_cost_fn_weight = 1
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+
+        expected = []
+        for idx, (hostname, caps) in enumerate(hosts):
+            # Costs are normalized so over 10 hosts, each host with increasing
+            # free ram will cost 1/N more.
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) -- cgit From d6fbe417d7f8f7540cffe8c941c0591a22483978 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:44:08 -0500 Subject: Using import_class to import filter_host driver --- nova/scheduler/host_filter.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 7cb41a433..117f08242 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler +from nova import utils LOG = logging.getLogger('nova.scheduler.host_filter') @@ -283,11 +284,13 @@ def choose_driver(driver_name=None): if not driver_name: driver_name = FLAGS.default_host_filter_driver - # FIXME(sirp): use utils.import_class here - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + try: + driver = utils.import_object(driver_name) + return driver + except exception.ClassNotFound: + raise exception.SchedulerHostFilterDriverNotFound( + driver_name=driver_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): -- cgit From 439787e7588b2409f319f2d86a41a3581cff8861 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:15:31 -0500 Subject: Pep8 fixes --- nova/exception.py | 3 +++ nova/scheduler/least_cost.py | 30 +++++++++++++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 11 ++++++----- nova/test.py | 5 +++-- nova/tests/test_least_cost_scheduler.py | 18 +++++++++++------- 5 files changed, 44 insertions(+), 23 deletions(-) (limited to 'nova') diff --git a/nova/exception.py b/nova/exception.py index 63ed6dd5e..16c443c61 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -460,15 +460,18 @@ class FlavorNotFound(NotFound): class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") + # TODO(sirp): move these into the schedule classes since they are internal? class SchedulerHostFilterDriverNotFound(NotFound): message = _("Scheduler Host Filter Driver %(driver_name)s could" " not be found.") + class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") + class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 79376c358..629fe2e42 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -13,16 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. """ -Helpful docstring here +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. 
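+
+For example, with two cost functions weighted (1, 2), a host whose normalized
+scores come out as (1.0, 0.25) is assigned cost 1 * 1.0 + 2 * 0.25 = 1.5, and
+the host with the smallest such sum is selected.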
""" import collections from nova import flags from nova import log as logging -# TODO(sirp): this should be just `zone_aware` to match naming scheme -# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` -# module from nova.scheduler import zone_aware_scheduler from nova import utils @@ -38,6 +41,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') + + def noop_cost_fn(host): """Return a pre-weight cost of 1 for each host""" return 1 @@ -45,6 +50,8 @@ def noop_cost_fn(host): flags.DEFINE_integer('fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') + + def fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" @@ -68,7 +75,7 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( cost_fn_str=cost_fn_str) - + try: weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) except AttributeError: @@ -82,17 +89,22 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname} ]""" + # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, _ in hosts] + hostnames = [hostname for hostname, caps in hosts] cost_fns = self.get_cost_fns() costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - + weighted = [] + weight_log = [] for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname) weighted.append(weight_dict) - return weighted + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted def normalize_list(L): @@ -110,7 +122,7 @@ def weighted_sum(domain, weighted_fns, normalize=True): """Use the weighted-sum method to compute a score for an array of objects. Normalize the results of the objective-functions so that the weights are meaningful regardless of objective-function's range. - + domain - input to be scored weighted_fns - list of weights and functions like: [(weight, objective-functions)] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fa5b3b1b6..a1a68ce5e 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -80,7 +80,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass @@ -117,11 +117,11 @@ class ZoneAwareScheduler(driver.Scheduler): # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, request_spec) - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or # resources, so that we can apply different objective functions to it + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... 
@@ -145,8 +145,9 @@ class ZoneAwareScheduler(driver.Scheduler): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states return [(host, services) - for host, services in self.zone_manager.service_states.iteritems()] + for host, services in service_states.iteritems()] def weigh_hosts(self, num, request_spec, hosts): """Derived classes may override this to provide more sophisticated diff --git a/nova/test.py b/nova/test.py index 401f82d38..00a16dd68 100644 --- a/nova/test.py +++ b/nova/test.py @@ -212,9 +212,10 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] - + try: - within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance except ValueError: # If both values aren't convertable to float, just ignore within_tolerance = False diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b7bcd2f02..c8ce7892f 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -30,6 +30,7 @@ class FakeHost(object): self.free_ram = free_ram self.io = io + class WeightedSumTestCase(test.TestCase): def test_empty_domain(self): domain = [] @@ -50,14 +51,15 @@ class WeightedSumTestCase(test.TestCase): (2, lambda h: h.io), # Avoid high I/O ] - costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) # Each 256 MB unit of free-ram contributes 0.5 points by way of: # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 # Each 100 iops of IO adds 0.5 points by way of: # cost = 2 * (100/400) = 2 * 0.25 = 0.5 expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) + self.assertEqual(expected, costs) # TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing @@ -65,6 +67,7 @@ class WeightedSumTestCase(test.TestCase): class FakeZoneManager: pass + class LeastCostSchedulerTestCase(test.TestCase): def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -116,7 +119,6 @@ class LeastCostSchedulerTestCase(test.TestCase): #FLAGS.default_host_filter_driver = self.old_flag super(LeastCostSchedulerTestCase, self).tearDown() - def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) @@ -138,8 +140,9 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - - expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): @@ -152,7 +155,8 @@ class LeastCostSchedulerTestCase(test.TestCase): request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_fill_first_cost_fn(self): @@ -164,7 +168,7 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - + expected = [] for idx, (hostname, caps) in enumerate(hosts): # Costs are normalized so over 10 hosts, each host with increasing -- cgit From 4ba215224e6c75037fd4f20be57d632da5d07469 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:32:56 -0500 Subject: Moving tests into scheduler subdirectory --- nova/scheduler/host_filter.py | 5 - nova/tests/scheduler/test_scheduler.py | 1118 +++++++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 121 +++ nova/tests/test_host_filter.py | 211 ---- nova/tests/test_least_cost_scheduler.py | 181 ---- nova/tests/test_scheduler.py | 1118 --------------------- nova/tests/test_zone_aware_scheduler.py | 121 --- 7 files changed, 1239 insertions(+), 1636 deletions(-) create mode 100644 nova/tests/scheduler/test_scheduler.py create mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py delete mode 100644 nova/tests/test_host_filter.py delete mode 100644 nova/tests/test_least_cost_scheduler.py delete mode 100644 nova/tests/test_scheduler.py delete mode 100644 nova/tests/test_zone_aware_scheduler.py (limited to 'nova') diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 117f08242..79e9f3159 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -313,8 +313,3 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): instance_type = request_spec['instance_type'] name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py new file mode 100644 index 000000000..54b3f80fb --- /dev/null +++ b/nova/tests/scheduler/test_scheduler.py @@ -0,0 +1,1118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 
+ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler +""" + +import datetime +import mox +import novaclient.exceptions +import stubout +import webob + +from mox import IgnoreArg +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import api +from nova.scheduler import manager +from nova.scheduler import driver +from nova.compute import power_state +from nova.db.sqlalchemy import models + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') +flags.DECLARE('stub_network', 'nova.compute.manager') +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def _create_compute_service(self): + """Create compute-manager(ComputeNode and Service record).""" + ctxt = context.get_admin_context() + dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + s_ref = db.service_create(ctxt, dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + db.compute_node_create(ctxt, dic) + + return db.service_get(ctxt, s_ref['id']) + + def _create_instance(self, **kwargs): + """Create a test instance""" + ctxt = context.get_admin_context() + inst = {} + inst['user_id'] = 'admin' + inst['project_id'] = kwargs.get('project_id', 'fake') + inst['host'] = kwargs.get('host', 'dummy') + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['memory_mb'] = kwargs.get('memory_mb', 10) + inst['local_gb'] = kwargs.get('local_gb', 20) + return db.instance_create(ctxt, inst) + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + 
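+        # mox has been recording expectations up to this point; ReplayAll()
+        # switches it to replay mode, so the scheduler call below must issue
+        # exactly the rpc.cast recorded above.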
self.mox.ReplayAll() + scheduler.named_method(ctxt, 'topic', num=7) + + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + + self.assertRaises(exception.NotFound, scheduler.show_host_resources, + ctxt, dest) + #TODO(bcwaldon): reimplement this functionality + #c1 = (e.message.find(_("does not exist or is not a " + # "compute node.")) >= 0) + + def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" + if not keys: + keys = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used'] + + for key in keys: + if not (dic1[key] == dic2[key]): + return False + return True + + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + # result checking + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'] == {} + self.assertTrue(c1 and c2 and c3) + db.service_destroy(ctxt, s_ref['id']) + + def test_show_host_resources_works_correctly(self): + """Show_host_resources() works correctly as expected.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) + i_ref2 = self._create_instance(project_id='p-02', vcpus=3, + host=s_ref['host']) + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'].keys() == ['p-01', 'p-02'] + keys = ['vcpus', 'memory_mb', 'local_gb'] + c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) + c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) + self.assertTrue(c1 and c2 and c3 and c4 and c5) + + db.service_destroy(ctxt, s_ref['id']) + db.instance_destroy(ctxt, i_ref1['id']) + db.instance_destroy(ctxt, i_ref2['id']) + + +class ZoneSchedulerTestCase(test.TestCase): + """Test case for zone scheduler""" + def setUp(self): + super(ZoneSchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') + + def _create_service_model(self, **kwargs): + service = db.sqlalchemy.models.Service() + service.host = kwargs['host'] + service.disabled = False + service.deleted = False + service.report_count = 0 + service.binary = 'nova-compute' + service.topic = 'compute' + service.id = kwargs['id'] + service.availability_zone = kwargs['zone'] + service.created_at = datetime.datetime.utcnow() + return service + + def test_with_two_zones(self): + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + service_list = [self._create_service_model(id=1, + host='host1', + zone='zone1'), + self._create_service_model(id=2, + host='host2', + zone='zone2'), + self._create_service_model(id=3, + host='host3', + zone='zone2'), + self._create_service_model(id=4, + host='host4', + zone='zone2'), + self._create_service_model(id=5, + host='host5', + zone='zone2')] + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + arg = IgnoreArg() + 
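+        # With service_get_all_by_topic stubbed out, the zone scheduler sees
+        # exactly these five fake services instead of querying the database.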
db.service_get_all_by_topic(arg, arg).AndReturn(service_list) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + rpc.cast(ctxt, + 'compute.host1', + {'method': 'run_instance', + 'args': {'instance_id': 'i-ffffffff', + 'availability_zone': 'zone1'}}) + self.mox.ReplayAll() + scheduler.run_instance(ctxt, + 'compute', + instance_id='i-ffffffff', + availability_zone='zone1') + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True, + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() + + def _create_instance(self, **kwargs): + """Create a test instance""" + inst = {} + inst['image_id'] = 1 + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type_id'] = '1' + inst['mac_address'] = utils.generate_mac() + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['ami_launch_index'] = 0 + inst['availability_zone'] = kwargs.get('availability_zone', None) + inst['host'] = kwargs.get('host', 'dummy') + inst['memory_mb'] = kwargs.get('memory_mb', 20) + inst['local_gb'] = kwargs.get('local_gb', 30) + inst['launched_on'] = kwargs.get('launghed_on', 'dummy') + inst['state_description'] = kwargs.get('state_description', 'running') + inst['state'] = kwargs.get('state', power_state.RUNNING) + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['size'] = 1 + vol['availability_zone'] = 'test' + return db.volume_create(self.context, vol)['id'] + + def _create_compute_service(self, **kwargs): + """Create a compute service.""" + + dic = {'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + dic['host'] = kwargs.get('host', 'dummy') + s_ref = db.service_create(self.context, dic) + if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): + t = datetime.datetime.utcnow() - datetime.timedelta(0) + dic['created_at'] = kwargs.get('created_at', t) + dic['updated_at'] = kwargs.get('updated_at', t) + db.service_update(self.context, s_ref['id'], dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) + dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') + dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) + db.compute_node_create(self.context, dic) + return db.service_get(self.context, s_ref['id']) + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 
'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = 
db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + 
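+        # host1 receives the first instance below, so the simple scheduler
+        # should place the second instance on the less loaded host2.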
compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + db.instance_destroy(self.context, instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + 
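+        # host1 already holds volume_id1, so the least-gigabytes policy
+        # should pick host2 for the second volume.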
self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() + + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. + + """ + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, dic) + + # cannot check 2nd argument b/c the addresses of instance object + # is different. + driver_i = self.scheduler.driver + nocare = mox.IgnoreArg() + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_src_check(nocare, nocare) + driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + rpc.cast(self.context, + db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), + {"method": 'live_migration', "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(self.context, FLAGS.compute_topic, + instance_id=instance_id, + dest=i_ref['host']) + + i_ref = db.instance_get(self.context, instance_id) + self.assertTrue(i_ref['state_description'] == 'migrating') + db.instance_destroy(self.context, instance_id) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + + instance_id = self._create_instance(state_description='migrating') + i_ref = db.instance_get(self.context, instance_id) + + try: + self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + except exception.Invalid, e: + c = (e.message.find('is not running') > 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, {'instance_id': instance_id, + 'size': 1}) + t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', + 'topic': 'volume', 
'report_count': 0} + s_ref = db.service_create(self.context, dic) + + self.assertRaises(exception.VolumeServiceUnavailable, + self.scheduler.driver.schedule_live_migration, + self.context, instance_id, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_compute_node_not_alive(self): + """Confirms exception is raised when src compute node is not alive.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t, updated_at=t, + host=i_ref['host']) + + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_src_check, + self.context, i_ref) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_src_check_works_correctly(self): + """Confirms this method finishes with no error.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host=i_ref['host']) + + ret = self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + + self.assertTrue(ret is None) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_not_alive(self): + """Confirms exception is raised in case dest host does not exist.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t, updated_at=t, + host=i_ref['host']) + + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_same_host(self): + """Confirms exception is raised in case dest and src are same host.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host=i_ref['host']) + + self.assertRaises(exception.UnableToMigrateToSelf, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_lack_memory(self): + """Confirms exception is raised when dest doesn't have enough memory.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere', + memory_mb_used=12) + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere') + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_works_correctly(self): + """Confirms method finishes with no error.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere', + memory_mb_used=5) + + ret = self.scheduler.driver._live_migration_dest_check(self.context, + i_ref, + 'somewhere') + self.assertTrue(ret is None) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) +
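Read together, the dest-check tests above fix both the conditions and the exceptions for validating a migration target: the destination's compute service must be alive (ComputeServiceUnavailable), it must differ from the instance's current host (UnableToMigrateToSelf), and it must have enough free memory (MigrationError). A minimal sketch of checks in that shape, using assumed helper structures rather than the real _live_migration_dest_check signature:

    # Illustrative only: `services` maps hostname -> a dict with 'alive',
    # 'memory_mb' and 'memory_mb_used'; the exception classes stand in for
    # their nova.exception counterparts.
    class ComputeServiceUnavailable(Exception): pass
    class UnableToMigrateToSelf(Exception): pass
    class MigrationError(Exception): pass

    def dest_check(instance, dest, services):
        service = services.get(dest)
        # 1. The destination compute service must exist and be alive.
        if service is None or not service['alive']:
            raise ComputeServiceUnavailable(dest)
        # 2. Migrating an instance onto its current host is rejected.
        if dest == instance['host']:
            raise UnableToMigrateToSelf(instance['id'])
        # 3. The destination needs memory headroom; the fixtures above
        #    (32 MB total, 12 MB used, 20 MB instance -> MigrationError)
        #    imply the comparison is inclusive, so exactly-enough fails.
        free = service['memory_mb'] - service['memory_mb_used']
        if free <= instance['memory_mb']:
            raise MigrationError('insufficient memory on %s' % dest)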
+ def test_live_migration_common_check_service_orig_not_exists(self): + """Original host does not exist.""" + + dest = 'dummydest' + # mocks for live_migration_common_check() + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t1, updated_at=t1, + host=dest) + + # mocks for mounted_on_same_shared_storage() + fpath = '/test/20110127120000' + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + topic = FLAGS.compute_topic + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(self.context, topic, dest), + {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), + {"method": 'check_shared_storage_test_file', + "args": {'filename': fpath}}) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), topic, dest), + {"method": 'cleanup_shared_storage_test_file', + "args": {'filename': fpath}}) + + self.mox.ReplayAll() + self.assertRaises(exception.SourceHostUnavailable, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_common_check_service_different_hypervisor(self): + """Original host and dest host have different hypervisor types.""" + dest = 'dummydest' + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for original host + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for destination + s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + self.assertRaises(exception.InvalidHypervisorType, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + def test_live_migration_common_check_service_different_version(self): + """Original host and dest host have different hypervisor versions.""" + dest = 'dummydest' + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for original host + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for destination + s_ref2 = self._create_compute_service(host=dest, + hypervisor_version=12002) + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + self.assertRaises(exception.DestinationHypervisorTooOld, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + def test_live_migration_common_check_checking_cpuinfo_fail(self): + """Raise exception when original host doesn't have compatible cpu.""" + + dest = 'dummydest' + instance_id = 
self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for destination + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for original host + s_ref2 = self._create_compute_service(host=dest) + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) + rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ + AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(self.context, + i_ref, + dest) + except rpc.RemoteError, e: + c = (e.message.find(_("doesn't have compatibility to")) >= 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + +class FakeZone(object): + def __init__(self, id, api_url, username, password): + self.id = id + self.api_url = api_url + self.username = username + self.password = password + + +def zone_get_all(context): + return [ + FakeZone(1, 'http://example.com', 'bob', 'xxx'), + ] + + +class FakeRerouteCompute(api.reroute_compute): + def _call_child_zones(self, zones, function): + return [] + + def get_collection_context_and_id(self, args, kwargs): + return ("servers", None, 1) + + def unmarshall_result(self, zone_responses): + return dict(magic="found me") + + +def go_boom(self, context, instance): + raise exception.InstanceNotFound(instance_id=instance) + + +def found_instance(self, context, instance): + return dict(name='myserver') + + +class FakeResource(object): + def __init__(self, attribute_dict): + for k, v in attribute_dict.iteritems(): + setattr(self, k, v) + + def pause(self): + pass + + +class ZoneRedirectTest(test.TestCase): + def setUp(self): + super(ZoneRedirectTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + + self.stubs.Set(db, 'zone_get_all', zone_get_all) + + self.enable_zone_routing = FLAGS.enable_zone_routing + FLAGS.enable_zone_routing = True + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.enable_zone_routing = self.enable_zone_routing + super(ZoneRedirectTest, self).tearDown() + + def test_trap_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(found_instance)(None, None, 1) + except api.RedirectResult, e: + self.fail(_("Successful database hit should succeed")) + + def test_trap_not_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(go_boom)(None, None, 1) + self.assertFail(_("Should have rerouted.")) + except api.RedirectResult, e: + self.assertEquals(e.results['magic'], 'found me') + + def test_routing_flags(self): + FLAGS.enable_zone_routing = False + decorator = FakeRerouteCompute("foo") + self.assertRaises(exception.InstanceNotFound, decorator(go_boom), + None, None, 1) + + def test_get_collection_context_and_id(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.get_collection_context_and_id( + (None, 10, 20), {}), ("servers", 10, 20)) + self.assertEquals(decorator.get_collection_context_and_id( + (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) + self.assertEquals(decorator.get_collection_context_and_id( + (None,), dict(context=12, instance_id=22)), 
("servers", 12, 22)) + + def test_unmarshal_single_server(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.unmarshall_result([]), {}) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, b=2)), ]), + dict(server=dict(a=1, b=2))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, _b=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, manager=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(_a=1, manager=2)), ]), + dict(server={})) + + +class FakeServerCollection(object): + def get(self, instance_id): + return FakeResource(dict(a=10, b=20)) + + def find(self, name): + return FakeResource(dict(a=11, b=22)) + + +class FakeEmptyServerCollection(object): + def get(self, f): + raise novaclient.NotFound(1) + + def find(self, name): + raise novaclient.NotFound(2) + + +class FakeNovaClient(object): + def __init__(self, collection): + self.servers = collection + + +class DynamicNovaClientTest(test.TestCase): + def test_issue_novaclient_command_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "get", 100).a, 10) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "find", "name").b, 22) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "pause", 100), None) + + def test_issue_novaclient_command_not_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "get", 100), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "find", "name"), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. 
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py new file mode 100644 index 000000000..37169fb97 --- /dev/null +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -0,0 +1,121 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + + class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + + class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + + def fake_empty_call_zone_method(context, method, specs): + return [] + + + def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + + class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHost exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={}))
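The arithmetic behind the assertions in test_zone_aware_scheduler above: weigh_hosts returns one weighted entry for each of the three local hosts, and fake_call_zone_method contributes four weighted blobs from each of three child zones, so the merged build plan holds 3 + 12 = 15 items, of which only the 3 local ones carry a 'name' key. A toy version of that merge, illustrating the shape of the data rather than the ZoneAwareScheduler implementation:

    # Illustrative only: fold child-zone weightings into the local list
    # and order the combined build plan cheapest-first.
    def build_plan(local_weights, child_zone_results):
        plan = list(local_weights)            # dicts with 'weight' + 'name'
        for zone, entries in child_zone_results:
            plan.extend(entries)              # dicts with 'weight' + 'blob'
        plan.sort(key=lambda item: item['weight'])
        return plan

    local = [dict(weight=99, name=h) for h in ('host1', 'host2', 'host3')]
    child = [('zone1', [dict(weight=1, blob='AAAAAAA')]),
             ('zone3', [dict(weight=3, blob='LLLLLLL')])]
    plan = build_plan(local, child)
    # 3 local + 2 child entries; the cheapest child blob sorts first.
    assert len(plan) == 5 and plan[0]['blob'] == 'AAAAAAA'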
+ """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 1a2a86a79..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filter Drivers. -""" - -import json - -from nova import exception -from nova import flags -from nova import test -from nova.scheduler import host_filter - -FLAGS = flags.FLAGS - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ - 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag - super(HostFilterTestCase, self).tearDown() - - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... 
- try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: - pass - - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_driver(self): - driver = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - driver.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) - - try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py deleted file mode 100644 index c8ce7892f..000000000 --- a/nova/tests/test_least_cost_scheduler.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Least Cost Scheduler -""" - -from nova import flags -from nova import test -from nova.scheduler import least_cost - -MB = 1024 * 1024 -FLAGS = flags.FLAGS - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100) - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - -class LeastCostSchedulerTestCase(test.TestCase): - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... 
don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - #self.old_flag = FLAGS.default_host_filter_driver - #FLAGS.default_host_filter_driver = \ - # 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - #FLAGS.default_host_filter_driver = self.old_flag - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, request_spec, hosts) - - def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=1, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 2 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=2, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn' - ] - FLAGS.fill_first_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [] - for idx, (hostname, caps) in enumerate(hosts): - # Costs are normalized so over 10 hosts, each host with increasing - # free ram will cost 1/N more. Since the lowest cost host has some - # free ram, we add in the 1/N for the base_cost - weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) - - self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py deleted file mode 100644 index 54b3f80fb..000000000 --- a/nova/tests/test_scheduler.py +++ /dev/null @@ -1,1118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler -""" - -import datetime -import mox -import novaclient.exceptions -import stubout -import webob - -from mox import IgnoreArg -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import api -from nova.scheduler import manager -from nova.scheduler import driver -from nova.compute import power_state -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') -flags.DECLARE('stub_network', 'nova.compute.manager') -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') - - def _create_compute_service(self): - """Create compute-manager(ComputeNode and Service record).""" - ctxt = context.get_admin_context() - dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - s_ref = db.service_create(ctxt, dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - db.compute_node_create(ctxt, dic) - - return db.service_get(ctxt, s_ref['id']) - - def _create_instance(self, **kwargs): - """Create a test instance""" - ctxt = context.get_admin_context() - inst = {} - inst['user_id'] = 'admin' - inst['project_id'] = kwargs.get('project_id', 'fake') - inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['memory_mb'] = kwargs.get('memory_mb', 10) - inst['local_gb'] = kwargs.get('local_gb', 20) - return db.instance_create(ctxt, inst) - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - def test_show_host_resources_host_not_exit(self): - """A host given as an argument does not 
exists.""" - - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - - self.assertRaises(exception.NotFound, scheduler.show_host_resources, - ctxt, dest) - #TODO(bcwaldon): reimplement this functionality - #c1 = (e.message.find(_("does not exist or is not a " - # "compute node.")) >= 0) - - def _dic_is_equal(self, dic1, dic2, keys=None): - """Compares 2 dictionary contents(Helper method)""" - if not keys: - keys = ['vcpus', 'memory_mb', 'local_gb', - 'vcpus_used', 'memory_mb_used', 'local_gb_used'] - - for key in keys: - if not (dic1[key] == dic2[key]): - return False - return True - - def test_show_host_resources_no_project(self): - """No instance are running on the given host.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - # result checking - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'] == {} - self.assertTrue(c1 and c2 and c3) - db.service_destroy(ctxt, s_ref['id']) - - def test_show_host_resources_works_correctly(self): - """Show_host_resources() works correctly as expected.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) - i_ref2 = self._create_instance(project_id='p-02', vcpus=3, - host=s_ref['host']) - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'].keys() == ['p-01', 'p-02'] - keys = ['vcpus', 'memory_mb', 'local_gb'] - c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) - c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) - self.assertTrue(c1 and c2 and c3 and c4 and c5) - - db.service_destroy(ctxt, s_ref['id']) - db.instance_destroy(ctxt, i_ref1['id']) - db.instance_destroy(ctxt, i_ref2['id']) - - -class ZoneSchedulerTestCase(test.TestCase): - """Test case for zone scheduler""" - def setUp(self): - super(ZoneSchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') - - def _create_service_model(self, **kwargs): - service = db.sqlalchemy.models.Service() - service.host = kwargs['host'] - service.disabled = False - service.deleted = False - service.report_count = 0 - service.binary = 'nova-compute' - service.topic = 'compute' - service.id = kwargs['id'] - service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() - return service - - def test_with_two_zones(self): - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - service_list = [self._create_service_model(id=1, - host='host1', - zone='zone1'), - self._create_service_model(id=2, - host='host2', - zone='zone2'), - self._create_service_model(id=3, - host='host3', - zone='zone2'), - self._create_service_model(id=4, - host='host4', - zone='zone2'), - self._create_service_model(id=5, - host='host5', - zone='zone2')] - self.mox.StubOutWithMock(db, 'service_get_all_by_topic') - arg = IgnoreArg() - db.service_get_all_by_topic(arg, arg).AndReturn(service_list) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast(ctxt, - 'compute.host1', - 
{'method': 'run_instance', - 'args': {'instance_id': 'i-ffffffff', - 'availability_zone': 'zone1'}}) - self.mox.ReplayAll() - scheduler.run_instance(ctxt, - 'compute', - instance_id='i-ffffffff', - availability_zone='zone1') - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True, - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() - - def _create_instance(self, **kwargs): - """Create a test instance""" - inst = {} - inst['image_id'] = 1 - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['ami_launch_index'] = 0 - inst['availability_zone'] = kwargs.get('availability_zone', None) - inst['host'] = kwargs.get('host', 'dummy') - inst['memory_mb'] = kwargs.get('memory_mb', 20) - inst['local_gb'] = kwargs.get('local_gb', 30) - inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['size'] = 1 - vol['availability_zone'] = 'test' - return db.volume_create(self.context, vol)['id'] - - def _create_compute_service(self, **kwargs): - """Create a compute service.""" - - dic = {'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - dic['host'] = kwargs.get('host', 'dummy') - s_ref = db.service_create(self.context, dic) - if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) - dic['created_at'] = kwargs.get('created_at', t) - dic['updated_at'] = kwargs.get('updated_at', t) - db.service_update(self.context, s_ref['id'], dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) - dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') - dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_node_create(self.context, dic) - return db.service_get(self.context, s_ref['id']) - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - 
compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = 
self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = 
self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - db.instance_destroy(self.context, instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - 
db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() - - def test_scheduler_live_migration_with_volume(self): - """scheduler_live_migration() works correctly as expected. - - Also, checks instance state is changed from 'running' -> 'migrating'. - - """ - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, dic) - - # cannot check 2nd argument b/c the addresses of instance object - # is different. - driver_i = self.scheduler.driver - nocare = mox.IgnoreArg() - self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') - driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} - rpc.cast(self.context, - db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), - {"method": 'live_migration', "args": kwargs}) - - self.mox.ReplayAll() - self.scheduler.live_migration(self.context, FLAGS.compute_topic, - instance_id=instance_id, - dest=i_ref['host']) - - i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') - db.instance_destroy(self.context, instance_id) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_instance_not_running(self): - """The instance given by instance_id is not running.""" - - instance_id = self._create_instance(state_description='migrating') - i_ref = db.instance_get(self.context, instance_id) - - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not running') > 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - - def test_live_migration_src_check_volume_node_not_alive(self): - """Raise exception when volume node is not alive.""" - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, {'instance_id': instance_id, - 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) - dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', - 'topic': 'volume', 'report_count': 0} - s_ref = db.service_create(self.context, dic) - - 
self.assertRaises(exception.VolumeServiceUnavailable, - self.scheduler.driver.schedule_live_migration, - self.context, instance_id, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_compute_node_not_alive(self): - """Confirms src-compute node is alive.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_src_check, - self.context, i_ref) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_src_check_works_correctly(self): - """Confirms this method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - ret = self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_not_alive(self): - """Confirms exception raises in case dest host does not exist.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_same_host(self): - """Confirms exceptioin raises in case dest and src is same host.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - self.assertRaises(exception.UnableToMigrateToSelf, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_lack_memory(self): - """Confirms exception raises when dest doesn't have enough memory.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) - - self.assertRaises(exception.MigrationError, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_works_correctly(self): - """Confirms method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=5) - - ret = self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - 
- def test_live_migration_common_check_service_orig_not_exists(self): - """Destination host does not exist.""" - - dest = 'dummydest' - # mocks for live_migration_common_check() - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t1, updated_at=t1, - host=dest) - - # mocks for mounted_on_same_shared_storage() - fpath = '/test/20110127120000' - self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) - topic = FLAGS.compute_topic - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(self.context, topic, dest), - {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'check_shared_storage_test_file', - "args": {'filename': fpath}}) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, dest), - {"method": 'cleanup_shared_storage_test_file', - "args": {'filename': fpath}}) - - self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_different_hypervisor(self): - """Original host and dest host has different hypervisor type.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_service_different_version(self): - """Original host and dest host has different hypervisor version.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, - hypervisor_version=12002) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_checking_cpuinfo_fail(self): - """Raise excetion when original host doen't have compatible cpu.""" - - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for 
destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) - rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), - {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) - - self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except rpc.RemoteError, e: - c = (e.message.find(_("doesn't have compatibility to")) >= 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - -class FakeZone(object): - def __init__(self, id, api_url, username, password): - self.id = id - self.api_url = api_url - self.username = username - self.password = password - - -def zone_get_all(context): - return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), - ] - - -class FakeRerouteCompute(api.reroute_compute): - def _call_child_zones(self, zones, function): - return [] - - def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) - - def unmarshall_result(self, zone_responses): - return dict(magic="found me") - - -def go_boom(self, context, instance): - raise exception.InstanceNotFound(instance_id=instance) - - -def found_instance(self, context, instance): - return dict(name='myserver') - - -class FakeResource(object): - def __init__(self, attribute_dict): - for k, v in attribute_dict.iteritems(): - setattr(self, k, v) - - def pause(self): - pass - - -class ZoneRedirectTest(test.TestCase): - def setUp(self): - super(ZoneRedirectTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - - self.stubs.Set(db, 'zone_get_all', zone_get_all) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing - super(ZoneRedirectTest, self).tearDown() - - def test_trap_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(found_instance)(None, None, 1) - except api.RedirectResult, e: - self.fail(_("Successful database hit should succeed")) - - def test_trap_not_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) - except api.RedirectResult, e: - self.assertEquals(e.results['magic'], 'found me') - - def test_routing_flags(self): - FLAGS.enable_zone_routing = False - decorator = FakeRerouteCompute("foo") - self.assertRaises(exception.InstanceNotFound, decorator(go_boom), - None, None, 1) - - def test_get_collection_context_and_id(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.get_collection_context_and_id( - (None, 10, 20), {}), ("servers", 10, 20)) - self.assertEquals(decorator.get_collection_context_and_id( - (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) - self.assertEquals(decorator.get_collection_context_and_id( - (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) - - def test_unmarshal_single_server(self): - decorator = api.reroute_compute("foo") - 
self.assertEquals(decorator.unmarshall_result([]), {}) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, b=2)), ]), - dict(server=dict(a=1, b=2))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, _b=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, manager=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(_a=1, manager=2)), ]), - dict(server={})) - - -class FakeServerCollection(object): - def get(self, instance_id): - return FakeResource(dict(a=10, b=20)) - - def find(self, name): - return FakeResource(dict(a=11, b=22)) - - -class FakeEmptyServerCollection(object): - def get(self, f): - raise novaclient.NotFound(1) - - def find(self, name): - raise novaclient.NotFound(2) - - -class FakeNovaClient(object): - def __init__(self, collection): - self.servers = collection - - -class DynamicNovaClientTest(test.TestCase): - def test_issue_novaclient_command_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "get", 100).a, 10) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "pause", 100), None) - - def test_issue_novaclient_command_not_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "get", 100), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "any", "name"), None) - - -class FakeZonesProxy(object): - def do_something(*args, **kwargs): - return 42 - - def raises_exception(*args, **kwargs): - raise Exception('testing') - - -class FakeNovaClientOpenStack(object): - def __init__(self, *args, **kwargs): - self.zones = FakeZonesProxy() - - def authenticate(self): - pass - - -class CallZoneMethodTest(test.TestCase): - def setUp(self): - super(CallZoneMethodTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) - - def tearDown(self): - self.stubs.UnsetAll() - super(CallZoneMethodTest, self).tearDown() - - def test_call_zone_method(self): - context = {} - method = 'do_something' - results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) - - def test_call_zone_method_not_present(self): - context = {} - method = 'not_present' - self.assertRaises(AttributeError, api.call_zone_method, - context, method) - - def test_call_zone_method_generates_exception(self): - context = {} - method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. 
- expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py deleted file mode 100644 index 37169fb97..000000000 --- a/nova/tests/test_zone_aware_scheduler.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -from nova import test -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'ram': 1000} - }, - 'host2': { - 'compute': {'ram': 2000} - }, - 'host3': { - 'compute': {'ram': 3000} - } - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs): - return [] - - -def fake_call_zone_method(context, method, specs): - return [ - ('zone1', [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - ('zone2', [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - ('zone3', [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) -- cgit From 967d82669ae07b2add3289e3decad60aea2657d8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:49:21 -0500 Subject: Moving into scheduler subdir and refactoring out common code --- nova/tests/scheduler/__init__.py | 0 nova/tests/scheduler/test_host_filter.py | 189 ++++++++++++++++++++++ nova/tests/scheduler/test_least_cost_scheduler.py | 146 +++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 31 ++++ 4 files changed, 366 insertions(+) create mode 100644 nova/tests/scheduler/__init__.py create mode 100644 nova/tests/scheduler/test_host_filter.py create mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py (limited to 'nova') diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 000000000..c3af50a6e --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,189 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter +from nova.tests.scheduler import test_zone_aware_scheduler + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def setUp(self): + super(HostFilterTestCase, self).setUp() + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + super(HostFilterTestCase, self).tearDown() + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
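+ # (editor's note, an inference rather than quoted code: the queries
+ # above are prefix expressions whose '$compute.*' paths are resolved
+ # against host capabilities; the KeyError asserted below suggests
+ # each operator is looked up in a dict of handler functions.)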
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 000000000..e0ed61417 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +# TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing +# test setup code +class FakeZoneManager: + pass + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37169fb97..b2cc4fe23 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
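+ # (editor's note, worked instance of the scheme above: host03 is
+ # built with multiplier=2, so it reports host_memory_free =
+ # 10 + 2 * 10 = 30 and disk_available = 100 + 2 * 100 = 300, which
+ # is why the 50-ram/500-disk filters in test_host_filter match only
+ # host05 through host10.)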
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] -- cgit From 1b610e28e40c77271191349b6bfaa56c8f522c24 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:53:00 -0500 Subject: Small cleanups --- nova/tests/scheduler/test_host_filter.py | 7 +++---- nova/tests/scheduler/test_least_cost_scheduler.py | 10 ++++------ 2 files changed, 7 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index c3af50a6e..edbab7ab4 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -27,10 +27,6 @@ from nova.tests.scheduler import test_zone_aware_scheduler FLAGS = flags.FLAGS -class FakeZoneManager: - pass - - class HostFilterTestCase(test.TestCase): """Test case for host filter drivers.""" @@ -48,6 +44,9 @@ class HostFilterTestCase(test.TestCase): rxtx_quota=30000, rxtx_cap=200) + class FakeZoneManager: + pass + self.zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index e0ed61417..506fa62fb 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -63,15 +63,13 @@ class WeightedSumTestCase(test.TestCase): self.assertEqual(expected, costs) -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - class LeastCostSchedulerTestCase(test.TestCase): def setUp(self): super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( -- cgit From 74bae1b1e2b298ef8425f7cb1aefd3826db40147 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 19 May 2011 13:50:11 -0700 Subject: Separate out tests for when unfilter is called from iptables vs. nwfilter driver. 
Re: lp783705 --- nova/tests/test_virt.py | 65 ++++++++++++++++++++++++++++++++++------------- nova/virt/libvirt_conn.py | 22 ++++++++-------- 2 files changed, 58 insertions(+), 29 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index babb5de9b..3b5a3867d 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -657,6 +657,21 @@ class LibvirtConnTestCase(test.TestCase): super(LibvirtConnTestCase, self).tearDown() +class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + def _nwfilterLookupByName(self, ignore): + return self + + def _filterDefineXMLMock(self, xml): + return True + + class IptablesFirewallTestCase(test.TestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() @@ -869,6 +884,35 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + def test_unfilter_instance_undefines_nwfilters(self): + admin_ctxt = context.get_admin_context() + + fakefilter = FakeNWFilter() + self.fw.nwfilter._conn.nwfilterDefineXML =\ + fakefilter._filterDefineXMLMock + self.fw.nwfilter._conn.nwfilterLookupByName =\ + fakefilter._nwfilterLookupByName + + instance_ref = self._create_instance_ref() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine just the instance filter + self.assertEquals(fakefilter.undefine_call_count, 1) + + db.instance_destroy(admin_ctxt, instance_ref['id']) + class NWFilterTestCase(test.TestCase): def setUp(self): @@ -1047,26 +1091,11 @@ class NWFilterTestCase(test.TestCase): self.assertEquals(len(result), 3) def test_unfilter_instance_undefines_nwfilters(self): - class FakeNWFilter: - def __init__(self): - self.undefine_call_count = 0 - - def undefine(self): - self.undefine_call_count += 1 - pass - - fakefilter = FakeNWFilter() - - def _nwfilterLookupByName(ignore): - return fakefilter - - def _filterDefineXMLMock(xml): - return True - admin_ctxt = context.get_admin_context() - self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock - self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + fakefilter = FakeNWFilter() + self.fw._conn.nwfilterDefineXML = fakefilter._filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = fakefilter._nwfilterLookupByName instance_ref = self._create_instance() inst_id = instance_ref['id'] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 706973176..f808a4b7b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1834,7 +1834,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, remove_secgroup=True): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1846,19 +1846,19 @@ class NWFilterFirewall(FirewallDriver): 
self._conn.nwfilterLookupByName(instance_filter_name).\ undefine() except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + LOG.debug(_('The nwfilter(%(instance_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\ - undefine() - except libvirt.libvirtError: - # This will happen if called by IptablesFirewallDriver - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + if remove_secgroup: + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2022,7 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, False) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 0bb2d0085e1fb3ba22a408f405f4539aa07b226c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 May 2011 08:59:07 -0700 Subject: make nwfilter mock more 'realistic' by having it remember which filters have been defined --- nova/tests/test_virt.py | 56 +++++++++++++++++++++++++++++++++++++++-------- nova/virt/libvirt_conn.py | 17 +++++++------- 2 files changed, 55 insertions(+), 18 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 4bc5fed16..5e85e3a2f 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -659,16 +659,26 @@ class LibvirtConnTestCase(test.TestCase): class FakeNWFilter: def __init__(self): - self.undefine_call_count = 0 + self.filters = {} - def undefine(self): - self.undefine_call_count += 1 - pass - - def _nwfilterLookupByName(self, ignore): - return self + def _nwfilterLookupByName(self, name): + if name in self.filters: + return self.filters[name] + raise libvirt.libvirtError('Filter Not Found') def _filterDefineXMLMock(self, xml): + class FakeNWFilterInternal: + def __init__(self, parent, name): + self.name = name + self.parent = parent + + def undefine(self): + del self.parent.filters[self.name] + pass + tree = xml_to_tree(xml) + name = tree.get('name') + if name not in self.filters: + self.filters[name] = FakeNWFilterInternal(self, name) return True @@ -689,6 +699,20 @@ class IptablesFirewallTestCase(test.TestCase): self.fw = libvirt_conn.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) + def lazy_load_library_exists(self): + """check if libvirt is available.""" + # try to connect libvirt. if fail, skip test. 
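+ # (editor's note: rebinding the module-level 'libvirt' name both
+ # here and on libvirt_conn means the libvirtError raised by the
+ # FakeNWFilter mock is the very class the driver's except clauses
+ # catch, so the not-found fallback paths are exercised for real.)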
+ try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + return True + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) @@ -895,6 +919,10 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.do_refresh_security_group_rules("fake") def test_unfilter_instance_undefines_nwfilter(self): + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + admin_ctxt = context.get_admin_context() fakefilter = FakeNWFilter() @@ -916,10 +944,11 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance) # should attempt to undefine just the instance filter - self.assertEquals(fakefilter.undefine_call_count, 1) + self.assertEqual(original_filter_count - len(fakefilter.filters), 1) db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1109,6 +1138,12 @@ class NWFilterTestCase(test.TestCase): instance_ref = self._create_instance() inst_id = instance_ref['id'] + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) ip = '10.11.12.13' @@ -1120,9 +1155,12 @@ class NWFilterTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) + print fakefilter.filters.keys() self.fw.unfilter_instance(instance) + print fakefilter.filters.keys() # should attempt to undefine 2 filters: instance and instance-secgroup - self.assertEquals(fakefilter.undefine_call_count, 2) + self.assertEqual(original_filter_count - len(fakefilter.filters), 2) db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9241c1d9e..f27398aa3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1838,7 +1838,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, remove_secgroup=True): + def unfilter_instance(self, instance): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1856,13 +1856,12 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - if remove_secgroup: - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ - .undefine() - except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' - 'for %(instance_name)s is not found.') % locals()) + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2028,7 +2027,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) 
self.iptables.apply() - self.nwfilter.unfilter_instance(instance, False) + self.nwfilter.unfilter_instance(instance) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 5c205bb5ef1565db4e52af538cf0d6b73cbeda37 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 May 2011 09:09:03 -0700 Subject: fix comments --- nova/tests/test_virt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 5e85e3a2f..90c6de5a9 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -659,6 +659,7 @@ class LibvirtConnTestCase(test.TestCase): class FakeNWFilter: def __init__(self): + self.filters = {} def _nwfilterLookupByName(self, name): @@ -947,7 +948,7 @@ class IptablesFirewallTestCase(test.TestCase): original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance) - # should attempt to undefine just the instance filter + # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1160,7 +1161,7 @@ class NWFilterTestCase(test.TestCase): self.fw.unfilter_instance(instance) print fakefilter.filters.keys() - # should attempt to undefine 2 filters: instance and instance-secgroup + # should undefine 2 filters: instance and instance-secgroup self.assertEqual(original_filter_count - len(fakefilter.filters), 2) db.instance_destroy(admin_ctxt, instance_ref['id']) -- cgit From f2da479b8988cd55d39e89935b10e0b348df43c9 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 31 May 2011 23:36:49 +0400 Subject: Moved everything from thread-local storage to class attributes --- nova/auth/ldapdriver.py | 38 +++++++++++--------------------------- nova/auth/manager.py | 14 +++----------- 2 files changed, 14 insertions(+), 38 deletions(-) (limited to 'nova') diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 9fe0165a1..91f412baa 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -26,7 +26,6 @@ public methods. 
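(Editor's note: the ldapdriver hunks that follow trade thread-local storage for class attributes, so a single LDAP bind and a single memcache client are shared by every driver instance in the process. The resulting pattern, condensed from the diff itself:)

    class LdapDriver(object):
        conn = None  # class attribute: shared process-wide

        def __init__(self):
            # self.ldap is bound earlier in __init__ via __import__
            if LdapDriver.conn is None:
                # only the first instantiation pays the bind cost
                LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
                LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
                                              FLAGS.ldap_password)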
import functools import sys -import threading from nova import exception from nova import flags @@ -106,7 +105,8 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' - __local = threading.local() + conn = None + mc = None def __init__(self): """Imports the LDAP module""" @@ -117,15 +117,22 @@ class LdapDriver(object): LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + if LdapDriver.mc is None: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" # TODO(yorik-sar): Should be per-request cache, not per-driver-request self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" self.__cache = None return False @@ -149,29 +156,6 @@ class LdapDriver(object): return inner return do_wrap - @property - def conn(self): - try: - return self.__local.conn - except AttributeError: - conn = self.ldap.initialize(FLAGS.ldap_url) - conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) - self.__local.conn = conn - return conn - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc - @sanitize @__local_cache('uid_user-%s') def get_user(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c71f0f161..c887297f3 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,6 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 -import threading import tempfile import uuid import zipfile @@ -207,7 +206,7 @@ class AuthManager(object): """ _instance = None - __local = threading.local() + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -224,19 +223,12 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: + if AuthManager.mc is None: if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From 5922b5dc166476adf550abbbacc21e4585e53a37 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 31 May 2011 21:23:36 +0000 Subject: Fixing Scheduler Tests --- nova/tests/scheduler/test_scheduler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 54b3f80fb..b0f0e882a 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -61,7 +61,8 @@ class 
SchedulerTestCase(test.TestCase): """Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + driver = 'nova.tests.scheduler.test_scheduler.TestDriver' + self.flags(scheduler_driver=driver) def _create_compute_service(self): """Create compute-manager(ComputeNode and Service record).""" -- cgit From a26e21040681fd6db5a6ae862ca18ee17689854c Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:32:49 +0400 Subject: Moved memcached driver import to the top of modules. --- nova/auth/ldapdriver.py | 10 ++++++---- nova/auth/manager.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 91f412baa..e26a360af 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -69,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor @@ -121,10 +127,6 @@ class LdapDriver(object): LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c887297f3..98c7dd263 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -224,10 +230,6 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) if AuthManager.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', -- cgit From 4d1271821f782d4e11934d69b4ffe3aced6072eb Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:34:54 +0400 Subject: PEP8 fix. 
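(Editor's note on the previous patch: hoisting the conditional import to module scope gives every caller one 'memcache' name that is either the real client or nova's in-process fake. A hedged usage sketch, assuming the standard python-memcache Client interface that both are expected to implement; the key and value here are made up:)

    if FLAGS.memcached_servers:
        import memcache
    else:
        from nova import fakememcache as memcache

    mc = memcache.Client(FLAGS.memcached_servers, debug=0)
    mc.set('token-abc123', 'user-1', time=600)  # ten-minute TTL
    assert mc.get('token-abc123') == 'user-1'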
--- nova/auth/ldapdriver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e26a360af..95e31ae3b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,8 @@ class LdapDriver(object): self.__cache = None if LdapDriver.conn is None: LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) -- cgit From b2fb1738db489206557abccb631b13991c31fd4e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:23:05 -0700 Subject: make all uses of utcnow use our testable utils.utcnow --- nova/api/ec2/admin.py | 3 +-- nova/api/ec2/cloud.py | 5 ++-- nova/api/openstack/auth.py | 5 ++-- nova/api/openstack/contrib/__init__.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/ratelimiting/__init__.py | 2 +- nova/compute/api.py | 3 +-- nova/compute/manager.py | 3 +-- nova/compute/monitor.py | 2 +- nova/context.py | 1 - nova/db/sqlalchemy/api.py | 29 +++++++++++----------- .../versions/016_make_quotas_key_and_value.py | 10 ++++---- nova/db/sqlalchemy/models.py | 9 +++---- nova/network/manager.py | 2 +- nova/notifier/api.py | 7 +++--- nova/scheduler/driver.py | 3 ++- nova/scheduler/simple.py | 11 ++++---- nova/test.py | 4 +-- nova/tests/api/openstack/fakes.py | 3 +-- nova/tests/api/openstack/test_images.py | 1 - nova/tests/api/openstack/test_servers.py | 8 +++--- nova/tests/test_compute.py | 5 ++-- nova/tests/test_console.py | 2 -- nova/tests/test_middleware.py | 1 - nova/tests/test_scheduler.py | 16 ++++++------ nova/utils.py | 2 +- nova/virt/xenapi/fake.py | 4 +-- nova/volume/api.py | 5 ++-- nova/volume/manager.py | 4 +-- 29 files changed, 68 insertions(+), 86 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..aeebd86fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. """ import base64 -import datetime from nova import db from nova import exception @@ -305,7 +304,7 @@ class AdminController(object): * Volume Count """ services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] rv = [] for host in [service['host'] for service in services]: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 79cc3b3bf..04675174f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,6 @@ datastore. 
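(Editor's note: the point of funnelling every timestamp in this patch through utils.utcnow is that tests can freeze the clock in one place. The helper itself is not part of this excerpt; a sketch of the usual shape of such a wrapper, offered as an assumption rather than the actual nova code:)

    import datetime

    def utcnow():
        """Overridable wrapper around datetime.datetime.utcnow()."""
        if utcnow.override_time:
            return utcnow.override_time
        return datetime.datetime.utcnow()

    utcnow.override_time = None  # tests set this to a fixed datetime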
""" import base64 -import datetime import IPy import os import urllib @@ -235,7 +234,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -595,7 +594,7 @@ class CloudController(object): instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) - now = datetime.datetime.utcnow() + now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c6ee22a2..b49bf449b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -13,9 +13,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import hashlib import time @@ -127,7 +126,7 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.utcnow() - token['created_at'] + delta = utils.utcnow() - token['created_at'] if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py index b42a1d89d..acb5eb280 100644 --- a/nova/api/openstack/contrib/__init__.py +++ b/nova/api/openstack/contrib/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Contrib contains extensions that are shipped with nova. diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 4d46b92df..dc2bc6bbc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """ Module dedicated functions/classes dealing with rate limiting requests. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 88ffc3246..9ede548c2 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. 
"""Rate limiting of arbitrary actions.""" diff --git a/nova/compute/api.py b/nova/compute/api.py index 7122ebe67..de87ddd88 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -18,7 +18,6 @@ """Handles all requests relating to instances (guest vms).""" -import datetime import eventlet import re import time @@ -405,7 +404,7 @@ class API(base.Base): instance['id'], state_description='terminating', state=0, - terminated_at=datetime.datetime.utcnow()) + terminated_at=utils.utcnow()) host = instance['host'] if host: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3897b3a9e..a57d6e246 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,7 +35,6 @@ terminating it. """ -import datetime import os import socket import sys @@ -159,7 +158,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or datetime.datetime.utcnow()} + data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) def _update_image_id(self, context, instance_id, image_id): diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 3bb54a382..613734bef 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -86,7 +86,7 @@ RRD_VALUES = { ]} -utcnow = datetime.datetime.utcnow +utcnow = utils.utcnow LOG = logging.getLogger('nova.compute.monitor') diff --git a/nova/context.py b/nova/context.py index c113f7ea7..99085ed75 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,7 +18,6 @@ """RequestContext: context for requests that persist through all of nova.""" -import datetime import random from nova import exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c3a971a82..bb7bf5b89 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
""" -import datetime import warnings from nova import db @@ -674,7 +673,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter_by(allocated=0).\ update({'instance_id': None, 'leased': 0, - 'updated_at': datetime.datetime.utcnow()}, + 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -820,17 +819,17 @@ def instance_destroy(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1123,7 +1122,7 @@ def key_pair_destroy_all_by_user(context, user_id): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1655,7 +1654,7 @@ def volume_destroy(context, volume_id): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ @@ -1813,7 +1812,7 @@ def snapshot_destroy(context, snapshot_id): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1968,17 +1967,17 @@ def security_group_destroy(context, security_group_id): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1989,11 +1988,11 @@ def security_group_destroy_all(context, session=None): with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2627,7 +2626,7 @@ def instance_metadata_delete(context, instance_id, key): filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2638,7 +2637,7 @@ def instance_metadata_delete_all(context, instance_id): 
filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 5d0593f2e..a4fe3e482 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -17,7 +17,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table -import datetime +from nova import utils meta = MetaData() @@ -35,9 +35,9 @@ def old_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -57,9 +57,9 @@ def new_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 22a1a84e8..dbe72efd9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -19,8 +19,6 @@ SQLAlchemy models for nova data. 
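One subtlety in the quota migration above: utils.utcnow is handed to default= and onupdate= without parentheses, so SQLAlchemy stores the callable and invokes it once per row at INSERT or UPDATE time. Calling it instead would bake a single import-time timestamp into every row. A small illustration with hypothetical table names:

    import datetime
    from sqlalchemy import Column, DateTime, Integer, MetaData, Table

    meta = MetaData()

    good = Table('good_example', meta,
                 Column('id', Integer(), primary_key=True),
                 # callable: evaluated each time a row is inserted
                 Column('created_at', DateTime(),
                        default=datetime.datetime.utcnow))

    bad = Table('bad_example', meta,
                Column('id', Integer(), primary_key=True),
                # call result: evaluated once, when the module is imported
                Column('created_at', DateTime(),
                       default=datetime.datetime.utcnow()))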
""" -import datetime - from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text @@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import utils FLAGS = flags.FLAGS @@ -43,8 +42,8 @@ class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False - created_at = Column(DateTime, default=datetime.datetime.utcnow) - updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -64,7 +63,7 @@ class NovaBase(object): def delete(self, session=None): """Delete this object.""" self.deleted = True - self.deleted_at = datetime.datetime.utcnow() + self.deleted_at = utils.utcnow() self.save(session=session) def __setitem__(self, key, value): diff --git a/nova/network/manager.py b/nova/network/manager.py index 5a6fdde5a..f726c4b26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -235,7 +235,7 @@ class NetworkManager(manager.SchedulerDependentManager): inst_addr = instance_ref['mac_address'] raise exception.Error(_('IP %(address)s leased to bad mac' ' %(inst_addr)s vs %(mac)s') % locals()) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039e..d49517c8b 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 2094e3565..0b257c5d8 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from nova.compute import power_state FLAGS = flags.FLAGS @@ -61,7 +62,7 @@ class Scheduler(object): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
- elapsed = datetime.datetime.utcnow() - last_heartbeat + elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..87cdef11d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/test.py b/nova/test.py index 80b2d0a74..60b599ce4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,7 +23,6 @@ inline callbacks. """ -import datetime import functools import os import shutil @@ -37,6 +36,7 @@ from eventlet import greenthread from nova import fakerabbit from nova import flags from nova import rpc +from nova import utils from nova import service from nova import wsgi from nova.virt import fake @@ -69,7 +69,7 @@ class TestCase(unittest.TestCase): # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. - self.start = datetime.datetime.utcnow() + self.start = utils.utcnow() shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8e0156afa..4fb0613fc 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
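The scheduler's liveness check above reduces to timedelta arithmetic against the last heartbeat, which is why it must read the same UTC clock the database columns were written with. A standalone sketch, with an illustrative stand-in for FLAGS.service_down_time:

    import datetime

    SERVICE_DOWN_TIME = 60  # seconds; stand-in for FLAGS.service_down_time

    def service_is_up(service):
        """Check whether a service is up based on its last heartbeat."""
        last_heartbeat = service['updated_at'] or service['created_at']
        # Timestamps in the DB are UTC, so compare against a UTC "now".
        elapsed = datetime.datetime.utcnow() - last_heartbeat
        return elapsed < datetime.timedelta(seconds=SERVICE_DOWN_TIME)

    fresh = {'created_at': datetime.datetime.utcnow(), 'updated_at': None}
    assert service_is_up(fresh)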
import copy -import datetime import json import random import string @@ -253,7 +252,7 @@ class FakeAuthDatabase(object): @staticmethod def auth_token_create(context, token): - fake_token = FakeToken(created_at=datetime.datetime.now(), **token) + fake_token = FakeToken(created_at=utils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 9f1f28611..93b402081 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import copy import json -import datetime import os import shutil import tempfile diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 11dcaaade..50d5fe980 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -16,7 +16,6 @@ # under the License. import base64 -import datetime import json import unittest from xml.dom import minidom @@ -29,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -114,9 +114,9 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "user_data": "", "reservation_id": "", "mac_address": "", - "scheduled_at": datetime.datetime.now(), - "launched_at": datetime.datetime.now(), - "terminated_at": datetime.datetime.now(), + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "server%s" % id, "display_description": "", diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..c726080ee 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867ee..831e7670f 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a6955..40d117c45 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
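The test_compute hunk shows why a single clock matters: timestamps taken before and after an operation must bracket the value the operation records, which only holds when both sides read the same UTC source (the old fakes.py call to datetime.datetime.now() returned local time and could disagree with UTC columns by hours). A minimal sketch of the bracketing assertion, using a hypothetical run_instance():

    import datetime

    def run_instance():
        """Hypothetical operation that records its own launch time."""
        return {'launched_at': datetime.datetime.utcnow()}

    launch = datetime.datetime.utcnow()
    instance = run_instance()
    # Both timestamps come from the same UTC clock, so ordering holds;
    # >= guards against equal values when the clock resolution is coarse.
    assert instance['launched_at'] >= launch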
-import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 54b3f80fb..1cf6bbfbf 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -196,7 +196,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..b1638e72c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ 
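To drive the down-host paths, the scheduler tests above backdate a service's updated_at beyond the liveness window rather than waiting it out. The same trick, applied to the service_is_up() sketch from earlier:

    import datetime

    SERVICE_DOWN_TIME = 60  # illustrative

    def service_is_up(service):
        last_heartbeat = service['updated_at'] or service['created_at']
        elapsed = datetime.datetime.utcnow() - last_heartbeat
        return elapsed < datetime.timedelta(seconds=SERVICE_DOWN_TIME)

    # Backdate the heartbeat past the liveness window, as the tests do.
    past = datetime.datetime.utcnow() - \
        datetime.timedelta(seconds=SERVICE_DOWN_TIME * 2)
    stale = {'created_at': past, 'updated_at': past}
    assert not service_is_up(stale)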
-307,7 +307,7 @@ def get_my_linklocal(interface): def utcnow(): - """Overridable version of datetime.datetime.utcnow.""" + """Overridable version of utils.utcnow.""" if utcnow.override_time: return utcnow.override_time return datetime.datetime.utcnow() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 76988b172..165888cb2 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,13 +51,13 @@ A fake XenAPI SDK. """ -import datetime import uuid from pprint import pformat from nova import exception from nova import log as logging +from nova import utils _CLASSES = ['host', 'network', 'session', 'SR', 'VBD', @@ -540,7 +540,7 @@ class SessionBase(object): except Failure, exc: task['error_info'] = exc.details task['status'] = 'failed' - task['finished'] = datetime.datetime.now() + task['finished'] = utils.utcnow() return task_ref def _check_session(self, params): diff --git a/nova/volume/api.py b/nova/volume/api.py index 5804955f7..b07f2e94b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -20,14 +20,13 @@ Handles all requests relating to volumes. """ -import datetime -from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import rpc +from nova import utils from nova.db import base FLAGS = flags.FLAGS @@ -78,7 +77,7 @@ class API(base.Base): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) host = volume['host'] diff --git a/nova/volume/manager.py b/nova/volume/manager.py index ff53f0701..798bd379a 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,8 +42,6 @@ intact. """ -import datetime - from nova import context from nova import exception @@ -127,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'error'}) raise - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) -- cgit From 4762aebe4ddc57d8502ed3b5aec56b613d0ec93b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:51:30 -0700 Subject: switch zones to use utcnow --- nova/scheduler/zone_manager.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3ddf6f3c3..3f483adff 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -17,16 +17,17 @@ ZoneManager oversees all communications with child Zones. 
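The nova/utils.py hunk above is the pivot of the whole change: utcnow() consults utcnow.override_time before falling back to the real clock (the rewritten docstring is a little self-referential; "overridable version of datetime.datetime.utcnow" still describes the behavior). Freezing time in a test then comes down to one assignment:

    import datetime

    def utcnow():  # same shape as the nova/utils.py hunk above
        if utcnow.override_time:
            return utcnow.override_time
        return datetime.datetime.utcnow()

    utcnow.override_time = None

    # Freeze the clock for a deterministic assertion, then restore it.
    utcnow.override_time = datetime.datetime(2011, 6, 2, 12, 0, 0)
    assert utcnow() == datetime.datetime(2011, 6, 2, 12, 0, 0)
    utcnow.override_time = None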
""" +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { : ZoneState } self.service_states = {} # { : { : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) -- cgit From 9ee103a91fe3bed03c3f4c6c1a6e89fa474e1aae Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 12:37:58 +0400 Subject: Fixed FakeLdapDriver, made it call LdapDriver.__init__ --- nova/auth/ldapdriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95e31ae3b..183f7a985 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -676,6 +676,7 @@ class LdapDriver(object): class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() -- cgit From 72a47784dc09d9b840db146d58ea71f6af30a8ea Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 13:39:22 +0400 Subject: Flush AuthManager's cache before each test. 
--- nova/tests/test_auth.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f02dd94b7..7d00bddfe 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase): super(_AuthManagerBaseTestCase, self).setUp() self.flags(connection_type='fake') self.manager = manager.AuthManager(new=True) + self.manager.mc.cache = {} def test_create_and_find_user(self): with user_generator(self.manager): -- cgit
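The one-line setUp addition keeps state from leaking between tests: AuthManager shares a memcached-style client, so each test starts by emptying its backing dict. A self-contained sketch of the same reset pattern (FakeMemcache and its API are hypothetical stand-ins for the cache client):

    import unittest

    class FakeMemcache(object):
        """Hypothetical stand-in for a memcached-style client."""
        def __init__(self):
            self.cache = {}

        def get(self, key):
            return self.cache.get(key)

        def set(self, key, value, time=0):
            self.cache[key] = value
            return True

    _shared_mc = FakeMemcache()  # module-level state that outlives a test

    class AuthCacheTestCase(unittest.TestCase):
        def setUp(self):
            super(AuthCacheTestCase, self).setUp()
            # Same idea as the patch above: start every test with a cold cache.
            _shared_mc.cache = {}

        def test_starts_cold(self):
            self.assertEqual(_shared_mc.get('user-foo'), None)

    if __name__ == '__main__':
        unittest.main()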