From 68e34c790612f3250bd902cc87a0ab9d3d69abfb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 13 May 2011 15:36:42 -0500 Subject: first cut at weighted-sum tests --- nova/scheduler/zone_aware_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..38b395d52 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -89,6 +89,8 @@ class ZoneAwareScheduler(driver.Scheduler): # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... -- cgit From a4ea9ac61568ce5f8300a5ba138f0ac10c79b43c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 16 May 2011 15:59:01 -0700 Subject: fix for lp783705 - remove nwfilters when instance is terminated --- nova/tests/test_virt.py | 42 ++++++++++++++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 27 +++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..babb5de9b 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -1045,3 +1045,45 @@ class NWFilterTestCase(test.TestCase): network_info, "fake") self.assertEquals(len(result), 3) + + def test_unfilter_instance_undefines_nwfilters(self): + class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + fakefilter = FakeNWFilter() + + def _nwfilterLookupByName(ignore): + return fakefilter + + def _filterDefineXMLMock(xml): + return True + + admin_ctxt = context.get_admin_context() + + self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + + instance_ref = self._create_instance() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine 2 filters: instance and instance-secgroup + self.assertEquals(fakefilter.undefine_call_count, 2) + + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..706973176 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1835,8 +1835,30 @@ class NWFilterFirewall(FirewallDriver): tpool.execute(self._conn.nwfilterDefineXML, xml) def unfilter_instance(self, instance): - # Nothing to do - pass + """Clear out the nwfilter rules.""" + network_info = _get_network_info(instance) + instance_name = instance.name + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + + try: + self._conn.nwfilterLookupByName(instance_filter_name).\ + undefine() + except libvirt.libvirtError: + LOG.debug(_('The 
nwfilter(%(instance_filter_name)s) for '
+                            '%(instance_name)s is not found.') % locals())
+
+        instance_secgroup_filter_name =\
+            '%s-secgroup' % (self._instance_filter_name(instance))
+
+        try:
+            self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\
+                undefine()
+        except libvirt.libvirtError:
+            # This will happen if called by IptablesFirewallDriver
+            LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for '
+                        '%(instance_name)s is not found.') % locals())
 
     def prepare_instance_filter(self, instance, network_info=None):
         """
@@ -2000,6 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver):
         if self.instances.pop(instance['id'], None):
             self.remove_filters_for_instance(instance)
             self.iptables.apply()
+            self.nwfilter.unfilter_instance(instance)
         else:
             LOG.info(_('Attempted to unfilter instance %s which is not '
                        'filtered'), instance['id'])
-- cgit

From 0e2aba9e9869a66a1c3a6ece0fb08be631daa5bf Mon Sep 17 00:00:00 2001
From: Yuriy Taraday
Date: Tue, 17 May 2011 17:38:44 +0400
Subject: Moved memcached connection in AuthManager to thread-local storage.
 Added caching of LDAP connection in thread-local storage. Optimized LDAP
 queries, added similar memcached support to LDAPDriver. Added
 "per-driver-request" caching of LDAP results. (should be per-api-request)

---
 nova/auth/ldapdriver.py |   93 ++++++++++++++++++++++++++++++++++++++++++-----
 nova/auth/manager.py    |   20 +++++++----
 2 files changed, 98 insertions(+), 15 deletions(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 3f8432851..7849d941e 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -24,7 +24,9 @@ other backends by creating another class that exposes the same
 public methods.
 """
 
+import functools
 import sys
+import threading
 
 from nova import exception
 from nova import flags
@@ -85,6 +87,7 @@ def _clean(attr):
 
 def sanitize(fn):
     """Decorator to sanitize all args"""
+    @functools.wraps(fn)
     def _wrapped(self, *args, **kwargs):
         args = [_clean(x) for x in args]
         kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
@@ -103,29 +106,74 @@ class LdapDriver(object):
     isadmin_attribute = 'isNovaAdmin'
     project_attribute = 'owner'
     project_objectclass = 'groupOfNames'
+    __local = threading.local()
 
     def __init__(self):
         """Imports the LDAP module"""
         self.ldap = __import__('ldap')
-        self.conn = None
         if FLAGS.ldap_schema_version == 1:
             LdapDriver.project_pattern = '(objectclass=novaProject)'
             LdapDriver.isadmin_attribute = 'isAdmin'
             LdapDriver.project_attribute = 'projectManager'
             LdapDriver.project_objectclass = 'novaProject'
+        self.__cache = None
 
     def __enter__(self):
         """Creates the connection to LDAP"""
-        self.conn = self.ldap.initialize(FLAGS.ldap_url)
-        self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+        # TODO(yorik-sar): Should be per-request cache, not per-driver-request
+        self.__cache = {}
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
         """Destroys the connection to LDAP"""
-        self.conn.unbind_s()
+        self.__cache = None
        return False
 
+    def __local_cache(key_fmt):
+        """Wrap function to cache its result in self.__cache.
+        Works only with functions with one fixed argument.
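+
+        For example, get_user() below is wrapped as:
+
+            @__local_cache('uid_user-%s')
+            def get_user(self, uid):
+                ...
+
+        so repeated lookups of one uid within a single driver request are
+        served from self.__cache under the key 'uid_user-<uid>'.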
+ """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + + @property + def conn(self): + try: + return self.__local.conn + except AttributeError: + conn = self.ldap.initialize(FLAGS.ldap_url) + conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + self.__local.conn = conn + return conn + + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +182,30 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s'%(access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +458,7 @@ class LdapDriver(object): """Check if project exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +490,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! 
scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +636,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +649,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,10 +677,11 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a7..c71f0f161 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,6 +23,7 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 +import threading import tempfile import uuid import zipfile @@ -206,6 +207,7 @@ class AuthManager(object): """ _instance = None + __local = threading.local() def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -223,12 +225,18 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From e4f8ef67065f1de36ceadf9dd97e07fbe9fc9d83 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:39:19 +0400 Subject: Fixed mistyped key, caused huge performance leak. --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 519793643..5c536f6d8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] -- cgit From 2fcc10656222bea6056742ef943c1b82724c0b56 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:45:48 +0400 Subject: PEP8 fixes. 
---
 nova/auth/ldapdriver.py |    5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 7849d941e..9fe0165a1 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -182,7 +182,7 @@ class LdapDriver(object):
     @sanitize
     def get_user_from_access_key(self, access):
         """Retrieve user by access key"""
-        cache_key = 'uak_dn_%s'%(access,)
+        cache_key = 'uak_dn_%s' % (access,)
         user_dn = self.mc.get(cache_key)
         if user_dn:
             user = self.__to_user(
@@ -205,7 +205,8 @@ class LdapDriver(object):
     def get_project(self, pid):
         """Retrieve project by id"""
         dn = self.__project_to_dn(pid, search=False)
-        attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE)
+        attr = self.__find_object(dn, LdapDriver.project_pattern,
+                                  scope=self.ldap.SCOPE_BASE)
         return self.__to_project(attr)
 
     @sanitize
-- cgit

From 23bbbfcd3317859d44dba7da7996a978ad922543 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Tue, 17 May 2011 10:45:19 -0500
Subject: First cut at least cost scheduler

---
 nova/scheduler/least_cost.py            |   79 +++++++++++++++++++++++++++++++++
 nova/tests/test_least_cost_scheduler.py |   39 ++++++++++++++++
 2 files changed, 118 insertions(+)
 create mode 100644 nova/scheduler/least_cost.py
 create mode 100644 nova/tests/test_least_cost_scheduler.py

diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
new file mode 100644
index 000000000..75dde81ca
--- /dev/null
+++ b/nova/scheduler/least_cost.py
@@ -0,0 +1,79 @@
+import collections
+
+# TODO(sirp): this should be just `zone_aware` to match naming scheme
+# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware`
+# module
+from nova.scheduler import zone_aware_scheduler
+
+class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    def get_cost_fns(self):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        cost_fns = []
+
+        return cost_fns
+
+    def weigh_hosts(self, num, specs, hosts):
+        """
+        Returns a list of dictionaries of form:
+           [ {weight: weight, hostname: hostname} ]
+        """
+        # FIXME(sirp): weigh_hosts should handle more than just instances
+        cost_fns = self.get_cost_fns()
+        cost_hosts = weighted_sum(domain=hosts, weighted_fns=cost_fns)
+
+        # TODO(sirp): convert hosts back to hostnames
+        weight_hostnames = []
+        return weight_hostnames
+
+def normalize_list(L):
+    """Normalize an array of numbers such that each element satisfies:
+        0 <= e <= 1
+    """
+    if not L:
+        return L
+    max_ = max(L)
+    if max_ > 0:
+        return [(float(e) / max_) for e in L]
+    return L
+
+def weighted_sum(domain, weighted_fns, normalize=True):
+    """
+    Use the weighted-sum method to compute a score for an array of objects.
+    Normalize the results of the objective-functions so that the weights are
+    meaningful regardless of objective-function's range.
+
+    domain - input to be scored
+    weighted_fns - list of weights and functions like:
+        [(weight, objective-functions)]
+
+    Returns an unsorted list of scores, one per element of `domain`.
+    """
+    # Table of form:
+    # { domain1: [score1, score2, ..., scoreM]
+    # ...
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + elem = domain[idx] + domain_scores.append(elem_score) + + return domain_scores diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py new file mode 100644 index 000000000..a3a18a09f --- /dev/null +++ b/nova/tests/test_least_cost_scheduler.py @@ -0,0 +1,39 @@ +from nova import test +from nova.scheduler import least_cost + +MB = 1024 * 1024 + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + +class WeightedSumTest(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) -- cgit From a4035df4d031d3d90f3f7ce938ff0b8305be6773 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:27:04 -0500 Subject: Adding fill first cost function --- nova/scheduler/least_cost.py | 12 ++++++++++++ nova/test.py | 16 +++++++++++++--- nova/tests/test_least_cost_scheduler.py | 33 +++++++++++++++++++++++++++++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index e47951f17..79376c358 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -34,6 +34,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', 'Which cost functions the LeastCostScheduler should use.') +# TODO(sirp): Once we have enough of these rules, we can break them out into a +# cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') def noop_cost_fn(host): @@ -41,6 +43,16 @@ def noop_cost_fn(host): return 1 +flags.DEFINE_integer('fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') +def fill_first_cost_fn(host): + """Prefer hosts that have less ram available, filter_hosts will exclude + hosts that don't have enough ram""" + hostname, caps = host + free_mem = caps['compute']['host_memory_free'] + return free_mem + + class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def get_cost_fns(self): """Returns a list of tuples containing weights and cost functions to diff --git a/nova/test.py b/nova/test.py index 4deb2a175..401f82d38 100644 --- a/nova/test.py +++ b/nova/test.py 
@@ -181,7 +181,7 @@ class TestCase(unittest.TestCase):
         wsgi.Server.start = _wrapped_start
 
     # Useful assertions
-    def assertDictMatch(self, d1, d2):
+    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
         """Assert two dicts are equivalent.
 
         This is a 'deep' match in the sense that it handles nested
@@ -212,15 +212,24 @@ class TestCase(unittest.TestCase):
         for key in d1keys:
             d1value = d1[key]
             d2value = d2[key]
+
+            try:
+                within_tolerance = abs(float(d1value) - float(d2value)) < tolerance
+            except ValueError:
+                # If both values aren't convertible to float, just ignore
+                within_tolerance = False
+
             if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                 self.assertDictMatch(d1value, d2value)
             elif 'DONTCARE' in (d1value, d2value):
                 continue
+            elif approx_equal and within_tolerance:
+                continue
             elif d1value != d2value:
                 raise_assertion("d1['%(key)s']=%(d1value)s != "
                                 "d2['%(key)s']=%(d2value)s" % locals())
 
-    def assertDictListMatch(self, L1, L2):
+    def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
         """Assert a list of dicts are equivalent."""
         def raise_assertion(msg):
             L1str = str(L1)
@@ -236,4 +245,5 @@ class TestCase(unittest.TestCase):
                             'len(L2)=%(L2count)d' % locals())
 
         for d1, d2 in zip(L1, L2):
-            self.assertDictMatch(d1, d2)
+            self.assertDictMatch(d1, d2, approx_equal=approx_equal,
+                                 tolerance=tolerance)
diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py
index b2318a3bf..b7bcd2f02 100644
--- a/nova/tests/test_least_cost_scheduler.py
+++ b/nova/tests/test_least_cost_scheduler.py
@@ -119,7 +119,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
 
     def assertWeights(self, expected, num, request_spec, hosts):
         weighted = self.sched.weigh_hosts(num, request_spec, hosts)
-        self.assertDictListMatch(weighted, expected)
+        self.assertDictListMatch(weighted, expected, approx_equal=True)
 
     def test_no_hosts(self):
         num = 1
@@ -137,12 +137,41 @@
         num = 1
         request_spec = {}
-
         hosts = self.sched.filter_hosts(num, request_spec)
 
         expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts]
         self.assertWeights(expected, num, request_spec, hosts)
 
+    def test_cost_fn_weights(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.noop_cost_fn'
+        ]
+        FLAGS.noop_cost_fn_weight = 2
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+        expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts]
+        self.assertWeights(expected, num, request_spec, hosts)
+
+    def test_fill_first_cost_fn(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.fill_first_cost_fn'
+        ]
+        FLAGS.fill_first_cost_fn_weight = 1
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+
+        expected = []
+        for idx, (hostname, caps) in enumerate(hosts):
+            # Costs are normalized so over 10 hosts, each host with increasing
+            # free ram will cost 1/N more.
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) -- cgit From d6fbe417d7f8f7540cffe8c941c0591a22483978 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:44:08 -0500 Subject: Using import_class to import filter_host driver --- nova/scheduler/host_filter.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 7cb41a433..117f08242 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler +from nova import utils LOG = logging.getLogger('nova.scheduler.host_filter') @@ -283,11 +284,13 @@ def choose_driver(driver_name=None): if not driver_name: driver_name = FLAGS.default_host_filter_driver - # FIXME(sirp): use utils.import_class here - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + try: + driver = utils.import_object(driver_name) + return driver + except exception.ClassNotFound: + raise exception.SchedulerHostFilterDriverNotFound( + driver_name=driver_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): -- cgit From 439787e7588b2409f319f2d86a41a3581cff8861 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:15:31 -0500 Subject: Pep8 fixes --- nova/exception.py | 3 +++ nova/scheduler/least_cost.py | 30 +++++++++++++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 11 ++++++----- nova/test.py | 5 +++-- nova/tests/test_least_cost_scheduler.py | 18 +++++++++++------- 5 files changed, 44 insertions(+), 23 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 63ed6dd5e..16c443c61 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -460,15 +460,18 @@ class FlavorNotFound(NotFound): class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") + # TODO(sirp): move these into the schedule classes since they are internal? class SchedulerHostFilterDriverNotFound(NotFound): message = _("Scheduler Host Filter Driver %(driver_name)s could" " not be found.") + class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") + class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 79376c358..629fe2e42 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -13,16 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. """ -Helpful docstring here +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. 
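+
+For example, given cost functions f1 (weight 1) and f2 (weight 2) and two
+hosts A and B whose normalized scores are f1 = (1.0, 0.5) and f2 = (0.25, 1.0),
+the tabulated costs are:
+
+    cost(A) = 1 * 1.0 + 2 * 0.25 = 1.5
+    cost(B) = 1 * 0.5 + 2 * 1.0  = 2.5
+
+so host A is the one provisioned.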
""" import collections from nova import flags from nova import log as logging -# TODO(sirp): this should be just `zone_aware` to match naming scheme -# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` -# module from nova.scheduler import zone_aware_scheduler from nova import utils @@ -38,6 +41,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') + + def noop_cost_fn(host): """Return a pre-weight cost of 1 for each host""" return 1 @@ -45,6 +50,8 @@ def noop_cost_fn(host): flags.DEFINE_integer('fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') + + def fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" @@ -68,7 +75,7 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( cost_fn_str=cost_fn_str) - + try: weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) except AttributeError: @@ -82,17 +89,22 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname} ]""" + # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, _ in hosts] + hostnames = [hostname for hostname, caps in hosts] cost_fns = self.get_cost_fns() costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - + weighted = [] + weight_log = [] for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname) weighted.append(weight_dict) - return weighted + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted def normalize_list(L): @@ -110,7 +122,7 @@ def weighted_sum(domain, weighted_fns, normalize=True): """Use the weighted-sum method to compute a score for an array of objects. Normalize the results of the objective-functions so that the weights are meaningful regardless of objective-function's range. - + domain - input to be scored weighted_fns - list of weights and functions like: [(weight, objective-functions)] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fa5b3b1b6..a1a68ce5e 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -80,7 +80,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass @@ -117,11 +117,11 @@ class ZoneAwareScheduler(driver.Scheduler): # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, request_spec) - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or # resources, so that we can apply different objective functions to it + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... 
@@ -145,8 +145,9 @@ class ZoneAwareScheduler(driver.Scheduler): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states return [(host, services) - for host, services in self.zone_manager.service_states.iteritems()] + for host, services in service_states.iteritems()] def weigh_hosts(self, num, request_spec, hosts): """Derived classes may override this to provide more sophisticated diff --git a/nova/test.py b/nova/test.py index 401f82d38..00a16dd68 100644 --- a/nova/test.py +++ b/nova/test.py @@ -212,9 +212,10 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] - + try: - within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance except ValueError: # If both values aren't convertable to float, just ignore within_tolerance = False diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b7bcd2f02..c8ce7892f 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -30,6 +30,7 @@ class FakeHost(object): self.free_ram = free_ram self.io = io + class WeightedSumTestCase(test.TestCase): def test_empty_domain(self): domain = [] @@ -50,14 +51,15 @@ class WeightedSumTestCase(test.TestCase): (2, lambda h: h.io), # Avoid high I/O ] - costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) # Each 256 MB unit of free-ram contributes 0.5 points by way of: # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 # Each 100 iops of IO adds 0.5 points by way of: # cost = 2 * (100/400) = 2 * 0.25 = 0.5 expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) + self.assertEqual(expected, costs) # TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing @@ -65,6 +67,7 @@ class WeightedSumTestCase(test.TestCase): class FakeZoneManager: pass + class LeastCostSchedulerTestCase(test.TestCase): def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -116,7 +119,6 @@ class LeastCostSchedulerTestCase(test.TestCase): #FLAGS.default_host_filter_driver = self.old_flag super(LeastCostSchedulerTestCase, self).tearDown() - def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) @@ -138,8 +140,9 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - - expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): @@ -152,7 +155,8 @@ class LeastCostSchedulerTestCase(test.TestCase): request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_fill_first_cost_fn(self): @@ -164,7 +168,7 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - + expected = [] for idx, (hostname, caps) in enumerate(hosts): # Costs are normalized so over 10 hosts, each host with increasing -- cgit From 4ba215224e6c75037fd4f20be57d632da5d07469 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:32:56 -0500 Subject: Moving tests into scheduler subdirectory --- nova/scheduler/host_filter.py | 5 - nova/tests/scheduler/test_scheduler.py | 1118 +++++++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 121 +++ nova/tests/test_host_filter.py | 211 ---- nova/tests/test_least_cost_scheduler.py | 181 ---- nova/tests/test_scheduler.py | 1118 --------------------- nova/tests/test_zone_aware_scheduler.py | 121 --- 7 files changed, 1239 insertions(+), 1636 deletions(-) create mode 100644 nova/tests/scheduler/test_scheduler.py create mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py delete mode 100644 nova/tests/test_host_filter.py delete mode 100644 nova/tests/test_least_cost_scheduler.py delete mode 100644 nova/tests/test_scheduler.py delete mode 100644 nova/tests/test_zone_aware_scheduler.py diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 117f08242..79e9f3159 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -313,8 +313,3 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): instance_type = request_spec['instance_type'] name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py new file mode 100644 index 000000000..54b3f80fb --- /dev/null +++ b/nova/tests/scheduler/test_scheduler.py @@ -0,0 +1,1118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 
United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler +""" + +import datetime +import mox +import novaclient.exceptions +import stubout +import webob + +from mox import IgnoreArg +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import api +from nova.scheduler import manager +from nova.scheduler import driver +from nova.compute import power_state +from nova.db.sqlalchemy import models + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') +flags.DECLARE('stub_network', 'nova.compute.manager') +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def _create_compute_service(self): + """Create compute-manager(ComputeNode and Service record).""" + ctxt = context.get_admin_context() + dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + s_ref = db.service_create(ctxt, dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + db.compute_node_create(ctxt, dic) + + return db.service_get(ctxt, s_ref['id']) + + def _create_instance(self, **kwargs): + """Create a test instance""" + ctxt = context.get_admin_context() + inst = {} + inst['user_id'] = 'admin' + inst['project_id'] = kwargs.get('project_id', 'fake') + inst['host'] = kwargs.get('host', 'dummy') + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['memory_mb'] = kwargs.get('memory_mb', 10) + inst['local_gb'] = kwargs.get('local_gb', 20) + return db.instance_create(ctxt, inst) + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + self.mox.ReplayAll() + 
scheduler.named_method(ctxt, 'topic', num=7) + + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + + self.assertRaises(exception.NotFound, scheduler.show_host_resources, + ctxt, dest) + #TODO(bcwaldon): reimplement this functionality + #c1 = (e.message.find(_("does not exist or is not a " + # "compute node.")) >= 0) + + def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" + if not keys: + keys = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used'] + + for key in keys: + if not (dic1[key] == dic2[key]): + return False + return True + + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + # result checking + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'] == {} + self.assertTrue(c1 and c2 and c3) + db.service_destroy(ctxt, s_ref['id']) + + def test_show_host_resources_works_correctly(self): + """Show_host_resources() works correctly as expected.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) + i_ref2 = self._create_instance(project_id='p-02', vcpus=3, + host=s_ref['host']) + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'].keys() == ['p-01', 'p-02'] + keys = ['vcpus', 'memory_mb', 'local_gb'] + c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) + c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) + self.assertTrue(c1 and c2 and c3 and c4 and c5) + + db.service_destroy(ctxt, s_ref['id']) + db.instance_destroy(ctxt, i_ref1['id']) + db.instance_destroy(ctxt, i_ref2['id']) + + +class ZoneSchedulerTestCase(test.TestCase): + """Test case for zone scheduler""" + def setUp(self): + super(ZoneSchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') + + def _create_service_model(self, **kwargs): + service = db.sqlalchemy.models.Service() + service.host = kwargs['host'] + service.disabled = False + service.deleted = False + service.report_count = 0 + service.binary = 'nova-compute' + service.topic = 'compute' + service.id = kwargs['id'] + service.availability_zone = kwargs['zone'] + service.created_at = datetime.datetime.utcnow() + return service + + def test_with_two_zones(self): + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + service_list = [self._create_service_model(id=1, + host='host1', + zone='zone1'), + self._create_service_model(id=2, + host='host2', + zone='zone2'), + self._create_service_model(id=3, + host='host3', + zone='zone2'), + self._create_service_model(id=4, + host='host4', + zone='zone2'), + self._create_service_model(id=5, + host='host5', + zone='zone2')] + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + arg = IgnoreArg() + 
db.service_get_all_by_topic(arg, arg).AndReturn(service_list) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + rpc.cast(ctxt, + 'compute.host1', + {'method': 'run_instance', + 'args': {'instance_id': 'i-ffffffff', + 'availability_zone': 'zone1'}}) + self.mox.ReplayAll() + scheduler.run_instance(ctxt, + 'compute', + instance_id='i-ffffffff', + availability_zone='zone1') + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True, + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() + + def _create_instance(self, **kwargs): + """Create a test instance""" + inst = {} + inst['image_id'] = 1 + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type_id'] = '1' + inst['mac_address'] = utils.generate_mac() + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['ami_launch_index'] = 0 + inst['availability_zone'] = kwargs.get('availability_zone', None) + inst['host'] = kwargs.get('host', 'dummy') + inst['memory_mb'] = kwargs.get('memory_mb', 20) + inst['local_gb'] = kwargs.get('local_gb', 30) + inst['launched_on'] = kwargs.get('launghed_on', 'dummy') + inst['state_description'] = kwargs.get('state_description', 'running') + inst['state'] = kwargs.get('state', power_state.RUNNING) + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['size'] = 1 + vol['availability_zone'] = 'test' + return db.volume_create(self.context, vol)['id'] + + def _create_compute_service(self, **kwargs): + """Create a compute service.""" + + dic = {'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + dic['host'] = kwargs.get('host', 'dummy') + s_ref = db.service_create(self.context, dic) + if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): + t = datetime.datetime.utcnow() - datetime.timedelta(0) + dic['created_at'] = kwargs.get('created_at', t) + dic['updated_at'] = kwargs.get('updated_at', t) + db.service_update(self.context, s_ref['id'], dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) + dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') + dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) + db.compute_node_create(self.context, dic) + return db.service_get(self.context, s_ref['id']) + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 
'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = 
db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + 
compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + db.instance_destroy(self.context, instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + 
self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() + + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. + + """ + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, dic) + + # cannot check 2nd argument b/c the addresses of instance object + # is different. + driver_i = self.scheduler.driver + nocare = mox.IgnoreArg() + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_src_check(nocare, nocare) + driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + rpc.cast(self.context, + db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), + {"method": 'live_migration', "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(self.context, FLAGS.compute_topic, + instance_id=instance_id, + dest=i_ref['host']) + + i_ref = db.instance_get(self.context, instance_id) + self.assertTrue(i_ref['state_description'] == 'migrating') + db.instance_destroy(self.context, instance_id) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + + instance_id = self._create_instance(state_description='migrating') + i_ref = db.instance_get(self.context, instance_id) + + try: + self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + except exception.Invalid, e: + c = (e.message.find('is not running') > 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, {'instance_id': instance_id, + 'size': 1}) + t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', + 'topic': 'volume', 
+        s_ref = db.service_create(self.context, dic)
+
+        self.assertRaises(exception.VolumeServiceUnavailable,
+                          self.scheduler.driver.schedule_live_migration,
+                          self.context, instance_id, i_ref['host'])
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.volume_destroy(self.context, v_ref['id'])
+
+    def test_live_migration_src_check_compute_node_not_alive(self):
+        """Raise exception when src compute node is not alive."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        t = datetime.datetime.utcnow() - datetime.timedelta(10)
+        s_ref = self._create_compute_service(created_at=t, updated_at=t,
+                                             host=i_ref['host'])
+
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_src_check,
+                          self.context, i_ref)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_src_check_works_correctly(self):
+        """Confirms this method finishes with no error."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host=i_ref['host'])
+
+        ret = self.scheduler.driver._live_migration_src_check(self.context,
+                                                              i_ref)
+
+        self.assertTrue(ret is None)
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_not_alive(self):
+        """Confirms exception is raised in case dest host does not exist."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        t = datetime.datetime.utcnow() - datetime.timedelta(10)
+        s_ref = self._create_compute_service(created_at=t, updated_at=t,
+                                             host=i_ref['host'])
+
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_same_host(self):
+        """Confirms exception is raised when dest and src are the same host."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host=i_ref['host'])
+
+        self.assertRaises(exception.UnableToMigrateToSelf,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_lack_memory(self):
+        """Confirms exception is raised when dest lacks enough memory."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host='somewhere',
+                                             memory_mb_used=12)
+
+        self.assertRaises(exception.MigrationError,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, 'somewhere')
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_works_correctly(self):
+        """Confirms method finishes with no error."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host='somewhere',
+                                             memory_mb_used=5)
+
+        ret = self.scheduler.driver._live_migration_dest_check(self.context,
+                                                               i_ref,
+                                                               'somewhere')
+        self.assertTrue(ret is None)
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_common_check_service_orig_not_exists(self):
+        """Raises an error when the original host does not exist."""
+
+        dest = 'dummydest'
+        # mocks for live_migration_common_check()
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+        s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+                                             host=dest)
+
+        # mocks for mounted_on_same_shared_storage()
+        fpath = '/test/20110127120000'
+        self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+        topic = FLAGS.compute_topic
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(self.context, topic, dest),
+            {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+            {"method": 'check_shared_storage_test_file',
+             "args": {'filename': fpath}})
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(mox.IgnoreArg(), topic, dest),
+            {"method": 'cleanup_shared_storage_test_file',
+             "args": {'filename': fpath}})
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.SourceHostUnavailable,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_common_check_service_different_hypervisor(self):
+        """Original host and dest host have different hypervisor types."""
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidHypervisorType,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.service_destroy(self.context, s_ref2['id'])
+
+    def test_live_migration_common_check_service_different_version(self):
+        """Original host and dest host have different hypervisor versions."""
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest,
+                                              hypervisor_version=12002)
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.DestinationHypervisorTooOld,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.service_destroy(self.context, s_ref2['id'])
+
+    def test_live_migration_common_check_checking_cpuinfo_fail(self):
+        """Raise exception when original host doesn't have a compatible cpu."""
+
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest)
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+        self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+        rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+            {"method": 'compare_cpu',
+             "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+            AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+
+        self.mox.ReplayAll()
+        try:
+            self.scheduler.driver._live_migration_common_check(self.context,
+                                                               i_ref,
+                                                               dest)
+        except rpc.RemoteError, e:
+            c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+
+        self.assertTrue(c)
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.service_destroy(self.context, s_ref2['id'])
+
+
+class FakeZone(object):
+    def __init__(self, id, api_url, username, password):
+        self.id = id
+        self.api_url = api_url
+        self.username = username
+        self.password = password
+
+
+def zone_get_all(context):
+    return [
+        FakeZone(1, 'http://example.com', 'bob', 'xxx'),
+    ]
+
+
+class FakeRerouteCompute(api.reroute_compute):
+    def _call_child_zones(self, zones, function):
+        return []
+
+    def get_collection_context_and_id(self, args, kwargs):
+        return ("servers", None, 1)
+
+    def unmarshall_result(self, zone_responses):
+        return dict(magic="found me")
+
+
+def go_boom(self, context, instance):
+    raise exception.InstanceNotFound(instance_id=instance)
+
+
+def found_instance(self, context, instance):
+    return dict(name='myserver')
+
+
+class FakeResource(object):
+    def __init__(self, attribute_dict):
+        for k, v in attribute_dict.iteritems():
+            setattr(self, k, v)
+
+    def pause(self):
+        pass
+
+
+class ZoneRedirectTest(test.TestCase):
+    def setUp(self):
+        super(ZoneRedirectTest, self).setUp()
+        self.stubs = stubout.StubOutForTesting()
+
+        self.stubs.Set(db, 'zone_get_all', zone_get_all)
+
+        self.enable_zone_routing = FLAGS.enable_zone_routing
+        FLAGS.enable_zone_routing = True
+
+    def tearDown(self):
+        self.stubs.UnsetAll()
+        FLAGS.enable_zone_routing = self.enable_zone_routing
+        super(ZoneRedirectTest, self).tearDown()
+
+    def test_trap_found_locally(self):
+        decorator = FakeRerouteCompute("foo")
+        try:
+            result = decorator(found_instance)(None, None, 1)
+        except api.RedirectResult, e:
+            self.fail(_("Successful database hit should succeed"))
+
+    def test_trap_not_found_locally(self):
+        decorator = FakeRerouteCompute("foo")
+        try:
+            result = decorator(go_boom)(None, None, 1)
+            self.fail(_("Should have rerouted."))
+        except api.RedirectResult, e:
+            self.assertEquals(e.results['magic'], 'found me')
+
+    def test_routing_flags(self):
+        FLAGS.enable_zone_routing = False
+        decorator = FakeRerouteCompute("foo")
+        self.assertRaises(exception.InstanceNotFound, decorator(go_boom),
+                          None, None, 1)
+
+    def test_get_collection_context_and_id(self):
+        decorator = api.reroute_compute("foo")
+        self.assertEquals(decorator.get_collection_context_and_id(
+            (None, 10, 20), {}), ("servers", 10, 20))
+        self.assertEquals(decorator.get_collection_context_and_id(
+            (None, 11,), dict(instance_id=21)), ("servers", 11, 21))
+        self.assertEquals(decorator.get_collection_context_and_id(
+            (None,), dict(context=12, instance_id=22)),
("servers", 12, 22)) + + def test_unmarshal_single_server(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.unmarshall_result([]), {}) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, b=2)), ]), + dict(server=dict(a=1, b=2))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, _b=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, manager=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(_a=1, manager=2)), ]), + dict(server={})) + + +class FakeServerCollection(object): + def get(self, instance_id): + return FakeResource(dict(a=10, b=20)) + + def find(self, name): + return FakeResource(dict(a=11, b=22)) + + +class FakeEmptyServerCollection(object): + def get(self, f): + raise novaclient.NotFound(1) + + def find(self, name): + raise novaclient.NotFound(2) + + +class FakeNovaClient(object): + def __init__(self, collection): + self.servers = collection + + +class DynamicNovaClientTest(test.TestCase): + def test_issue_novaclient_command_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "get", 100).a, 10) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "find", "name").b, 22) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "pause", 100), None) + + def test_issue_novaclient_command_not_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "get", 100), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "find", "name"), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. 
+ expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py new file mode 100644 index 000000000..37169fb97 --- /dev/null +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -0,0 +1,121 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. 
+ """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 1a2a86a79..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filter Drivers. -""" - -import json - -from nova import exception -from nova import flags -from nova import test -from nova.scheduler import host_filter - -FLAGS = flags.FLAGS - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ - 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag - super(HostFilterTestCase, self).tearDown() - - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... 
- try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: - pass - - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_driver(self): - driver = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - driver.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) - - try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py deleted file mode 100644 index c8ce7892f..000000000 --- a/nova/tests/test_least_cost_scheduler.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Least Cost Scheduler -""" - -from nova import flags -from nova import test -from nova.scheduler import least_cost - -MB = 1024 * 1024 -FLAGS = flags.FLAGS - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100) - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - -class LeastCostSchedulerTestCase(test.TestCase): - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... 
don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - #self.old_flag = FLAGS.default_host_filter_driver - #FLAGS.default_host_filter_driver = \ - # 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - #FLAGS.default_host_filter_driver = self.old_flag - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, request_spec, hosts) - - def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=1, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 2 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=2, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn' - ] - FLAGS.fill_first_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [] - for idx, (hostname, caps) in enumerate(hosts): - # Costs are normalized so over 10 hosts, each host with increasing - # free ram will cost 1/N more. Since the lowest cost host has some - # free ram, we add in the 1/N for the base_cost - weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) - - self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py deleted file mode 100644 index 54b3f80fb..000000000 --- a/nova/tests/test_scheduler.py +++ /dev/null @@ -1,1118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler -""" - -import datetime -import mox -import novaclient.exceptions -import stubout -import webob - -from mox import IgnoreArg -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import api -from nova.scheduler import manager -from nova.scheduler import driver -from nova.compute import power_state -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') -flags.DECLARE('stub_network', 'nova.compute.manager') -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') - - def _create_compute_service(self): - """Create compute-manager(ComputeNode and Service record).""" - ctxt = context.get_admin_context() - dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - s_ref = db.service_create(ctxt, dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - db.compute_node_create(ctxt, dic) - - return db.service_get(ctxt, s_ref['id']) - - def _create_instance(self, **kwargs): - """Create a test instance""" - ctxt = context.get_admin_context() - inst = {} - inst['user_id'] = 'admin' - inst['project_id'] = kwargs.get('project_id', 'fake') - inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['memory_mb'] = kwargs.get('memory_mb', 10) - inst['local_gb'] = kwargs.get('local_gb', 20) - return db.instance_create(ctxt, inst) - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - def test_show_host_resources_host_not_exit(self): - """A host given as an argument does not 
exists.""" - - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - - self.assertRaises(exception.NotFound, scheduler.show_host_resources, - ctxt, dest) - #TODO(bcwaldon): reimplement this functionality - #c1 = (e.message.find(_("does not exist or is not a " - # "compute node.")) >= 0) - - def _dic_is_equal(self, dic1, dic2, keys=None): - """Compares 2 dictionary contents(Helper method)""" - if not keys: - keys = ['vcpus', 'memory_mb', 'local_gb', - 'vcpus_used', 'memory_mb_used', 'local_gb_used'] - - for key in keys: - if not (dic1[key] == dic2[key]): - return False - return True - - def test_show_host_resources_no_project(self): - """No instance are running on the given host.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - # result checking - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'] == {} - self.assertTrue(c1 and c2 and c3) - db.service_destroy(ctxt, s_ref['id']) - - def test_show_host_resources_works_correctly(self): - """Show_host_resources() works correctly as expected.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) - i_ref2 = self._create_instance(project_id='p-02', vcpus=3, - host=s_ref['host']) - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'].keys() == ['p-01', 'p-02'] - keys = ['vcpus', 'memory_mb', 'local_gb'] - c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) - c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) - self.assertTrue(c1 and c2 and c3 and c4 and c5) - - db.service_destroy(ctxt, s_ref['id']) - db.instance_destroy(ctxt, i_ref1['id']) - db.instance_destroy(ctxt, i_ref2['id']) - - -class ZoneSchedulerTestCase(test.TestCase): - """Test case for zone scheduler""" - def setUp(self): - super(ZoneSchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') - - def _create_service_model(self, **kwargs): - service = db.sqlalchemy.models.Service() - service.host = kwargs['host'] - service.disabled = False - service.deleted = False - service.report_count = 0 - service.binary = 'nova-compute' - service.topic = 'compute' - service.id = kwargs['id'] - service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() - return service - - def test_with_two_zones(self): - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - service_list = [self._create_service_model(id=1, - host='host1', - zone='zone1'), - self._create_service_model(id=2, - host='host2', - zone='zone2'), - self._create_service_model(id=3, - host='host3', - zone='zone2'), - self._create_service_model(id=4, - host='host4', - zone='zone2'), - self._create_service_model(id=5, - host='host5', - zone='zone2')] - self.mox.StubOutWithMock(db, 'service_get_all_by_topic') - arg = IgnoreArg() - db.service_get_all_by_topic(arg, arg).AndReturn(service_list) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast(ctxt, - 'compute.host1', - 
{'method': 'run_instance', - 'args': {'instance_id': 'i-ffffffff', - 'availability_zone': 'zone1'}}) - self.mox.ReplayAll() - scheduler.run_instance(ctxt, - 'compute', - instance_id='i-ffffffff', - availability_zone='zone1') - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True, - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() - - def _create_instance(self, **kwargs): - """Create a test instance""" - inst = {} - inst['image_id'] = 1 - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['ami_launch_index'] = 0 - inst['availability_zone'] = kwargs.get('availability_zone', None) - inst['host'] = kwargs.get('host', 'dummy') - inst['memory_mb'] = kwargs.get('memory_mb', 20) - inst['local_gb'] = kwargs.get('local_gb', 30) - inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['size'] = 1 - vol['availability_zone'] = 'test' - return db.volume_create(self.context, vol)['id'] - - def _create_compute_service(self, **kwargs): - """Create a compute service.""" - - dic = {'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - dic['host'] = kwargs.get('host', 'dummy') - s_ref = db.service_create(self.context, dic) - if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) - dic['created_at'] = kwargs.get('created_at', t) - dic['updated_at'] = kwargs.get('updated_at', t) - db.service_update(self.context, s_ref['id'], dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) - dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') - dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_node_create(self.context, dic) - return db.service_get(self.context, s_ref['id']) - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - 
compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = 
self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = 
self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - db.instance_destroy(self.context, instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - 
db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() - - def test_scheduler_live_migration_with_volume(self): - """scheduler_live_migration() works correctly as expected. - - Also, checks instance state is changed from 'running' -> 'migrating'. - - """ - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, dic) - - # cannot check 2nd argument b/c the addresses of instance object - # is different. - driver_i = self.scheduler.driver - nocare = mox.IgnoreArg() - self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') - driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} - rpc.cast(self.context, - db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), - {"method": 'live_migration', "args": kwargs}) - - self.mox.ReplayAll() - self.scheduler.live_migration(self.context, FLAGS.compute_topic, - instance_id=instance_id, - dest=i_ref['host']) - - i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') - db.instance_destroy(self.context, instance_id) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_instance_not_running(self): - """The instance given by instance_id is not running.""" - - instance_id = self._create_instance(state_description='migrating') - i_ref = db.instance_get(self.context, instance_id) - - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not running') > 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - - def test_live_migration_src_check_volume_node_not_alive(self): - """Raise exception when volume node is not alive.""" - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, {'instance_id': instance_id, - 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) - dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', - 'topic': 'volume', 'report_count': 0} - s_ref = db.service_create(self.context, dic) - - 
self.assertRaises(exception.VolumeServiceUnavailable, - self.scheduler.driver.schedule_live_migration, - self.context, instance_id, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_compute_node_not_alive(self): - """Confirms src-compute node is alive.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_src_check, - self.context, i_ref) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_src_check_works_correctly(self): - """Confirms this method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - ret = self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_not_alive(self): - """Confirms exception raises in case dest host does not exist.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_same_host(self): - """Confirms exceptioin raises in case dest and src is same host.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - self.assertRaises(exception.UnableToMigrateToSelf, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_lack_memory(self): - """Confirms exception raises when dest doesn't have enough memory.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) - - self.assertRaises(exception.MigrationError, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_works_correctly(self): - """Confirms method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=5) - - ret = self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - 
- def test_live_migration_common_check_service_orig_not_exists(self): - """Destination host does not exist.""" - - dest = 'dummydest' - # mocks for live_migration_common_check() - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t1, updated_at=t1, - host=dest) - - # mocks for mounted_on_same_shared_storage() - fpath = '/test/20110127120000' - self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) - topic = FLAGS.compute_topic - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(self.context, topic, dest), - {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'check_shared_storage_test_file', - "args": {'filename': fpath}}) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, dest), - {"method": 'cleanup_shared_storage_test_file', - "args": {'filename': fpath}}) - - self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_different_hypervisor(self): - """Original host and dest host has different hypervisor type.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_service_different_version(self): - """Original host and dest host has different hypervisor version.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, - hypervisor_version=12002) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_checking_cpuinfo_fail(self): - """Raise excetion when original host doen't have compatible cpu.""" - - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for 
destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) - rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), - {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) - - self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except rpc.RemoteError, e: - c = (e.message.find(_("doesn't have compatibility to")) >= 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - -class FakeZone(object): - def __init__(self, id, api_url, username, password): - self.id = id - self.api_url = api_url - self.username = username - self.password = password - - -def zone_get_all(context): - return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), - ] - - -class FakeRerouteCompute(api.reroute_compute): - def _call_child_zones(self, zones, function): - return [] - - def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) - - def unmarshall_result(self, zone_responses): - return dict(magic="found me") - - -def go_boom(self, context, instance): - raise exception.InstanceNotFound(instance_id=instance) - - -def found_instance(self, context, instance): - return dict(name='myserver') - - -class FakeResource(object): - def __init__(self, attribute_dict): - for k, v in attribute_dict.iteritems(): - setattr(self, k, v) - - def pause(self): - pass - - -class ZoneRedirectTest(test.TestCase): - def setUp(self): - super(ZoneRedirectTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - - self.stubs.Set(db, 'zone_get_all', zone_get_all) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing - super(ZoneRedirectTest, self).tearDown() - - def test_trap_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(found_instance)(None, None, 1) - except api.RedirectResult, e: - self.fail(_("Successful database hit should succeed")) - - def test_trap_not_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) - except api.RedirectResult, e: - self.assertEquals(e.results['magic'], 'found me') - - def test_routing_flags(self): - FLAGS.enable_zone_routing = False - decorator = FakeRerouteCompute("foo") - self.assertRaises(exception.InstanceNotFound, decorator(go_boom), - None, None, 1) - - def test_get_collection_context_and_id(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.get_collection_context_and_id( - (None, 10, 20), {}), ("servers", 10, 20)) - self.assertEquals(decorator.get_collection_context_and_id( - (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) - self.assertEquals(decorator.get_collection_context_and_id( - (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) - - def test_unmarshal_single_server(self): - decorator = api.reroute_compute("foo") - 
self.assertEquals(decorator.unmarshall_result([]), {}) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, b=2)), ]), - dict(server=dict(a=1, b=2))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, _b=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, manager=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(_a=1, manager=2)), ]), - dict(server={})) - - -class FakeServerCollection(object): - def get(self, instance_id): - return FakeResource(dict(a=10, b=20)) - - def find(self, name): - return FakeResource(dict(a=11, b=22)) - - -class FakeEmptyServerCollection(object): - def get(self, f): - raise novaclient.NotFound(1) - - def find(self, name): - raise novaclient.NotFound(2) - - -class FakeNovaClient(object): - def __init__(self, collection): - self.servers = collection - - -class DynamicNovaClientTest(test.TestCase): - def test_issue_novaclient_command_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "get", 100).a, 10) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "pause", 100), None) - - def test_issue_novaclient_command_not_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "get", 100), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "any", "name"), None) - - -class FakeZonesProxy(object): - def do_something(*args, **kwargs): - return 42 - - def raises_exception(*args, **kwargs): - raise Exception('testing') - - -class FakeNovaClientOpenStack(object): - def __init__(self, *args, **kwargs): - self.zones = FakeZonesProxy() - - def authenticate(self): - pass - - -class CallZoneMethodTest(test.TestCase): - def setUp(self): - super(CallZoneMethodTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) - - def tearDown(self): - self.stubs.UnsetAll() - super(CallZoneMethodTest, self).tearDown() - - def test_call_zone_method(self): - context = {} - method = 'do_something' - results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) - - def test_call_zone_method_not_present(self): - context = {} - method = 'not_present' - self.assertRaises(AttributeError, api.call_zone_method, - context, method) - - def test_call_zone_method_generates_exception(self): - context = {} - method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. 
- expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py deleted file mode 100644 index 37169fb97..000000000 --- a/nova/tests/test_zone_aware_scheduler.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -from nova import test -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'ram': 1000} - }, - 'host2': { - 'compute': {'ram': 2000} - }, - 'host3': { - 'compute': {'ram': 3000} - } - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs): - return [] - - -def fake_call_zone_method(context, method, specs): - return [ - ('zone1', [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - ('zone2', [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - ('zone3', [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) -- cgit From 967d82669ae07b2add3289e3decad60aea2657d8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:49:21 -0500 Subject: Moving into scheduler subdir and refactoring out common code --- nova/tests/scheduler/__init__.py | 0 nova/tests/scheduler/test_host_filter.py | 189 ++++++++++++++++++++++ nova/tests/scheduler/test_least_cost_scheduler.py | 146 +++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 31 ++++ 4 files changed, 366 insertions(+) create mode 100644 nova/tests/scheduler/__init__.py create mode 100644 nova/tests/scheduler/test_host_filter.py create mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 000000000..c3af50a6e --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,189 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter +from nova.tests.scheduler import test_zone_aware_scheduler + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def setUp(self): + super(HostFilterTestCase, self).setUp() + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + super(HostFilterTestCase, self).tearDown() + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 000000000..e0ed61417 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +# TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing +# test setup code +class FakeZoneManager: + pass + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37169fb97..b2cc4fe23 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] -- cgit From 1b610e28e40c77271191349b6bfaa56c8f522c24 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:53:00 -0500 Subject: Small cleanups --- nova/tests/scheduler/test_host_filter.py | 7 +++---- nova/tests/scheduler/test_least_cost_scheduler.py | 10 ++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index c3af50a6e..edbab7ab4 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -27,10 +27,6 @@ from nova.tests.scheduler import test_zone_aware_scheduler FLAGS = flags.FLAGS -class FakeZoneManager: - pass - - class HostFilterTestCase(test.TestCase): """Test case for host filter drivers.""" @@ -48,6 +44,9 @@ class HostFilterTestCase(test.TestCase): rxtx_quota=30000, rxtx_cap=200) + class FakeZoneManager: + pass + self.zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index e0ed61417..506fa62fb 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -63,15 +63,13 @@ class WeightedSumTestCase(test.TestCase): self.assertEqual(expected, costs) -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - class LeastCostSchedulerTestCase(test.TestCase): def setUp(self): super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( -- cgit From 74bae1b1e2b298ef8425f7cb1aefd3826db40147 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 19 May 2011 13:50:11 -0700 Subject: Separate out tests for when unfilter is called from iptables vs. nwfilter driver. 
Re: lp783705 --- nova/tests/test_virt.py | 65 ++++++++++++++++++++++++++++++++++------------- nova/virt/libvirt_conn.py | 22 ++++++++-------- 2 files changed, 58 insertions(+), 29 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index babb5de9b..3b5a3867d 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -657,6 +657,21 @@ class LibvirtConnTestCase(test.TestCase): super(LibvirtConnTestCase, self).tearDown() +class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + def _nwfilterLookupByName(self, ignore): + return self + + def _filterDefineXMLMock(self, xml): + return True + + class IptablesFirewallTestCase(test.TestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() @@ -869,6 +884,35 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + def test_unfilter_instance_undefines_nwfilters(self): + admin_ctxt = context.get_admin_context() + + fakefilter = FakeNWFilter() + self.fw.nwfilter._conn.nwfilterDefineXML =\ + fakefilter._filterDefineXMLMock + self.fw.nwfilter._conn.nwfilterLookupByName =\ + fakefilter._nwfilterLookupByName + + instance_ref = self._create_instance_ref() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine just the instance filter + self.assertEquals(fakefilter.undefine_call_count, 1) + + db.instance_destroy(admin_ctxt, instance_ref['id']) + class NWFilterTestCase(test.TestCase): def setUp(self): @@ -1047,26 +1091,11 @@ class NWFilterTestCase(test.TestCase): self.assertEquals(len(result), 3) def test_unfilter_instance_undefines_nwfilters(self): - class FakeNWFilter: - def __init__(self): - self.undefine_call_count = 0 - - def undefine(self): - self.undefine_call_count += 1 - pass - - fakefilter = FakeNWFilter() - - def _nwfilterLookupByName(ignore): - return fakefilter - - def _filterDefineXMLMock(xml): - return True - admin_ctxt = context.get_admin_context() - self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock - self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + fakefilter = FakeNWFilter() + self.fw._conn.nwfilterDefineXML = fakefilter._filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = fakefilter._nwfilterLookupByName instance_ref = self._create_instance() inst_id = instance_ref['id'] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 706973176..f808a4b7b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1834,7 +1834,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, remove_secgroup=True): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1846,19 +1846,19 @@ class NWFilterFirewall(FirewallDriver): 
self._conn.nwfilterLookupByName(instance_filter_name).\ undefine() except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + LOG.debug(_('The nwfilter(%(instance_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\ - undefine() - except libvirt.libvirtError: - # This will happen if called by IptablesFirewallDriver - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + if remove_secgroup: + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2022,7 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, False) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 0bb2d0085e1fb3ba22a408f405f4539aa07b226c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 May 2011 08:59:07 -0700 Subject: make nwfilter mock more 'realistic' by having it remember which filters have been defined --- nova/tests/test_virt.py | 56 +++++++++++++++++++++++++++++++++++++++-------- nova/virt/libvirt_conn.py | 17 +++++++------- 2 files changed, 55 insertions(+), 18 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 4bc5fed16..5e85e3a2f 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -659,16 +659,26 @@ class LibvirtConnTestCase(test.TestCase): class FakeNWFilter: def __init__(self): - self.undefine_call_count = 0 + self.filters = {} - def undefine(self): - self.undefine_call_count += 1 - pass - - def _nwfilterLookupByName(self, ignore): - return self + def _nwfilterLookupByName(self, name): + if name in self.filters: + return self.filters[name] + raise libvirt.libvirtError('Filter Not Found') def _filterDefineXMLMock(self, xml): + class FakeNWFilterInternal: + def __init__(self, parent, name): + self.name = name + self.parent = parent + + def undefine(self): + del self.parent.filters[self.name] + pass + tree = xml_to_tree(xml) + name = tree.get('name') + if name not in self.filters: + self.filters[name] = FakeNWFilterInternal(self, name) return True @@ -689,6 +699,20 @@ class IptablesFirewallTestCase(test.TestCase): self.fw = libvirt_conn.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) + def lazy_load_library_exists(self): + """check if libvirt is available.""" + # try to connect libvirt. if fail, skip test. 
+ try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + return True + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) @@ -895,6 +919,10 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.do_refresh_security_group_rules("fake") def test_unfilter_instance_undefines_nwfilter(self): + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + admin_ctxt = context.get_admin_context() fakefilter = FakeNWFilter() @@ -916,10 +944,11 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance) # should attempt to undefine just the instance filter - self.assertEquals(fakefilter.undefine_call_count, 1) + self.assertEqual(original_filter_count - len(fakefilter.filters), 1) db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1109,6 +1138,12 @@ class NWFilterTestCase(test.TestCase): instance_ref = self._create_instance() inst_id = instance_ref['id'] + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) ip = '10.11.12.13' @@ -1120,9 +1155,12 @@ class NWFilterTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) + print fakefilter.filters.keys() self.fw.unfilter_instance(instance) + print fakefilter.filters.keys() # should attempt to undefine 2 filters: instance and instance-secgroup - self.assertEquals(fakefilter.undefine_call_count, 2) + self.assertEqual(original_filter_count - len(fakefilter.filters), 2) db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9241c1d9e..f27398aa3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1838,7 +1838,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, remove_secgroup=True): + def unfilter_instance(self, instance): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1856,13 +1856,12 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - if remove_secgroup: - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ - .undefine() - except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' - 'for %(instance_name)s is not found.') % locals()) + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2028,7 +2027,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) 
self.iptables.apply()
-            self.nwfilter.unfilter_instance(instance, False)
+            self.nwfilter.unfilter_instance(instance)
         else:
             LOG.info(_('Attempted to unfilter instance %s which is not '
                        'filtered'), instance['id'])
--
cgit

From 5c205bb5ef1565db4e52af538cf0d6b73cbeda37 Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Fri, 20 May 2011 09:09:03 -0700
Subject: fix comments

---
 nova/tests/test_virt.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 5e85e3a2f..90c6de5a9 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -659,6 +659,7 @@ class LibvirtConnTestCase(test.TestCase):

 class FakeNWFilter:
     def __init__(self):
+
         self.filters = {}

     def _nwfilterLookupByName(self, name):
@@ -947,7 +948,7 @@
         original_filter_count = len(fakefilter.filters)
         self.fw.unfilter_instance(instance)

-        # should attempt to undefine just the instance filter
+        # should undefine just the instance filter
         self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

         db.instance_destroy(admin_ctxt, instance_ref['id'])
@@ -1160,7 +1161,7 @@
         self.fw.unfilter_instance(instance)
         print fakefilter.filters.keys()

-        # should attempt to undefine 2 filters: instance and instance-secgroup
+        # should undefine 2 filters: instance and instance-secgroup
         self.assertEqual(original_filter_count - len(fakefilter.filters), 2)

         db.instance_destroy(admin_ctxt, instance_ref['id'])
--
cgit

From c9926b12f4c554d9a21c6e77fc657e54a2dd4888 Mon Sep 17 00:00:00 2001
From: Sandy Walsh
Date: Sun, 29 May 2011 18:01:46 -0700
Subject: starting

---
 doc/source/devref/distributed_scheduler.rst | 164 ++++++++++++++++++++++
 1 file changed, 164 insertions(+)
 create mode 100644 doc/source/devref/distributed_scheduler.rst

diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
new file mode 100644
index 000000000..75a4d57ce
--- /dev/null
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -0,0 +1,164 @@
+..
+      Copyright 2011 OpenStack LLC
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Distributed Scheduler
+=====
+
+The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. In a small deployment we may be happy with the currently available Chance Scheduler, which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+
+But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.
+
+This is the purpose of the Distributed Scheduler (DS).
The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found.
+
+So, how does this all work?
+
+This document will explain the strategy employed by the ZoneAwareScheduler and its derivations.
+
+Costs & Weights
+----------
+When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost.
+
+Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on them that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
+
+Examples of other costs might include selecting:
+* a GPU-based host over a standard CPU
+* a host with fast ethernet over a 10mbps line
+* a host that can run Windows instances
+* a host in the EU vs North America
+* etc
+
+This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
+
+nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
+-----------
+As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
+
+Here is how it works:
+
+1. The Compute nodes are Filtered and the remaining set are Weighted.
+1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request.
+1b. Weighing of the remaining Compute nodes is performed
+2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent.
+3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed.
+4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed.
+
+Filtering and Weighing
+------------
+Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations.
+
+
+
+
+
+-
+Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form:
+
+::
+
+    key=value;value;value, key=value;value;value
+
+Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities.
Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. + +Flow within a Zone +------------------ +The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for: +- collecting capability messages from the Compute, Volume and Network nodes, +- polling the child Zones for their status and +- providing data to the Distributed Scheduler for performing load balancing calculations + +Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. + +These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. + +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). + +Zone administrative functions +----------------------------- +Zone administrative operations are usually done using python-novaclient_ + +.. _python-novaclient: https://github.com/rackspace/python-novaclient + +In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. + +Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. + +Find out about this Zone +------------------------ +In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. + +:: + + alice@novadev:~$ nova zone-info + +-----------------+---------------+ + | Property | Value | + +-----------------+---------------+ + | compute_cpu | 0.7,0.7 | + | compute_disk | 123000,123000 | + | compute_network | 800,800 | + | hypervisor | xenserver | + | name | nova | + | network_cpu | 0.7,0.7 | + | network_disk | 123000,123000 | + | network_network | 800,800 | + | os | linux | + +-----------------+---------------+ + +This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. + +Adding a child Zone +------------------- +Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. 
Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command: + +:: + + nova zone-add + +You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: + +:: + + export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" + export NOVA_USERNAME="alice" + export NOVA_URL="http://192.168.2.120:8774/v1.0/" + + +This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. + +Getting a list of child Zones +----------------------------- + +:: + + nova zone-list + + alice@novadev:~$ nova zone-list + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | ID | Name | Is Active | Capabilities | API URL | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | + | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + +This equates to a GET operation to `.../zones`. + +Removing a child Zone +--------------------- +:: + + nova zone-delete + +This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. -- cgit From 5aa54545486ffe9d9988761576f497de9a957d47 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 29 May 2011 23:42:46 -0300 Subject: lots more --- doc/source/devref/distributed_scheduler.rst | 44 +++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 75a4d57ce..7599f2cc5 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -25,7 +25,7 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab So, how does this all work? -This document will explain the strategy employed by the ZoneAwareScheduler and its derivations. +This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this. Costs & Weights ---------- @@ -48,19 +48,51 @@ As we explained in the Zones documentation, each Scheduler has a `ZoneManager` o Here is how it works: -1. The Compute nodes are Filtered and the remaining set are Weighted. +1. The Compute nodes are Filtered and the nodes remaining are Weighed. 1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes is performed +1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. 2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. 3. 
The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed.
4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed.

Filtering and Weighing
------------
Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document.
+
+Requesting a new instance
+------------
+To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later.
+
+`nova.compute.api.create()` performs the following actions:
+1. it validates all the fields passed into it.
+2. it creates an entry in the `Instance` table for each instance requested
+3. it puts one `run_instance` message in the scheduler queue for each instance requested
+4. the schedulers pick off the messages and decide which Compute node should handle the request.
+5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created.
+6. it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid.
+
+Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones.
+
+The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
+
+For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
+1. it validates all the fields passed into it.
+2. it creates a single `request_id` for all of the instances created. This is a UUID.
+3. it creates a single `run_instance` request in the scheduler queue
+4. a scheduler picks the message off the queue and works on it.
+5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`.
+6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point.
We're only estimating the suitability of the Zones.
+7. if the Child Zone has its own Child Zones, the `/zones/select` call will be sent down to them as well.
+8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
+9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user.
+
+The Catch
+-------------
+This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons, Zones should never leak information about their internal architectural layout. That means Zones cannot leak information about hostnames or service IP addresses outside of their world.
+
+When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to Child Zones asking for estimates.
+
+Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected.
--
cgit

From c3c2c1a63c126f046457d0d61306ebe9c46af700 Mon Sep 17 00:00:00 2001
From: Sandy Walsh
Date: Mon, 30 May 2011 00:00:28 -0300
Subject: basic flow done

---
 doc/source/devref/distributed_scheduler.rst | 105 ++++------------------
 1 file changed, 13 insertions(+), 92 deletions(-)

diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
index 7599f2cc5..c9aaf8c01 100644
--- a/doc/source/devref/distributed_scheduler.rst
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -55,6 +55,8 @@ Here is how it works:
 3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed.
 4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed.

+`ZoneAwareScheduler` by itself is not capable of handling all the provisioning. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later.
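To make the encrypted "hint" idea from The Catch concrete, here is a minimal sketch of how a child Zone might seal its host selection into an opaque blob and later recover it. This is an illustration only, not Nova's actual implementation: the helper names are hypothetical, and the example leans on the third-party `cryptography` package's Fernet tokens purely for brevity.

::

    import json

    from cryptography.fernet import Fernet

    # Hypothetical demo key. In practice the key would be a secret known
    # only inside the child zone, so the parent can never read the blob.
    CHILD_ZONE_KEY = Fernet.generate_key()


    def encrypt_host_hint(host_info):
        """Child zone: seal internal host details before returning a weight."""
        blob = json.dumps(host_info).encode('utf-8')
        return Fernet(CHILD_ZONE_KEY).encrypt(blob)


    def decrypt_host_hint(blob):
        """Child zone: recover the host details from the opaque blob that
        the parent zone handed back with the POST /servers call."""
        return json.loads(Fernet(CHILD_ZONE_KEY).decrypt(blob))


    hint = encrypt_host_hint({'host': 'compute-17', 'weight': 42})
    # The parent zone only ever sees an opaque token ...
    print(repr(hint[:16]))
    # ... which the child zone can turn back into a routing decision.
    print(decrypt_host_hint(hint))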
+ Filtering and Weighing ------------ Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. @@ -77,7 +79,7 @@ The problem with this approach is that each request is scattered amongst each of For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. -2. it creates a single `request_id` for all of instances created. This is a UUID. +2. it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue 4. a scheduler picks the message off the queue and works on it. 5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. @@ -85,6 +87,7 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to 7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. 9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user. +10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. The Catch ------------- @@ -92,105 +95,23 @@ This all seems pretty straightforward but, like most things, there's a catch. Zo When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. -Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. 
- - - -- -Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: - -:: - - key=value;value;value, key=value;value;value - -Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. - -Flow within a Zone ------------------- -The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for: -- collecting capability messages from the Compute, Volume and Network nodes, -- polling the child Zones for their status and -- providing data to the Distributed Scheduler for performing load balancing calculations - -Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. - -These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. - -The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). - -Zone administrative functions ------------------------------ -Zone administrative operations are usually done using python-novaclient_ - -.. _python-novaclient: https://github.com/rackspace/python-novaclient - -In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. - -Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. - -Find out about this Zone ------------------------- -In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. - -:: - - alice@novadev:~$ nova zone-info - +-----------------+---------------+ - | Property | Value | - +-----------------+---------------+ - | compute_cpu | 0.7,0.7 | - | compute_disk | 123000,123000 | - | compute_network | 800,800 | - | hypervisor | xenserver | - | name | nova | - | network_cpu | 0.7,0.7 | - | network_disk | 123000,123000 | - | network_network | 800,800 | - | os | linux | - +-----------------+---------------+ - -This equates to a GET operation on `.../zones/info`. 
If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. - -Adding a child Zone -------------------- -Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command: - -:: - - nova zone-add - -You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: - -:: +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. - export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" - export NOVA_USERNAME="alice" - export NOVA_URL="http://192.168.2.120:8774/v1.0/" +In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. +Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use. -This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. +Reservation ID's +--------------- -Getting a list of child Zones ------------------------------ -:: - nova zone-list - alice@novadev:~$ nova zone-list - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | ID | Name | Is Active | Capabilities | API URL | - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | - | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | - +----+-------+-----------+--------------------------------------------+---------------------------------+ +Host Filter +-------------- -This equates to a GET operation to `.../zones`. -Removing a child Zone ---------------------- -:: +Cost Scheduler Weighing +-------------- - nova zone-delete -This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. 
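
To make the encrypted-hint handoff described above concrete, here is a toy sketch of the round trip (illustrative only: base64 stands in for the real per-Zone encryption keyed by the `--build_plan_encryption_key` flag described later, and all names are made up):

::

    import base64
    import json

    def encrypt_blob(details):
        # Toy stand-in: a real Zone encrypts with its private key.
        return base64.b64encode(json.dumps(details).encode())

    def decrypt_blob(blob):
        return json.loads(base64.b64decode(blob).decode())

    # Child Zone: weigh a host, hide its identity inside the blob.
    answer = {'weight': 42, 'blob': encrypt_blob({'host': 'compute-7'})}

    # Parent Zone: if this answer wins, it echoes the blob back verbatim
    # in the later POST /servers call; only the child can read it.
    hint = decrypt_blob(answer['blob'])
    assert hint['host'] == 'compute-7'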
-- cgit From 5101aa300b087bf57f22cb128649679e8b11051d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 00:45:15 -0300 Subject: reservation_id's done --- doc/source/devref/distributed_scheduler.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index c9aaf8c01..402f50bee 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -104,8 +104,17 @@ Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.c Reservation ID's --------------- +NOTE: The features described in this section are related to the up-coming 'merge-4' branch. +The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created. +NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be bad. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. + +We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/servers` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. + +Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter. + +`python-novaclient` will be extended to support both of these changes. Host Filter -------------- -- cgit From 0cf5316131aecbac5e843282e2e2eb2acd3fc9e3 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 05:03:45 -0700 Subject: first cut complete --- doc/source/devref/distributed_scheduler.rst | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 402f50bee..a45505640 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -119,8 +119,49 @@ Finally, we need to give the user a way to get information on each of the instan Host Filter -------------- +As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. +The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others. 
+ +`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. +`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. + +To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be. + Cost Scheduler Weighing -------------- +Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled. + +Simple Zone Aware Scheduling +-------------- +The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. + +The `--scheduler_driver` flag is how you specify the Scheduler class name. + +Flags +-------------- + +All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file: + +:: + --allow_admin_api=true + --enable_zone_routing=true + --zone_name=zone1 + --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b + --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler + --default_host_filter=nova.scheduler.host_filter.AllHostsFilter + +`--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. +`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. +`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. +`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. +`scheduler_driver` is the real work horse of the operation. 
For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`
+`default_host_filter` is the host filter to be used for filtering candidate Compute nodes.
+
+Some optional flags which are handy for debugging are:
+::
+    --connection_type=fake
+    --verbose
+Using the `Fake` virtualization driver is handy when you're setting this stuff up so you're not dealing with a million possible issues at once. When things seem to be working correctly, switch back to whatever hypervisor your deployment uses.
-- 
cgit 
From 1adb96550640a65a723635f2dc98e4595f95fd52 Mon Sep 17 00:00:00 2001
From: Sandy Walsh 
Date: Tue, 31 May 2011 08:26:11 -0700
Subject: edits based on ed's feedback

---
 doc/source/devref/distributed_scheduler.rst | 85 +++++++++++++++--------------
 1 file changed, 45 insertions(+), 40 deletions(-)

diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
index a45505640..28ba20af7 100644
--- a/doc/source/devref/distributed_scheduler.rst
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -1,3 +1,7 @@
+
+
+
+
 ..
       Copyright 2011 OpenStack LLC
       All Rights Reserved.
@@ -17,7 +21,7 @@
 
 Distributed Scheduler
 =====
-The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
 
 But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.
 
@@ -29,7 +33,7 @@ This document will explain the strategy employed by the `ZoneAwareScheduler` and
 
 Costs & Weights
 ----------
-When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost.
+When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost.
But putting a vanilla instance on a vanilla Host should have a low cost. Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost. @@ -44,73 +48,73 @@ This Weight is computed for each Instance requested. If the customer asked for 1 nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler ----------- -As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. Here is how it works: -1. The Compute nodes are Filtered and the nodes remaining are Weighed. -1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. -2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. -3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. -4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. +1. The compute nodes are filtered and the nodes remaining are weighed. +1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request. +1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request. +2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent. +3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed. +4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed. -`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later. +`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used. Filtering and Weighing ------------ -Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. 
+The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
 
 Requesting a new instance
 ------------
-To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later.
+Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
 
-`nova.compute.api.create()` performs the following actions:
-1. it validates all the fields passed into it.
-2. it creates an entry in the `Instance` table for each instance requested
-3. it puts one `run_instance` message in the scheduler queue for each instance requested
-4. the schedulers pick off the messages and decide which Compute node should handle the request.
-5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created.
-6. it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid.
+`nova.compute.api.create()` performed the following actions:
+1. it validated all the fields passed into it.
+2. it created an entry in the `Instance` table for each instance requested
+3. it put one `run_instance` message in the scheduler queue for each instance requested
+4. the schedulers picked off the messages and decided which compute node should handle the request.
+5. the `run_instance` message was forwarded to the compute node for processing and the instance was created.
+6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
 
-Generally, the standard schedulers (like `ChangeScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones.
+Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
 
-The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
+The problem with this approach is each request is scattered amongst each of the schedulers.
If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
 
 For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
 1. it validates all the fields passed into it.
 2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
 3. it creates a single `run_instance` request in the scheduler queue
 4. a scheduler picks the message off the queue and works on it.
-5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`.
-6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
-7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well.
+5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
+6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
+7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
-9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user.
+9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
 10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
 
 The Catch
 -------------
-This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
+This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
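
For reference, each answer a child Zone sends back from `select` carries little more than a weight and an opaque hint about the chosen host, roughly like this (an illustrative shape, not a formal schema; the encrypted `blob` is explained next):

::

    [{"weight": 123, "blob": "<encrypted host details>"},
     {"weight": 456, "blob": "<encrypted host details>"}]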
+When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
 
-Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent.
+Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`.
 
 In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.
 
 Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
 
-Reservation ID's
+Reservation IDs
 ---------------
 
 NOTE: The features described in this section are related to the up-coming 'merge-4' branch.
 
 The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`.
This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
 
+NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
 
+We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
 
 Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter.
 
@@ -119,14 +123,15 @@ Finally, we need to give the user a way to get information on each of the instan
 
 Host Filter
 --------------
-As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
+As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
 
+The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others:
 
-The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others.
+
+ * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
-`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
-`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
+ * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
 
-To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be.
+To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be. A rough sketch of such a filter appears below.
 
 Cost Scheduler Weighing
 --------------
@@ -134,9 +139,9 @@ Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` metho
 
 Simple Zone Aware Scheduling
 --------------
-The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
+The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and its `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
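
As promised above, here is a rough sketch of a custom filter (a sketch under assumptions: `GPUFilter` and the `has_gpu` capability are invented for illustration, and the `_full_name()` helper and `service_states` layout are assumed rather than quoted from the code):

::

    from nova.scheduler import host_filter

    class GPUFilter(host_filter.HostFilter):
        """Hypothetical filter: keep only hosts advertising a GPU."""

        def instance_type_to_filter(self, instance_type):
            # Tag the query with this filter's name; pass the type through.
            return (self._full_name(), instance_type)

        def filter_hosts(self, zone_manager, query):
            hosts = []
            for host, services in zone_manager.service_states.items():
                caps = services.get('compute', {})
                if caps.get('has_gpu', False):  # made-up capability key
                    hosts.append((host, caps))
            return hosts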
-The `--scheduler_driver` flag is how you specify the Scheduler class name. +The `--scheduler_driver` flag is how you specify the scheduler class name. Flags -------------- @@ -153,9 +158,9 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf `--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. `--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. -`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. +`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. `build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. -`scheduler_driver` is the real work horse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler` +`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`. `default_host_filter` is the host filter to be used for filtering candidate Compute nodes. Some optional flags which are handy for debugging are: -- cgit From f2da479b8988cd55d39e89935b10e0b348df43c9 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 31 May 2011 23:36:49 +0400 Subject: Moved everything from thread-local storage to class attributes --- nova/auth/ldapdriver.py | 38 +++++++++++--------------------------- nova/auth/manager.py | 14 +++----------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 9fe0165a1..91f412baa 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -26,7 +26,6 @@ public methods. 
import functools import sys -import threading from nova import exception from nova import flags @@ -106,7 +105,8 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' - __local = threading.local() + conn = None + mc = None def __init__(self): """Imports the LDAP module""" @@ -117,15 +117,22 @@ class LdapDriver(object): LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + if LdapDriver.mc is None: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" # TODO(yorik-sar): Should be per-request cache, not per-driver-request self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" self.__cache = None return False @@ -149,29 +156,6 @@ class LdapDriver(object): return inner return do_wrap - @property - def conn(self): - try: - return self.__local.conn - except AttributeError: - conn = self.ldap.initialize(FLAGS.ldap_url) - conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) - self.__local.conn = conn - return conn - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc - @sanitize @__local_cache('uid_user-%s') def get_user(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c71f0f161..c887297f3 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,6 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 -import threading import tempfile import uuid import zipfile @@ -207,7 +206,7 @@ class AuthManager(object): """ _instance = None - __local = threading.local() + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -224,19 +223,12 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: + if AuthManager.mc is None: if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From 5922b5dc166476adf550abbbacc21e4585e53a37 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 31 May 2011 21:23:36 +0000 Subject: Fixing Scheduler Tests --- nova/tests/scheduler/test_scheduler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 54b3f80fb..b0f0e882a 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase): 
"""Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + driver = 'nova.tests.scheduler.test_scheduler.TestDriver' + self.flags(scheduler_driver=driver) def _create_compute_service(self): """Create compute-manager(ComputeNode and Service record).""" -- cgit From a26e21040681fd6db5a6ae862ca18ee17689854c Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:32:49 +0400 Subject: Moved memcached driver import to the top of modules. --- nova/auth/ldapdriver.py | 10 ++++++---- nova/auth/manager.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 91f412baa..e26a360af 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -69,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor @@ -121,10 +127,6 @@ class LdapDriver(object): LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c887297f3..98c7dd263 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -224,10 +230,6 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) if AuthManager.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', -- cgit From 4d1271821f782d4e11934d69b4ffe3aced6072eb Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:34:54 +0400 Subject: PEP8 fix. 
--- nova/auth/ldapdriver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e26a360af..95e31ae3b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,8 @@ class LdapDriver(object): self.__cache = None if LdapDriver.conn is None: LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) -- cgit From b2fb1738db489206557abccb631b13991c31fd4e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:23:05 -0700 Subject: make all uses of utcnow use our testable utils.utcnow --- bin/nova-manage | 3 +-- nova/api/ec2/admin.py | 3 +-- nova/api/ec2/cloud.py | 5 ++-- nova/api/openstack/auth.py | 5 ++-- nova/api/openstack/contrib/__init__.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/ratelimiting/__init__.py | 2 +- nova/compute/api.py | 3 +-- nova/compute/manager.py | 3 +-- nova/compute/monitor.py | 2 +- nova/context.py | 1 - nova/db/sqlalchemy/api.py | 29 +++++++++++----------- .../versions/016_make_quotas_key_and_value.py | 10 ++++---- nova/db/sqlalchemy/models.py | 9 +++---- nova/network/manager.py | 2 +- nova/notifier/api.py | 7 +++--- nova/scheduler/driver.py | 3 ++- nova/scheduler/simple.py | 11 ++++---- nova/test.py | 4 +-- nova/tests/api/openstack/fakes.py | 3 +-- nova/tests/api/openstack/test_images.py | 1 - nova/tests/api/openstack/test_servers.py | 8 +++--- nova/tests/test_compute.py | 5 ++-- nova/tests/test_console.py | 2 -- nova/tests/test_middleware.py | 1 - nova/tests/test_scheduler.py | 16 ++++++------ nova/utils.py | 2 +- nova/virt/xenapi/fake.py | 4 +-- nova/volume/api.py | 5 ++-- nova/volume/manager.py | 4 +-- 30 files changed, 69 insertions(+), 88 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..b545c4246 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,7 +53,6 @@ CLI interface for nova management. """ -import datetime import gettext import glob import json @@ -689,7 +688,7 @@ class ServiceCommands(object): """Show a list of all running services. Filter by host & service name. args: [host] [service]""" ctxt = context.get_admin_context() - now = datetime.datetime.utcnow() + now = utils.utcnow() services = db.service_get_all(ctxt) if host: services = [s for s in services if s['host'] == host] diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..aeebd86fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. """ import base64 -import datetime from nova import db from nova import exception @@ -305,7 +304,7 @@ class AdminController(object): * Volume Count """ services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] rv = [] for host in [service['host'] for service in services]: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 79cc3b3bf..04675174f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,6 @@ datastore. 
""" import base64 -import datetime import IPy import os import urllib @@ -235,7 +234,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -595,7 +594,7 @@ class CloudController(object): instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) - now = datetime.datetime.utcnow() + now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c6ee22a2..b49bf449b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -13,9 +13,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import hashlib import time @@ -127,7 +126,7 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.utcnow() - token['created_at'] + delta = utils.utcnow() - token['created_at'] if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py index b42a1d89d..acb5eb280 100644 --- a/nova/api/openstack/contrib/__init__.py +++ b/nova/api/openstack/contrib/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Contrib contains extensions that are shipped with nova. diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 4d46b92df..dc2bc6bbc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """ Module dedicated functions/classes dealing with rate limiting requests. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 88ffc3246..9ede548c2 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. 
"""Rate limiting of arbitrary actions.""" diff --git a/nova/compute/api.py b/nova/compute/api.py index 7122ebe67..de87ddd88 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -18,7 +18,6 @@ """Handles all requests relating to instances (guest vms).""" -import datetime import eventlet import re import time @@ -405,7 +404,7 @@ class API(base.Base): instance['id'], state_description='terminating', state=0, - terminated_at=datetime.datetime.utcnow()) + terminated_at=utils.utcnow()) host = instance['host'] if host: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3897b3a9e..a57d6e246 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,7 +35,6 @@ terminating it. """ -import datetime import os import socket import sys @@ -159,7 +158,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or datetime.datetime.utcnow()} + data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) def _update_image_id(self, context, instance_id, image_id): diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 3bb54a382..613734bef 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -86,7 +86,7 @@ RRD_VALUES = { ]} -utcnow = datetime.datetime.utcnow +utcnow = utils.utcnow LOG = logging.getLogger('nova.compute.monitor') diff --git a/nova/context.py b/nova/context.py index c113f7ea7..99085ed75 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,7 +18,6 @@ """RequestContext: context for requests that persist through all of nova.""" -import datetime import random from nova import exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c3a971a82..bb7bf5b89 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
""" -import datetime import warnings from nova import db @@ -674,7 +673,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter_by(allocated=0).\ update({'instance_id': None, 'leased': 0, - 'updated_at': datetime.datetime.utcnow()}, + 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -820,17 +819,17 @@ def instance_destroy(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1123,7 +1122,7 @@ def key_pair_destroy_all_by_user(context, user_id): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1655,7 +1654,7 @@ def volume_destroy(context, volume_id): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ @@ -1813,7 +1812,7 @@ def snapshot_destroy(context, snapshot_id): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1968,17 +1967,17 @@ def security_group_destroy(context, security_group_id): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1989,11 +1988,11 @@ def security_group_destroy_all(context, session=None): with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2627,7 +2626,7 @@ def instance_metadata_delete(context, instance_id, key): filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2638,7 +2637,7 @@ def instance_metadata_delete_all(context, instance_id): 
filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 5d0593f2e..a4fe3e482 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -17,7 +17,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table -import datetime +from nova import utils meta = MetaData() @@ -35,9 +35,9 @@ def old_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -57,9 +57,9 @@ def new_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 22a1a84e8..dbe72efd9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -19,8 +19,6 @@ SQLAlchemy models for nova data. 
""" -import datetime - from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text @@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import utils FLAGS = flags.FLAGS @@ -43,8 +42,8 @@ class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False - created_at = Column(DateTime, default=datetime.datetime.utcnow) - updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -64,7 +63,7 @@ class NovaBase(object): def delete(self, session=None): """Delete this object.""" self.deleted = True - self.deleted_at = datetime.datetime.utcnow() + self.deleted_at = utils.utcnow() self.save(session=session) def __setitem__(self, key, value): diff --git a/nova/network/manager.py b/nova/network/manager.py index 5a6fdde5a..f726c4b26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -235,7 +235,7 @@ class NetworkManager(manager.SchedulerDependentManager): inst_addr = instance_ref['mac_address'] raise exception.Error(_('IP %(address)s leased to bad mac' ' %(inst_addr)s vs %(mac)s') % locals()) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039e..d49517c8b 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 2094e3565..0b257c5d8 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from nova.compute import power_state FLAGS = flags.FLAGS @@ -61,7 +62,7 @@ class Scheduler(object): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
- elapsed = datetime.datetime.utcnow() - last_heartbeat + elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..87cdef11d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/test.py b/nova/test.py index 80b2d0a74..60b599ce4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,7 +23,6 @@ inline callbacks. """ -import datetime import functools import os import shutil @@ -37,6 +36,7 @@ from eventlet import greenthread from nova import fakerabbit from nova import flags from nova import rpc +from nova import utils from nova import service from nova import wsgi from nova.virt import fake @@ -69,7 +69,7 @@ class TestCase(unittest.TestCase): # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. - self.start = datetime.datetime.utcnow() + self.start = utils.utcnow() shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8e0156afa..4fb0613fc 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
import copy -import datetime import json import random import string @@ -253,7 +252,7 @@ class FakeAuthDatabase(object): @staticmethod def auth_token_create(context, token): - fake_token = FakeToken(created_at=datetime.datetime.now(), **token) + fake_token = FakeToken(created_at=utils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 9f1f28611..93b402081 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import copy import json -import datetime import os import shutil import tempfile diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 11dcaaade..50d5fe980 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -16,7 +16,6 @@ # under the License. import base64 -import datetime import json import unittest from xml.dom import minidom @@ -29,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -114,9 +114,9 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "user_data": "", "reservation_id": "", "mac_address": "", - "scheduled_at": datetime.datetime.now(), - "launched_at": datetime.datetime.now(), - "terminated_at": datetime.datetime.now(), + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "server%s" % id, "display_description": "", diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..c726080ee 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867ee..831e7670f 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a6955..40d117c45 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 54b3f80fb..1cf6bbfbf 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -196,7 +196,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..b1638e72c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ 
-307,7 +307,7 @@ def get_my_linklocal(interface):
 
 
 def utcnow():
-    """Overridable version of datetime.datetime.utcnow."""
+    """Overridable version of datetime.datetime.utcnow, for use in tests."""
     if utcnow.override_time:
         return utcnow.override_time
     return datetime.datetime.utcnow()
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 76988b172..165888cb2 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -51,13 +51,13 @@
 A fake XenAPI SDK.
 """
 
-import datetime
 import uuid
 
 from pprint import pformat
 
 from nova import exception
 from nova import log as logging
+from nova import utils
 
 
 _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',
@@ -540,7 +540,7 @@ class SessionBase(object):
         except Failure, exc:
             task['error_info'] = exc.details
             task['status'] = 'failed'
-        task['finished'] = datetime.datetime.now()
+        task['finished'] = utils.utcnow()
         return task_ref
 
     def _check_session(self, params):
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 5804955f7..b07f2e94b 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -20,14 +20,13 @@
 Handles all requests relating to volumes.
 """
 
-import datetime
 
-from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import quota
 from nova import rpc
+from nova import utils
 from nova.db import base
 
 FLAGS = flags.FLAGS
@@ -78,7 +77,7 @@ class API(base.Base):
         volume = self.get(context, volume_id)
         if volume['status'] != "available":
             raise exception.ApiError(_("Volume status must be available"))
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                    'terminated_at': now})
         host = volume['host']
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index ff53f0701..798bd379a 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,8 +42,6 @@
 intact.
 
 """
 
-import datetime
-
 from nova import context
 from nova import exception
@@ -127,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                         volume_ref['id'], {'status': 'error'})
                 raise
 
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         self.db.volume_update(context,
                               volume_ref['id'], {'status': 'available',
                                                  'launched_at': now})
-- cgit 

From 4762aebe4ddc57d8502ed3b5aec56b613d0ec93b Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Thu, 2 Jun 2011 14:51:30 -0700
Subject: switch zones to use utcnow

---
 nova/scheduler/zone_manager.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 3ddf6f3c3..3f483adff 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -17,16 +17,17 @@
 ZoneManager oversees all communications with child Zones.
""" +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { : ZoneState } self.service_states = {} # { : { : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) -- cgit From d7d628d58612b94491310a1a03727e1afa9d5ad5 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 20:45:36 -0500 Subject: Added paramiko to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index f1c5b2003..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -33,3 +33,4 @@ suds==0.4 coverage nosexcover GitPython +paramiko -- cgit From a3b8b3467d836463dda806c93756841a52c055d3 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 21:18:09 -0500 Subject: added nova_adminclient to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index e81ef944a..035e4347d 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,3 +34,4 @@ coverage nosexcover GitPython paramiko +nova_adminclient -- cgit From 9ee103a91fe3bed03c3f4c6c1a6e89fa474e1aae Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 12:37:58 +0400 Subject: Fixed FakeLdapDriver, made it call LdapDriver.__init__ --- nova/auth/ldapdriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95e31ae3b..183f7a985 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -676,6 +676,7 @@ class LdapDriver(object): class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: 
disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() -- cgit From 72a47784dc09d9b840db146d58ea71f6af30a8ea Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 13:39:22 +0400 Subject: Flush AuthManager's cache before each test. --- nova/tests/test_auth.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f02dd94b7..7d00bddfe 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase): super(_AuthManagerBaseTestCase, self).setUp() self.flags(connection_type='fake') self.manager = manager.AuthManager(new=True) + self.manager.mc.cache = {} def test_create_and_find_user(self): with user_generator(self.manager): -- cgit From 29eec21f6752ef2c03412213a74aa12745286c82 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 3 Jun 2011 05:23:43 -0700 Subject: little tweaks --- doc/source/devref/distributed_scheduler.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 28ba20af7..eb6a1a03e 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -1,7 +1,3 @@ - - - - .. Copyright 2011 OpenStack LLC All Rights Reserved. @@ -40,7 +36,7 @@ Some Costs are more esoteric. Consider a rule that says we should prefer Hosts t An example of some other costs might include selecting: * a GPU-based host over a standard CPU * a host with fast ethernet over a 10mbps line -* a host than can run Windows instances +* a host that can run Windows instances * a host in the EU vs North America * etc -- cgit From 25c8e9318c1ffbf2f2c88d3ed644df9e81b92b04 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 3 Jun 2011 11:52:20 -0400 Subject: Fixed pip-requires double requirement. --- tools/pip-requires | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/pip-requires b/tools/pip-requires index 035e4347d..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,4 +34,3 @@ coverage nosexcover GitPython paramiko -nova_adminclient -- cgit From f521426039e8a9cc5dccc2c7e7e1797cfe778d7e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 14:14:28 -0400 Subject: Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes issue where snapshots failed to get uploaded. --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0c00d168b..46031ebe8 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -244,7 +244,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): conn = httplib.HTTPConnection(glance_host, glance_port) # NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request - conn.putrequest('PUT', '/images/%s' % image_id) + conn.putrequest('PUT', '/v1/images/%s' % image_id) # NOTE(sirp): There is some confusion around OVF. 
Here's a summary of # where we currently stand: -- cgit From f6aa513024e14975709ef8facf1db6535eefbc44 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Fri, 3 Jun 2011 13:20:34 -0500 Subject: added 'nova-manage config list' which will list out all of the flags and their values. I also alphabetized the list of available categories --- bin/nova-manage | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..fb3810779 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1081,24 +1081,35 @@ class ImageCommands(object): self._convert_images(machine_images) +class ConfigCommands(object): + """Class for exposing the flags defined by flag_file(s).""" + + def __init__(self): + pass + + def list(self): + print FLAGS.FlagsIntoString() + + CATEGORIES = [ - ('user', UserCommands), ('account', AccountCommands), - ('project', ProjectCommands), - ('role', RoleCommands), - ('shell', ShellCommands), - ('vpn', VpnCommands), + ('config', ConfigCommands), + ('db', DbCommands), ('fixed', FixedIpCommands), + ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), + ('instance_type', InstanceTypeCommands), + ('image', ImageCommands), ('network', NetworkCommands), - ('vm', VmCommands), + ('project', ProjectCommands), + ('role', RoleCommands), ('service', ServiceCommands), - ('db', DbCommands), + ('shell', ShellCommands), + ('user', UserCommands), + ('version', VersionCommands), + ('vm', VmCommands), ('volume', VolumeCommands), - ('instance_type', InstanceTypeCommands), - ('image', ImageCommands), - ('flavor', InstanceTypeCommands), - ('version', VersionCommands)] + ('vpn', VpnCommands)] def lazy_match(name, key_value_tuples): -- cgit