author     Vishvananda Ishaya <vishvananda@yahoo.com>  2010-09-11 20:00:56 -0700
committer  Vishvananda Ishaya <vishvananda@yahoo.com>  2010-09-11 20:00:56 -0700
commit     3aae714e4feb4a29ac7d2aed78aef37b3385300c (patch)
tree       1747e2ae3a42865c2d012d3805000ab0318f4bbb /nova
parent     ef1913292dd8a88041f603d79c09c738a7ecbb04 (diff)
parent     15ca1fe1670cfd95880f2e1c2a5270be787c6035 (diff)
merged scheduler
Diffstat (limited to 'nova')
-rw-r--r--  nova/db/api.py                    |  38
-rw-r--r--  nova/db/sqlalchemy/api.py         |  82
-rw-r--r--  nova/db/sqlalchemy/models.py      |  35
-rwxr-xr-x  nova/endpoint/api.py              |   5
-rw-r--r--  nova/endpoint/cloud.py            |  55
-rw-r--r--  nova/quota.py                     |  91
-rw-r--r--  nova/scheduler/simple.py          |  24
-rw-r--r--  nova/tests/compute_unittest.py    |   1
-rw-r--r--  nova/tests/quota_unittest.py      | 155
-rw-r--r--  nova/tests/scheduler_unittest.py  | 138
10 files changed, 568 insertions(+), 56 deletions(-)
diff --git a/nova/db/api.py b/nova/db/api.py
index 1b477da72..9f6ff99c3 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -125,6 +125,11 @@ def floating_ip_create(context, values):
return IMPL.floating_ip_create(context, values)
+def floating_ip_count_by_project(context, project_id):
+ """Count floating ips used by project."""
+ return IMPL.floating_ip_count_by_project(context, project_id)
+
+
def floating_ip_deallocate(context, address):
"""Deallocate an floating ip by address"""
return IMPL.floating_ip_deallocate(context, address)
@@ -227,6 +232,11 @@ def instance_create(context, values):
return IMPL.instance_create(context, values)
+def instance_data_get_for_project(context, project_id):
+ """Get (instance_count, core_count) for project."""
+ return IMPL.instance_data_get_for_project(context, project_id)
+
+
def instance_destroy(context, instance_id):
"""Destroy the instance or raise if it does not exist."""
return IMPL.instance_destroy(context, instance_id)
@@ -411,6 +421,29 @@ def export_device_create(context, values):
###################
+def quota_create(context, values):
+ """Create a quota from the values dictionary."""
+ return IMPL.quota_create(context, values)
+
+
+def quota_get(context, project_id):
+ """Retrieve a quota or raise if it does not exist."""
+ return IMPL.quota_get(context, project_id)
+
+
+def quota_update(context, project_id, values):
+ """Update a quota from the values dictionary."""
+ return IMPL.quota_update(context, project_id, values)
+
+
+def quota_destroy(context, project_id):
+ """Destroy the quota or raise if it does not exist."""
+ return IMPL.quota_destroy(context, project_id)
+
+
+###################
+
+
def volume_allocate_shelf_and_blade(context, volume_id):
"""Atomically allocate a free shelf and blade from the pool."""
return IMPL.volume_allocate_shelf_and_blade(context, volume_id)
@@ -426,6 +459,11 @@ def volume_create(context, values):
return IMPL.volume_create(context, values)
+def volume_data_get_for_project(context, project_id):
+ """Get (volume_count, gigabytes) for project."""
+ return IMPL.volume_data_get_for_project(context, project_id)
+
+
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
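The quota entry points above follow this module's existing IMPL-delegation pattern. A minimal usage sketch of the new API (the context value and the 'myproject' id are hypothetical; the function names and the values-dict convention come from this diff):

    from nova import db

    context = None  # the sqlalchemy layer ignores it; real callers pass one
    db.quota_create(context, {'project_id': 'myproject', 'instances': 10})
    db.quota_update(context, 'myproject', {'cores': 100})
    quota_ref = db.quota_get(context, 'myproject')  # raises NotFound if absent
    db.quota_destroy(context, 'myproject')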
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 735e88145..d612fe669 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -67,7 +67,7 @@ def service_get_all_by_topic(context, topic):
def _service_get_all_topic_subquery(_context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
- return session.query(models.Service, sort_value
+ return session.query(models.Service, func.coalesce(sort_value, 0)
).filter_by(topic=topic
).filter_by(deleted=False
).outerjoin((subq, models.Service.host == subq.c.host)
@@ -79,15 +79,16 @@ def service_get_all_compute_sorted(context):
session = get_session()
with session.begin():
# NOTE(vish): The intended query is below
- # SELECT services.*, inst_count.instance_count
+ # SELECT services.*, COALESCE(inst_cores.instance_cores,
+ # 0)
# FROM services LEFT OUTER JOIN
- # (SELECT host, count(*) AS instance_count
- # FROM instances GROUP BY host) AS inst_count
- # ON services.host = inst_count.host
+ # (SELECT host, SUM(instances.vcpus) AS instance_cores
+ # FROM instances GROUP BY host) AS inst_cores
+ # ON services.host = inst_cores.host
topic = 'compute'
- label = 'instance_count'
+ label = 'instance_cores'
subq = session.query(models.Instance.host,
- func.count('*').label(label)
+ func.sum(models.Instance.vcpus).label(label)
).filter_by(deleted=False
).group_by(models.Instance.host
).subquery()
@@ -104,7 +105,7 @@ def service_get_all_network_sorted(context):
topic = 'network'
label = 'network_count'
subq = session.query(models.Network.host,
- func.count('*').label(label)
+ func.count(models.Network.id).label(label)
).filter_by(deleted=False
).group_by(models.Network.host
).subquery()
@@ -119,9 +120,9 @@ def service_get_all_volume_sorted(context):
session = get_session()
with session.begin():
topic = 'volume'
- label = 'volume_count'
+ label = 'volume_gigabytes'
subq = session.query(models.Volume.host,
- func.count('*').label(label)
+ func.sum(models.Volume.size).label(label)
).filter_by(deleted=False
).group_by(models.Volume.host
).subquery()
@@ -182,6 +183,14 @@ def floating_ip_create(_context, values):
return floating_ip_ref['address']
+def floating_ip_count_by_project(_context, project_id):
+ session = get_session()
+ return session.query(models.FloatingIp
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).count()
+
+
def floating_ip_fixed_ip_associate(_context, floating_address, fixed_address):
session = get_session()
with session.begin():
@@ -351,6 +360,17 @@ def instance_create(_context, values):
return instance_ref
+def instance_data_get_for_project(_context, project_id):
+ session = get_session()
+ result = session.query(func.count(models.Instance.id),
+ func.sum(models.Instance.vcpus)
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).first()
+ # NOTE(vish): convert None to 0
+ return (result[0] or 0, result[1] or 0)
+
+
def instance_destroy(_context, instance_id):
session = get_session()
with session.begin():
@@ -621,6 +641,37 @@ def export_device_create(_context, values):
###################
+def quota_create(_context, values):
+ quota_ref = models.Quota()
+ for (key, value) in values.iteritems():
+ quota_ref[key] = value
+ quota_ref.save()
+ return quota_ref
+
+
+def quota_get(_context, project_id):
+ return models.Quota.find_by_str(project_id)
+
+
+def quota_update(_context, project_id, values):
+ session = get_session()
+ with session.begin():
+ quota_ref = models.Quota.find_by_str(project_id, session=session)
+ for (key, value) in values.iteritems():
+ quota_ref[key] = value
+ quota_ref.save(session=session)
+
+
+def quota_destroy(_context, project_id):
+ session = get_session()
+ with session.begin():
+ quota_ref = models.Quota.find_by_str(project_id, session=session)
+ quota_ref.delete(session=session)
+
+
+###################
+
+
def volume_allocate_shelf_and_blade(_context, volume_id):
session = get_session()
with session.begin():
@@ -658,6 +709,17 @@ def volume_create(_context, values):
return volume_ref
+def volume_data_get_for_project(_context, project_id):
+ session = get_session()
+ result = session.query(func.count(models.Volume.id),
+ func.sum(models.Volume.size)
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).first()
+ # NOTE(vish): convert None to 0
+ return (result[0] or 0, result[1] or 0)
+
+
def volume_destroy(_context, volume_id):
session = get_session()
with session.begin():
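The coalesce change at the top of this file matters for empty hosts: the outer join yields NULL for a host with no instances (or volumes), and COALESCE(..., 0) makes such hosts sort as zero-loaded candidates instead of NULL. A sketch of how a caller consumes the sorted result (least-loaded first is implied by how simple.py raises on the first over-capacity host):

    # Each row pairs a Service with its summed load; empty hosts report 0.
    for service, gigabytes_used in db.service_get_all_volume_sorted(context):
        print service['host'], gigabytes_used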
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index c16f684fe..41013f41b 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -229,6 +229,11 @@ class Instance(BASE, NovaBase):
state = Column(Integer)
state_description = Column(String(255))
+ memory_mb = Column(Integer)
+ vcpus = Column(Integer)
+ local_gb = Column(Integer)
+
+
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
@@ -277,6 +282,36 @@ class Volume(BASE, NovaBase):
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
+class Quota(BASE, NovaBase):
+ """Represents quota overrides for a project"""
+ __tablename__ = 'quotas'
+ id = Column(Integer, primary_key=True)
+
+ project_id = Column(String(255))
+
+ instances = Column(Integer)
+ cores = Column(Integer)
+ volumes = Column(Integer)
+ gigabytes = Column(Integer)
+ floating_ips = Column(Integer)
+
+ @property
+ def str_id(self):
+ return self.project_id
+
+ @classmethod
+ def find_by_str(cls, str_id, session=None, deleted=False):
+ if not session:
+ session = get_session()
+ try:
+ return session.query(cls
+ ).filter_by(project_id=str_id
+ ).filter_by(deleted=deleted
+ ).one()
+ except exc.NoResultFound:
+ new_exc = exception.NotFound("No model for project_id %s" % str_id)
+ raise new_exc.__class__, new_exc, sys.exc_info()[2]
+
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on"""
__tablename__ = 'export_devices'
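Quota rows are addressed by project id rather than by integer primary key, which is why the model overrides str_id and supplies find_by_str. A minimal sketch of the lookup contract (the error handling mirrors what nova.quota._get_quota does later in this commit):

    from nova import exception
    from nova.db.sqlalchemy import models

    try:
        quota_ref = models.Quota.find_by_str('myproject')
    except exception.NotFound:
        quota_ref = None  # no override row; callers fall back to flag defaults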
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index 40be00bb7..12eedfe67 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -304,7 +304,10 @@ class APIRequestHandler(tornado.web.RequestHandler):
try:
failure.raiseException()
except exception.ApiError as ex:
- self._error(type(ex).__name__ + "." + ex.code, ex.message)
+ if ex.code:
+ self._error(ex.code, ex.message)
+ else:
+ self._error(type(ex).__name__, ex.message)
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
self._error(type(ex).__name__, str(ex))
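This hunk changes error rendering: an ApiError carrying a code is now reported under that code alone, and code-less errors fall back to the class name, rather than always joining the two as "ClassName.code". A hedged sketch of the selection, assuming ApiError exposes .code and .message as the handler uses them:

    # QuotaError("Instance quota exceeded. ...", "InstanceLimitExceeded")
    # now renders as "InstanceLimitExceeded"; a code-less QuotaError("msg")
    # falls back to "QuotaError".
    code = ex.code if ex.code else type(ex).__name__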
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 9d8e45f30..94a04fb1b 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -33,6 +33,7 @@ from twisted.internet import defer
from nova import db
from nova import exception
from nova import flags
+from nova import quota
from nova import rpc
from nova import utils
from nova.auth import rbac
@@ -45,6 +46,11 @@ FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+class QuotaError(exception.ApiError):
+ """Quota Exceeeded"""
+ pass
+
+
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
try:
@@ -278,6 +284,14 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
+ # check quota
+ size = int(size)
+ if quota.allowed_volumes(context, 1, size) < 1:
+ logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+ context.project.id, size)
+ raise QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" %
+ size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
@@ -287,9 +301,11 @@ class CloudController(object):
vol['attach_status'] = "detached"
volume_ref = db.volume_create(context, vol)
- rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
- "args": {"context": None,
- "volume_id": volume_ref['id']}})
+ rpc.cast(FLAGS.scheduler_topic,
+ {"method": "create_volume",
+ "args": {"context": None,
+ "topic": FLAGS.volume_topic,
+ "volume_id": volume_ref['id']}})
return {'volumeSet': [self._format_volume(context, volume_ref)]}
@@ -442,6 +458,12 @@ class CloudController(object):
@rbac.allow('netadmin')
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
+ # check quota
+ if quota.allowed_floating_ips(context, 1) < 1:
+ logging.warn("Quota exceeeded for %s, tried to allocate address",
+ context.project.id)
+ raise QuotaError("Address quota exceeded. You cannot "
+ "allocate any more addresses")
network_topic = yield self._get_network_topic(context)
public_ip = yield rpc.call(network_topic,
{"method": "allocate_floating_ip",
@@ -501,6 +523,22 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def run_instances(self, context, **kwargs):
+ instance_type = kwargs.get('instance_type', 'm1.small')
+ if instance_type not in INSTANCE_TYPES:
+ raise exception.ApiError("Unknown instance type: %s",
+ instance_type)
+ # check quota
+ max_instances = int(kwargs.get('max_count', 1))
+ min_instances = int(kwargs.get('min_count', max_instances))
+ num_instances = quota.allowed_instances(context,
+ max_instances,
+ instance_type)
+ if num_instances < min_instances:
+ logging.warn("Quota exceeeded for %s, tried to run %s instances",
+ context.project.id, min_instances)
+ raise QuotaError("Instance quota exceeded. You can only "
+ "run %s more instances of this type." %
+ num_instances, "InstanceLimitExceeded")
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
@@ -522,7 +560,7 @@ class CloudController(object):
images.get(context, kernel_id)
images.get(context, ramdisk_id)
- logging.debug("Going to run instances...")
+ logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
@@ -547,10 +585,15 @@ class CloudController(object):
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
- base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
base_options['security_group'] = security_group
+ base_options['instance_type'] = instance_type
+
+ type_data = INSTANCE_TYPES[instance_type]
+ base_options['memory_mb'] = type_data['memory_mb']
+ base_options['vcpus'] = type_data['vcpus']
+ base_options['local_gb'] = type_data['local_gb']
- for num in range(int(kwargs['max_count'])):
+ for num in range(num_instances):
instance_ref = db.instance_create(context, base_options)
inst_id = instance_ref['id']
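Two changes land in cloud.py: quota checks run before any database records are created or RPC casts go out, and create_volume now casts to the scheduler instead of directly to a volume host. A rough sketch of the new volume message flow (the "<topic>.<host>" forwarding queue is an assumption about the scheduler side):

    rpc.cast(FLAGS.scheduler_topic,
             {"method": "create_volume",
              "args": {"context": None,
                       "topic": FLAGS.volume_topic,  # service type to schedule
                       "volume_id": volume_ref['id']}})
    # the scheduler calls schedule_create_volume to pick a host, then
    # re-casts the message, e.g. onto "volume.host1"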
diff --git a/nova/quota.py b/nova/quota.py
new file mode 100644
index 000000000..f0e51feeb
--- /dev/null
+++ b/nova/quota.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Quotas for instances, volumes, and floating ips
+"""
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova.compute import instance_types
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('quota_instances', 10,
+ 'number of instances allowed per project')
+flags.DEFINE_integer('quota_cores', 20,
+ 'number of instance cores allowed per project')
+flags.DEFINE_integer('quota_volumes', 10,
+ 'number of volumes allowed per project')
+flags.DEFINE_integer('quota_gigabytes', 1000,
+ 'number of volume gigabytes allowed per project')
+flags.DEFINE_integer('quota_floating_ips', 10,
+ 'number of floating ips allowed per project')
+
+def _get_quota(context, project_id):
+ rval = {'instances': FLAGS.quota_instances,
+ 'cores': FLAGS.quota_cores,
+ 'volumes': FLAGS.quota_volumes,
+ 'gigabytes': FLAGS.quota_gigabytes,
+ 'floating_ips': FLAGS.quota_floating_ips}
+ try:
+ quota = db.quota_get(context, project_id)
+ for key in rval.keys():
+ if quota[key] is not None:
+ rval[key] = quota[key]
+ except exception.NotFound:
+ pass
+ return rval
+
+def allowed_instances(context, num_instances, instance_type):
+ """Check quota and return min(num_instances, allowed_instances)"""
+ project_id = context.project.id
+ used_instances, used_cores = db.instance_data_get_for_project(context,
+ project_id)
+ quota = _get_quota(context, project_id)
+ allowed_instances = quota['instances'] - used_instances
+ allowed_cores = quota['cores'] - used_cores
+ type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus']
+ num_cores = num_instances * type_cores
+ allowed_instances = min(allowed_instances,
+ int(allowed_cores // type_cores))
+ return min(num_instances, allowed_instances)
+
+
+def allowed_volumes(context, num_volumes, size):
+ """Check quota and return min(num_volumes, allowed_volumes)"""
+ project_id = context.project.id
+ used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
+ project_id)
+ quota = _get_quota(context, project_id)
+ allowed_volumes = quota['volumes'] - used_volumes
+ allowed_gigabytes = quota['gigabytes'] - used_gigabytes
+ num_gigabytes = num_volumes * size
+ allowed_volumes = min(allowed_volumes,
+ int(allowed_gigabytes // size))
+ return min(num_volumes, allowed_volumes)
+
+
+def allowed_floating_ips(context, num_floating_ips):
+ """Check quota and return min(num_floating_ips, allowed_floating_ips)"""
+ project_id = context.project.id
+ used_floating_ips = db.floating_ip_count_by_project(context, project_id)
+ quota = _get_quota(context, project_id)
+ allowed_floating_ips = quota['floating_ips'] - used_floating_ips
+ return min(num_floating_ips, allowed_floating_ips)
+
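A worked example of allowed_instances, using the limits quota_unittest.py below configures (quota_instances=2, quota_cores=4) and assuming m1.small is a 1-vcpu type, with nothing yet running in the project:

    # allowed by instance count: 2 - 0 = 2
    # allowed by cores:          (4 - 0) // 1 = 4
    # result: min(requested=100, min(2, 4)) == 2
    num_instances = quota.allowed_instances(context, 100, 'm1.small')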
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index ecb37bd57..fdaff74d8 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -29,10 +29,10 @@ from nova.scheduler import driver
from nova.scheduler import chance
FLAGS = flags.FLAGS
-flags.DEFINE_integer("max_instances", 16,
- "maximum number of instances to allow per host")
-flags.DEFINE_integer("max_volumes", 100,
- "maximum number of volumes to allow per host")
+flags.DEFINE_integer("max_cores", 16,
+ "maximum number of instance cores to allow per host")
+flags.DEFINE_integer("max_gigabytes", 10000,
+ "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
@@ -41,12 +41,12 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
-
+ instance_ref = db.instance_get(context, instance_id)
results = db.service_get_all_compute_sorted(context)
for result in results:
- (service, instance_count) = result
- if instance_count >= FLAGS.max_instances:
- raise driver.NoValidHost("All hosts have too many instances")
+ (service, instance_cores) = result
+ if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
+ raise driver.NoValidHost("All hosts have too many cores")
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
@@ -60,12 +60,12 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
-
+ volume_ref = db.volume_get(context, volume_id)
results = db.service_get_all_volume_sorted(context)
for result in results:
- (service, instance_count) = result
- if instance_count >= FLAGS.max_volumes:
- raise driver.NoValidHost("All hosts have too many volumes")
+ (service, volume_gigabytes) = result
+ if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
+ raise driver.NoValidHost("All hosts have too many gigabytes")
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
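Both schedulers now weigh capacity instead of object counts and share the same shape. A condensed sketch of that shared pattern (names follow this diff; the behavior when no host is up is elided):

    def _pick_host(self, context, results, new_load, max_load):
        # results arrive least-loaded first, so if the first host would
        # exceed max_load, every host would.
        for service, current_load in results:
            if current_load + new_load > max_load:
                raise driver.NoValidHost("All hosts are over capacity")
            if self.service_is_up(service):
                return service['host']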
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index c983e05c9..f5c0f1c09 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -50,6 +50,7 @@ class ComputeTestCase(test.TrialTestCase):
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
+ super(ComputeTestCase, self).tearDown()
def _create_instance(self):
"""Create a test instance"""
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
new file mode 100644
index 000000000..cab9f663d
--- /dev/null
+++ b/nova/tests/quota_unittest.py
@@ -0,0 +1,155 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.endpoint import cloud
+from nova.endpoint import api
+
+
+FLAGS = flags.FLAGS
+
+
+class QuotaTestCase(test.TrialTestCase):
+ def setUp(self): # pylint: disable-msg=C0103
+ logging.getLogger().setLevel(logging.DEBUG)
+ super(QuotaTestCase, self).setUp()
+ self.flags(connection_type='fake',
+ quota_instances=2,
+ quota_cores=4,
+ quota_volumes=2,
+ quota_gigabytes=20,
+ quota_floating_ips=1)
+
+ self.cloud = cloud.CloudController()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('admin', 'admin', 'admin', True)
+ self.project = self.manager.create_project('admin', 'admin', 'admin')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.context = api.APIRequestContext(handler=None,
+ project=self.project,
+ user=self.user)
+
+ def tearDown(self): # pylint: disable-msg=C0103
+ manager.AuthManager().delete_project(self.project)
+ manager.AuthManager().delete_user(self.user)
+ super(QuotaTestCase, self).tearDown()
+
+ def _create_instance(self, cores=2):
+ """Create a test instance"""
+ inst = {}
+ inst['image_id'] = 'ami-test'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user.id
+ inst['project_id'] = self.project.id
+ inst['instance_type'] = 'm1.large'
+ inst['vcpus'] = cores
+ inst['mac_address'] = utils.generate_mac()
+ return db.instance_create(self.context, inst)['id']
+
+ def _create_volume(self, size=10):
+ """Create a test volume"""
+ vol = {}
+ vol['user_id'] = self.user.id
+ vol['project_id'] = self.project.id
+ vol['size'] = size
+ return db.volume_create(self.context, vol)['id']
+
+ def test_quota_overrides(self):
+ """Make sure overriding a projects quotas works"""
+ num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, {'project_id': self.project.id,
+ 'instances': 10})
+ num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ self.assertEqual(num_instances, 4)
+ db.quota_update(self.context, self.project.id, {'cores': 100})
+ num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ self.assertEqual(num_instances, 10)
+ db.quota_destroy(self.context, self.project.id)
+
+ def test_too_many_instances(self):
+ instance_ids = []
+ for i in range(FLAGS.quota_instances):
+ instance_id = self._create_instance()
+ instance_ids.append(instance_id)
+ self.assertFailure(self.cloud.run_instances(self.context,
+ min_count=1,
+ max_count=1,
+ instance_type='m1.small'),
+ cloud.QuotaError)
+ for instance_id in instance_ids:
+ db.instance_destroy(self.context, instance_id)
+
+ def test_too_many_cores(self):
+ instance_ids = []
+ instance_id = self._create_instance(cores=4)
+ instance_ids.append(instance_id)
+ self.assertFailure(self.cloud.run_instances(self.context,
+ min_count=1,
+ max_count=1,
+ instance_type='m1.small'),
+ cloud.QuotaError)
+ for instance_id in instance_ids:
+ db.instance_destroy(self.context, instance_id)
+
+ def test_too_many_volumes(self):
+ volume_ids = []
+ for i in range(FLAGS.quota_volumes):
+ volume_id = self._create_volume()
+ volume_ids.append(volume_id)
+ self.assertRaises(cloud.QuotaError,
+ self.cloud.create_volume,
+ self.context,
+ size=10)
+ for volume_id in volume_ids:
+ db.volume_destroy(self.context, volume_id)
+
+ def test_too_many_gigabytes(self):
+ volume_ids = []
+ volume_id = self._create_volume(size=20)
+ volume_ids.append(volume_id)
+ self.assertRaises(cloud.QuotaError,
+ self.cloud.create_volume,
+ self.context,
+ size=10)
+ for volume_id in volume_ids:
+ db.volume_destroy(self.context, volume_id)
+
+ def test_too_many_addresses(self):
+ address = '192.168.0.100'
+ try:
+ db.floating_ip_get_by_address(None, address)
+ except exception.NotFound:
+ db.floating_ip_create(None, {'address': address,
+ 'host': FLAGS.host})
+ float_addr = self.network.allocate_floating_ip(self.context,
+ self.project.id)
+ # NOTE(vish): This assert never fails. When cloud attempts to
+ # make an rpc.call, the test just finishes with OK. It
+ # appears to be something in the magic inline callbacks
+ # that is breaking.
+ self.assertFailure(self.cloud.allocate_address(self.context),
+ cloud.QuotaError)
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index 27e100fa0..fde30f81e 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -19,8 +19,6 @@
Tests For Scheduler
"""
-import mox
-
from nova import db
from nova import flags
from nova import service
@@ -33,7 +31,7 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
-flags.DECLARE('max_instances', 'nova.scheduler.simple')
+flags.DECLARE('max_cores', 'nova.scheduler.simple')
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
@@ -75,7 +73,9 @@ class SimpleDriverTestCase(test.TrialTestCase):
def setUp(self): # pylint: disable-msg=C0103
super(SimpleDriverTestCase, self).setUp()
self.flags(connection_type='fake',
- max_instances=4,
+ max_cores=4,
+ max_gigabytes=4,
+ volume_driver='nova.volume.driver.FakeAOEDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
self.context = None
@@ -83,65 +83,149 @@ class SimpleDriverTestCase(test.TrialTestCase):
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = None
- self.service1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- self.service2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
- self.service1.kill()
- self.service2.kill()
def _create_instance(self):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
- inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst['vcpus'] = 1
return db.instance_create(self.context, inst)['id']
+ def _create_volume(self):
+ """Create a test volume"""
+ vol = {}
+ vol['image_id'] = 'ami-test'
+ vol['reservation_id'] = 'r-fakeres'
+ vol['size'] = 1
+ return db.volume_create(self.context, vol)['id']
+
def test_hosts_are_up(self):
+ """Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
+ compute1.kill()
+ compute2.kill()
def test_least_busy_host_gets_instance(self):
- instance_id = self._create_instance()
- self.service1.run_instance(self.context, instance_id)
+ """Ensures the host with less cores gets the next one"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance()
host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id)
+ instance_id2)
self.assertEqual(host, 'host2')
- self.service1.terminate_instance(self.context, instance_id)
-
- def test_too_many_instances(self):
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_too_many_cores(self):
+ """Ensures we don't go over max cores"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
instance_ids1 = []
instance_ids2 = []
- for index in xrange(FLAGS.max_instances):
+ for index in xrange(FLAGS.max_cores):
instance_id = self._create_instance()
- self.service1.run_instance(self.context, instance_id)
+ compute1.run_instance(self.context, instance_id)
instance_ids1.append(instance_id)
instance_id = self._create_instance()
- self.service2.run_instance(self.context, instance_id)
+ compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
instance_id = self._create_instance()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
- 'compute',
instance_id)
for instance_id in instance_ids1:
- self.service1.terminate_instance(self.context, instance_id)
+ compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
- self.service2.terminate_instance(self.context, instance_id)
+ compute2.terminate_instance(self.context, instance_id)
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_volume(self):
+ """Ensures the host with less gigabytes gets the next one"""
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume2 = service.Service('host2',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume_id1 = self._create_volume()
+ volume1.create_volume(self.context, volume_id1)
+ volume_id2 = self._create_volume()
+ host = self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(host, 'host2')
+ volume1.delete_volume(self.context, volume_id1)
+ db.volume_destroy(self.context, volume_id2)
+ volume1.kill()
+ volume2.kill()
+
+ def test_too_many_gigabytes(self):
+ """Ensures we don't go over max gigabytes"""
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume2 = service.Service('host2',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume_ids1 = []
+ volume_ids2 = []
+ for index in xrange(FLAGS.max_gigabytes):
+ volume_id = self._create_volume()
+ volume1.create_volume(self.context, volume_id)
+ volume_ids1.append(volume_id)
+ volume_id = self._create_volume()
+ volume2.create_volume(self.context, volume_id)
+ volume_ids2.append(volume_id)
+ volume_id = self._create_volume()
+ self.assertRaises(driver.NoValidHost,
+ self.scheduler.driver.schedule_create_volume,
+ self.context,
+ volume_id)
+ for volume_id in volume_ids1:
+ volume1.delete_volume(self.context, volume_id)
+ for volume_id in volume_ids2:
+ volume2.delete_volume(self.context, volume_id)
+ volume1.kill()
+ volume2.kill()