author    Michael Gundlach <michael.gundlach@rackspace.com>    2010-09-20 15:36:45 -0400
committer Michael Gundlach <michael.gundlach@rackspace.com>    2010-09-20 15:36:45 -0400
commit    8fb32a956490f9de623fad12d9b2b1f08f88511a (patch)
tree      b5db72db732eb8cc390cbb4ec545df9e7cffa79a /nova/api
parent    8e304fe0bf69fe5f6bad2fa3d5a71a93cb0612e8 (diff)
parent    cda407fa0eff533e3aae7d3bf7276e0013a8a13d (diff)
Merge from trunk
Diffstat (limited to 'nova/api')
-rw-r--r--  nova/api/ec2/__init__.py    6
-rw-r--r--  nova/api/ec2/cloud.py      86
2 files changed, 76 insertions, 16 deletions
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index d500b127c..a7b10e428 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -213,7 +213,11 @@ class Executor(wsgi.Application):
req.headers['Content-Type'] = 'text/xml'
return result
except exception.ApiError as ex:
- return self._error(req, type(ex).__name__ + "." + ex.code, ex.message)
+
+ if ex.code:
+ return self._error(req, ex.code, ex.message)
+ else:
+ return self._error(req, type(ex).__name__, ex.message)
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
return self._error(req, type(ex).__name__, str(ex))
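
The Executor change above lets an ApiError subclass carry its own EC2-style error code instead of always prefixing the exception class name to the code. A minimal, self-contained sketch of that dispatch (the ApiError signature and the error_response helper are assumptions based on this hunk, not nova source):

    # Sketch only; assumes ApiError-style exceptions exposing .code and .message.
    class ApiError(Exception):
        def __init__(self, message='Unknown', code=None):
            super(ApiError, self).__init__(message)
            self.message = message
            self.code = code

    class QuotaError(ApiError):
        """Quota Exceeded"""

    def error_response(ex):
        # Prefer an explicit code such as "InstanceLimitExceeded"; fall back
        # to the exception class name, as the new except block does.
        code = ex.code if ex.code else type(ex).__name__
        return (code, ex.message)

    print(error_response(QuotaError("too many instances", "InstanceLimitExceeded")))
    # -> ('InstanceLimitExceeded', 'too many instances')
    print(error_response(QuotaError("quota exceeded")))
    # -> ('QuotaError', 'quota exceeded')
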
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index c04e722cc..25daa5988 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -23,6 +23,7 @@ datastore.
"""
import base64
+import datetime
import logging
import os
import time
@@ -30,6 +31,7 @@ import time
from nova import db
from nova import exception
from nova import flags
+from nova import quota
from nova import rpc
from nova import utils
from nova.auth import manager
@@ -41,6 +43,11 @@ FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+class QuotaError(exception.ApiError):
+ """Quota Exceeeded"""
+ pass
+
+
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
mgr = manager.AuthManager()
@@ -251,6 +258,14 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
+ # check quota
+ size = int(size)
+ if quota.allowed_volumes(context, 1, size) < 1:
+ logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+ context.project.id, size)
+ raise QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" %
+ size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
@@ -260,9 +275,11 @@ class CloudController(object):
vol['attach_status'] = "detached"
volume_ref = db.volume_create(context, vol)
- rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
- "args": {"context": None,
- "volume_id": volume_ref['id']}})
+ rpc.cast(FLAGS.scheduler_topic,
+ {"method": "create_volume",
+ "args": {"context": None,
+ "topic": FLAGS.volume_topic,
+ "volume_id": volume_ref['id']}})
return {'volumeSet': [self._format_volume(context, volume_ref)]}
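
This hunk, like the run_instances change further down, stops casting directly to the worker topic and instead casts to the scheduler, carrying the eventual topic in the args so the scheduler can choose a host and forward the message. A self-contained toy of that indirection (queue names and the host choice are invented for illustration):

    # Toy queues standing in for the rpc fabric; not nova code.
    queues = {}

    def cast(topic, msg):
        queues.setdefault(topic, []).append(msg)

    # API side (mirrors the hunk above): the real topic travels in the args.
    cast("scheduler", {"method": "create_volume",
                       "args": {"context": None,
                                "topic": "volume",
                                "volume_id": 42}})

    # Scheduler side (illustrative): pick a host, then re-cast the same
    # method to a host-specific queue.
    msg = queues["scheduler"].pop()
    args = dict(msg["args"])
    volume_topic = args.pop("topic")
    cast("%s.host1" % volume_topic, {"method": msg["method"], "args": args})

    print(queues)   # the request now sits on the invented "volume.host1" queue
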
@@ -270,6 +287,8 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_str(context, volume_id)
# TODO(vish): abstract status checking?
+ if volume_ref['status'] != "available":
+ raise exception.ApiError("Volume status must be available")
if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached")
instance_ref = db.instance_get_by_str(context, instance_id)
@@ -291,10 +310,10 @@ class CloudController(object):
volume_ref = db.volume_get_by_str(context, volume_id)
instance_ref = db.volume_get_instance(context, volume_ref['id'])
if not instance_ref:
- raise exception.Error("Volume isn't attached to anything!")
+ raise exception.ApiError("Volume isn't attached to anything!")
# TODO(vish): abstract status checking?
if volume_ref['status'] == "available":
- raise exception.Error("Volume is already detached")
+ raise exception.ApiError("Volume is already detached")
try:
host = instance_ref['host']
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -337,7 +356,7 @@ class CloudController(object):
instances = db.instance_get_by_reservation(context,
reservation_id)
else:
- if not context.user.is_admin():
+ if context.user.is_admin():
instances = db.instance_get_all(context)
else:
instances = db.instance_get_by_project(context,
@@ -409,6 +428,12 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
+ # check quota
+ if quota.allowed_floating_ips(context, 1) < 1:
+ logging.warn("Quota exceeeded for %s, tried to allocate address",
+ context.project.id)
+ raise QuotaError("Address quota exceeded. You cannot "
+ "allocate any more addresses")
network_topic = self._get_network_topic(context)
public_ip = rpc.call(network_topic,
{"method": "allocate_floating_ip",
@@ -459,6 +484,22 @@ class CloudController(object):
return db.queue_get_for(context, FLAGS.network_topic, host)
def run_instances(self, context, **kwargs):
+ instance_type = kwargs.get('instance_type', 'm1.small')
+ if instance_type not in INSTANCE_TYPES:
+ raise exception.ApiError("Unknown instance type: %s",
+ instance_type)
+ # check quota
+ max_instances = int(kwargs.get('max_count', 1))
+ min_instances = int(kwargs.get('min_count', max_instances))
+ num_instances = quota.allowed_instances(context,
+ max_instances,
+ instance_type)
+ if num_instances < min_instances:
+ logging.warn("Quota exceeeded for %s, tried to run %s instances",
+ context.project.id, min_instances)
+ raise QuotaError("Instance quota exceeded. You can only "
+ "run %s more instances of this type." %
+ num_instances, "InstanceLimitExceeded")
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
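
With this check a request can be satisfied only partially: quota.allowed_instances returns how many instances the project may still launch, the request is rejected only when that number falls below min_count, and otherwise the loop further down starts num_instances rather than max_count. A self-contained toy with invented numbers:

    # Toy version of the decision above; the real quota.allowed_instances
    # consults per-project limits, here it is just min(remaining, requested).
    def allowed_instances(remaining, requested):
        return min(remaining, requested)

    remaining_quota = 3                     # invented: project may run 3 more
    max_count, min_count = 5, 2
    num_instances = allowed_instances(remaining_quota, max_count)
    if num_instances < min_count:
        raise Exception("InstanceLimitExceeded")
    print("launching %d of the requested %d instances"
          % (num_instances, max_count))     # launches 3, not 5
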
@@ -480,7 +521,7 @@ class CloudController(object):
images.get(context, kernel_id)
images.get(context, ramdisk_id)
- logging.debug("Going to run instances...")
+ logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
@@ -495,6 +536,7 @@ class CloudController(object):
reservation_id = utils.generate_uid('r')
base_options = {}
+ base_options['state_description'] = 'scheduling'
base_options['image_id'] = image_id
base_options['kernel_id'] = kernel_id
base_options['ramdisk_id'] = ramdisk_id
@@ -504,10 +546,15 @@ class CloudController(object):
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
- base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
base_options['security_group'] = security_group
+ base_options['instance_type'] = instance_type
+
+ type_data = INSTANCE_TYPES[instance_type]
+ base_options['memory_mb'] = type_data['memory_mb']
+ base_options['vcpus'] = type_data['vcpus']
+ base_options['local_gb'] = type_data['local_gb']
- for num in range(int(kwargs['max_count'])):
+ for num in range(num_instances):
instance_ref = db.instance_create(context, base_options)
inst_id = instance_ref['id']
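
The per-flavor resources recorded here come from the INSTANCE_TYPES table keyed by the validated instance_type. The table itself is not part of this diff; it is shaped roughly like the following (flavor names and sizes are illustrative only):

    # Illustrative shape only; the actual flavors and sizes live elsewhere in nova.
    INSTANCE_TYPES = {
        'm1.small': {'memory_mb': 2048, 'vcpus': 1, 'local_gb': 20},
        'm1.large': {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 80},
    }

    type_data = INSTANCE_TYPES['m1.small']
    base_options = {'instance_type': 'm1.small',
                    'memory_mb': type_data['memory_mb'],
                    'vcpus': type_data['vcpus'],
                    'local_gb': type_data['local_gb']}
    print(base_options)
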
@@ -528,11 +575,12 @@ class CloudController(object):
"args": {"context": None,
"address": address}})
- rpc.cast(FLAGS.compute_topic,
- {"method": "run_instance",
- "args": {"context": None,
- "instance_id": inst_id}})
- logging.debug("Casting to node for %s/%s's instance %s" %
+ rpc.cast(FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"context": None,
+ "topic": FLAGS.compute_topic,
+ "instance_id": inst_id}})
+ logging.debug("Casting to scheduler for %s/%s's instance %s" %
(context.project.name, context.user.name, inst_id))
return self._format_run_instances(context, reservation_id)
@@ -548,6 +596,10 @@ class CloudController(object):
% id_str)
continue
+ now = datetime.datetime.utcnow()
+ db.instance_update(context,
+ instance_ref['id'],
+ {'terminated_at': now})
# FIXME(ja): where should network deallocate occur?
address = db.instance_get_floating_address(context,
instance_ref['id'])
@@ -569,7 +621,7 @@ class CloudController(object):
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
- self.network.deallocate_fixed_ip(context, address)
+ self.network_manager.deallocate_fixed_ip(context, address)
host = instance_ref['host']
if host:
@@ -595,6 +647,10 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume_ref = db.volume_get_by_str(context, volume_id)
+ if volume_ref['status'] != "available":
+ raise exception.ApiError("Volume status must be available")
+ now = datetime.datetime.utcnow()
+ db.volume_update(context, volume_ref['id'], {'terminated_at': now})
host = volume_ref['host']
rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "delete_volume",