summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@yahoo.com>2010-10-12 19:00:34 -0700
committerVishvananda Ishaya <vishvananda@yahoo.com>2010-10-12 19:00:34 -0700
commitdbbdebbcd13b08ec2afb6d0d58144eeee6ecce84 (patch)
tree2e458dec12a89fcf5497367486f4288f87a06883
parent6cc81214b58972c0d0e815ad340c32862be834dc (diff)
parentd0a55238fdf64a8da51ea1fe328a1a3dc3d17dc7 (diff)
downloadnova-dbbdebbcd13b08ec2afb6d0d58144eeee6ecce84.tar.gz
nova-dbbdebbcd13b08ec2afb6d0d58144eeee6ecce84.tar.xz
nova-dbbdebbcd13b08ec2afb6d0d58144eeee6ecce84.zip
merged trunk
-rwxr-xr-xbin/nova-manage16
-rw-r--r--nova/api/__init__.py30
-rw-r--r--nova/api/cloud.py2
-rw-r--r--nova/api/ec2/__init__.py2
-rw-r--r--nova/api/ec2/cloud.py293
-rw-r--r--nova/api/ec2/images.py3
-rw-r--r--nova/api/openstack/__init__.py (renamed from nova/api/rackspace/__init__.py)26
-rw-r--r--nova/api/openstack/_id_translator.py (renamed from nova/api/rackspace/_id_translator.py)0
-rw-r--r--nova/api/openstack/auth.py (renamed from nova/api/rackspace/auth.py)4
-rw-r--r--nova/api/openstack/backup_schedules.py (renamed from nova/api/rackspace/backup_schedules.py)3
-rw-r--r--nova/api/openstack/context.py (renamed from nova/api/rackspace/context.py)0
-rw-r--r--nova/api/openstack/faults.py (renamed from nova/api/rackspace/faults.py)0
-rw-r--r--nova/api/openstack/flavors.py (renamed from nova/api/rackspace/flavors.py)8
-rw-r--r--nova/api/openstack/images.py (renamed from nova/api/rackspace/images.py)23
-rw-r--r--nova/api/openstack/notes.txt (renamed from nova/api/rackspace/notes.txt)4
-rw-r--r--nova/api/openstack/ratelimiting/__init__.py (renamed from nova/api/rackspace/ratelimiting/__init__.py)0
-rw-r--r--nova/api/openstack/servers.py (renamed from nova/api/rackspace/servers.py)73
-rw-r--r--nova/api/openstack/sharedipgroups.py (renamed from nova/api/rackspace/sharedipgroups.py)0
-rw-r--r--nova/auth/dbdriver.py236
-rw-r--r--nova/auth/manager.py7
-rw-r--r--nova/compute/manager.py11
-rw-r--r--nova/db/api.py199
-rw-r--r--nova/db/sqlalchemy/api.py439
-rw-r--r--nova/db/sqlalchemy/models.py139
-rw-r--r--nova/db/sqlalchemy/session.py9
-rw-r--r--nova/exception.py3
-rw-r--r--nova/fakerabbit.py14
-rw-r--r--nova/flags.py4
-rw-r--r--nova/image/service.py235
-rw-r--r--nova/network/linux_net.py50
-rw-r--r--nova/network/manager.py10
-rw-r--r--nova/objectstore/image.py10
-rw-r--r--nova/process.py2
-rw-r--r--nova/rpc.py9
-rw-r--r--nova/server.py6
-rw-r--r--nova/service.py12
-rw-r--r--nova/test.py1
-rw-r--r--nova/tests/api/__init__.py13
-rw-r--r--nova/tests/api/fakes.py (renamed from nova/tests/api/test_helper.py)0
-rw-r--r--nova/tests/api/openstack/__init__.py (renamed from nova/tests/api/rackspace/__init__.py)8
-rw-r--r--nova/tests/api/openstack/fakes.py205
-rw-r--r--nova/tests/api/openstack/test_auth.py (renamed from nova/tests/api/rackspace/auth.py)30
-rw-r--r--nova/tests/api/openstack/test_faults.py (renamed from nova/tests/api/rackspace/testfaults.py)2
-rw-r--r--nova/tests/api/openstack/test_flavors.py (renamed from nova/tests/api/rackspace/flavors.py)18
-rw-r--r--nova/tests/api/openstack/test_images.py141
-rw-r--r--nova/tests/api/openstack/test_ratelimiting.py (renamed from nova/api/rackspace/ratelimiting/tests.py)2
-rw-r--r--nova/tests/api/openstack/test_servers.py (renamed from nova/tests/api/rackspace/servers.py)38
-rw-r--r--nova/tests/api/openstack/test_sharedipgroups.py (renamed from nova/tests/api/rackspace/sharedipgroups.py)10
-rw-r--r--nova/tests/api/rackspace/images.py40
-rw-r--r--nova/tests/api/rackspace/test_helper.py134
-rw-r--r--nova/tests/api/test_wsgi.py (renamed from nova/tests/api/wsgi_test.py)0
-rw-r--r--nova/tests/api_unittest.py188
-rw-r--r--nova/tests/auth_unittest.py9
-rw-r--r--nova/tests/bundle/1mb.manifest.xml2
-rw-r--r--nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml1
-rw-r--r--nova/tests/cloud_unittest.py34
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/objectstore_unittest.py29
-rw-r--r--nova/tests/scheduler_unittest.py10
-rw-r--r--nova/tests/service_unittest.py3
-rw-r--r--nova/tests/virt_unittest.py225
-rw-r--r--nova/utils.py8
-rw-r--r--nova/virt/images.py1
-rw-r--r--nova/virt/interfaces.template1
-rw-r--r--nova/virt/libvirt.qemu.xml.template4
-rw-r--r--nova/virt/libvirt.uml.xml.template4
-rw-r--r--nova/virt/libvirt_conn.py209
-rw-r--r--nova/virt/xenapi.py33
-rw-r--r--run_tests.py2
-rwxr-xr-xrun_tests.sh25
-rw-r--r--tools/pip-requires1
71 files changed, 2794 insertions, 521 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 6da2efe95..d36b0f53a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -52,6 +52,7 @@
CLI interface for nova management.
"""
+import logging
import os
import sys
import time
@@ -89,11 +90,16 @@ class VpnCommands(object):
def list(self):
"""Print a listing of the VPNs for all projects."""
print "%-12s\t" % 'project',
- print "%-12s\t" % 'ip:port',
+ print "%-20s\t" % 'ip:port',
print "%s" % 'state'
for project in self.manager.get_projects():
print "%-12s\t" % project.name,
- print "%s:%s\t" % (project.vpn_ip, project.vpn_port),
+
+ try:
+ s = "%s:%s" % (project.vpn_ip, project.vpn_port)
+ except exception.NotFound:
+ s = "None"
+ print "%-20s\t" % s,
vpn = self._vpn_for(project.id)
if vpn:
@@ -115,7 +121,7 @@ class VpnCommands(object):
def _vpn_for(self, project_id):
"""Get the VPN instance for a project ID."""
- for instance in db.instance_get_all():
+ for instance in db.instance_get_all(None):
if (instance['image_id'] == FLAGS.vpn_image_id
and not instance['state_description'] in
['shutting_down', 'shutdown']
@@ -442,6 +448,10 @@ def main():
"""Parse options and call the appropriate class/method."""
utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
+
+ if FLAGS.verbose:
+ logging.getLogger().setLevel(logging.DEBUG)
+
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 744abd621..8ec7094d7 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -27,16 +27,16 @@ from nova import flags
from nova import wsgi
from nova.api import cloudpipe
from nova.api import ec2
-from nova.api import rackspace
+from nova.api import openstack
from nova.api.ec2 import metadatarequesthandler
-flags.DEFINE_string('rsapi_subdomain', 'rs',
- 'subdomain running the RS API')
+flags.DEFINE_string('osapi_subdomain', 'api',
+ 'subdomain running the OpenStack API')
flags.DEFINE_string('ec2api_subdomain', 'ec2',
'subdomain running the EC2 API')
flags.DEFINE_string('FAKE_subdomain', None,
- 'set to rs or ec2 to fake the subdomain of the host for testing')
+ 'set to api or ec2 to fake the subdomain of the host for testing')
FLAGS = flags.FLAGS
@@ -44,21 +44,21 @@ class API(wsgi.Router):
"""Routes top-level requests to the appropriate controller."""
def __init__(self):
- rsdomain = {'sub_domain': [FLAGS.rsapi_subdomain]}
+ osapidomain = {'sub_domain': [FLAGS.osapi_subdomain]}
ec2domain = {'sub_domain': [FLAGS.ec2api_subdomain]}
- # If someone wants to pretend they're hitting the RS subdomain
- # on their local box, they can set FAKE_subdomain to 'rs', which
- # removes subdomain restrictions from the RS routes below.
- if FLAGS.FAKE_subdomain == 'rs':
- rsdomain = {}
+ # If someone wants to pretend they're hitting the OSAPI subdomain
+ # on their local box, they can set FAKE_subdomain to 'api', which
+ # removes subdomain restrictions from the OpenStack API routes below.
+ if FLAGS.FAKE_subdomain == 'api':
+ osapidomain = {}
elif FLAGS.FAKE_subdomain == 'ec2':
ec2domain = {}
mapper = routes.Mapper()
mapper.sub_domains = True
- mapper.connect("/", controller=self.rsapi_versions,
- conditions=rsdomain)
- mapper.connect("/v1.0/{path_info:.*}", controller=rackspace.API(),
- conditions=rsdomain)
+ mapper.connect("/", controller=self.osapi_versions,
+ conditions=osapidomain)
+ mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(),
+ conditions=osapidomain)
mapper.connect("/", controller=self.ec2api_versions,
conditions=ec2domain)
@@ -81,7 +81,7 @@ class API(wsgi.Router):
super(API, self).__init__(mapper)
@webob.dec.wsgify
- def rsapi_versions(self, req):
+ def osapi_versions(self, req):
"""Respond to a request for all OpenStack API versions."""
response = {
"versions": [
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
index 345677d4f..57e94a17a 100644
--- a/nova/api/cloud.py
+++ b/nova/api/cloud.py
@@ -34,7 +34,7 @@ def reboot(instance_id, context=None):
#TODO(gundlach) not actually sure what context is used for by ec2 here
-- I think we can just remove it and use None all the time.
"""
- instance_ref = db.instance_get_by_ec2_id(None, instance_id)
+ instance_ref = db.instance_get_by_internal_id(None, instance_id)
host = instance_ref['host']
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 6b538a7f1..6e771f064 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -142,6 +142,8 @@ class Authorizer(wsgi.Middleware):
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
+ 'AuthorizeSecurityGroupIngress': ['netadmin'],
+ 'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index d8462f7a0..278055be1 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -28,6 +28,8 @@ import logging
import os
import time
+import IPy
+
from nova import crypto
from nova import db
from nova import exception
@@ -43,6 +45,7 @@ from nova.api.ec2 import images
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+InvalidInputException = exception.InvalidInputException
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
@@ -72,6 +75,20 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
+def ec2_id_to_internal_id(ec2_id):
+ """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)"""
+ return int(ec2_id[2:], 36)
+
+
+def internal_id_to_ec2_id(internal_id):
+ """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])"""
+ digits = []
+ while internal_id != 0:
+ internal_id, remainder = divmod(internal_id, 36)
+ digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder])
+ return "i-%s" % ''.join(reversed(digits))
+
+
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
@@ -113,6 +130,15 @@ class CloudController(object):
result[key] = [line]
return result
+ def _trigger_refresh_security_group(self, security_group):
+ nodes = set([instance['host'] for instance in security_group.instances
+ if instance['host'] is not None])
+ for node in nodes:
+ rpc.call('%s.%s' % (FLAGS.compute_topic, node),
+ { "method": "refresh_security_group",
+ "args": { "context": None,
+ "security_group_id": security_group.id}})
+
def get_metadata(self, address):
instance_ref = db.fixed_ip_get_instance(None, address)
if instance_ref is None:
@@ -144,7 +170,7 @@ class CloudController(object):
},
'hostname': hostname,
'instance-action': 'none',
- 'instance-id': instance_ref['ec2_id'],
+ 'instance-id': internal_id_to_ec2_id(instance_ref['internal_id']),
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
@@ -232,21 +258,200 @@ class CloudController(object):
pass
return True
- def describe_security_groups(self, context, group_names, **kwargs):
- groups = {'securityGroupSet': []}
+ def describe_security_groups(self, context, group_name=None, **kwargs):
+ self._ensure_default_security_group(context)
+ if context.user.is_admin():
+ groups = db.security_group_get_all(context)
+ else:
+ groups = db.security_group_get_by_project(context,
+ context.project.id)
+ groups = [self._format_security_group(context, g) for g in groups]
+ if not group_name is None:
+            groups = [g for g in groups if g['groupName'] in group_name]
+
+ return {'securityGroupInfo': groups }
+
+ def _format_security_group(self, context, group):
+ g = {}
+ g['groupDescription'] = group.description
+ g['groupName'] = group.name
+ g['ownerId'] = group.project_id
+ g['ipPermissions'] = []
+ for rule in group.rules:
+ r = {}
+ r['ipProtocol'] = rule.protocol
+ r['fromPort'] = rule.from_port
+ r['toPort'] = rule.to_port
+ r['groups'] = []
+ r['ipRanges'] = []
+ if rule.group_id:
+ source_group = db.security_group_get(context, rule.group_id)
+ r['groups'] += [{'groupName': source_group.name,
+ 'userId': source_group.project_id}]
+ else:
+ r['ipRanges'] += [{'cidrIp': rule.cidr}]
+ g['ipPermissions'] += [r]
+ return g
+
+
+ def _authorize_revoke_rule_args_to_dict(self, context,
+ to_port=None, from_port=None,
+ ip_protocol=None, cidr_ip=None,
+ user_id=None,
+ source_security_group_name=None,
+ source_security_group_owner_id=None):
+
+ values = {}
+
+ if source_security_group_name:
+ source_project_id = self._get_source_project_id(context,
+ source_security_group_owner_id)
+
+ source_security_group = \
+ db.security_group_get_by_name(context,
+ source_project_id,
+ source_security_group_name)
+ values['group_id'] = source_security_group['id']
+ elif cidr_ip:
+ # If this fails, it throws an exception. This is what we want.
+ IPy.IP(cidr_ip)
+ values['cidr'] = cidr_ip
+ else:
+ values['cidr'] = '0.0.0.0/0'
+
+ if ip_protocol and from_port and to_port:
+ from_port = int(from_port)
+ to_port = int(to_port)
+ ip_protocol = str(ip_protocol)
+
+ if ip_protocol.upper() not in ['TCP','UDP','ICMP']:
+ raise InvalidInputException('%s is not a valid ipProtocol' %
+ (ip_protocol,))
+ if ((min(from_port, to_port) < -1) or
+ (max(from_port, to_port) > 65535)):
+ raise InvalidInputException('Invalid port range')
+
+ values['protocol'] = ip_protocol
+ values['from_port'] = from_port
+ values['to_port'] = to_port
+ else:
+ # If cidr based filtering, protocol and ports are mandatory
+ if 'cidr' in values:
+ return None
+
+ return values
- # Stubbed for now to unblock other things.
- return groups
- def create_security_group(self, context, group_name, **kwargs):
+ def _security_group_rule_exists(self, security_group, values):
+ """Indicates whether the specified rule values are already
+ defined in the given security group.
+ """
+ for rule in security_group.rules:
+ if 'group_id' in values:
+ if rule['group_id'] == values['group_id']:
+ return True
+ else:
+ is_duplicate = True
+ for key in ('cidr', 'from_port', 'to_port', 'protocol'):
+ if rule[key] != values[key]:
+ is_duplicate = False
+ break
+ if is_duplicate:
+ return True
+ return False
+
+
+ def revoke_security_group_ingress(self, context, group_name, **kwargs):
+ self._ensure_default_security_group(context)
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+
+ criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ if criteria == None:
+ raise exception.ApiError("No rule for the specified parameters.")
+
+ for rule in security_group.rules:
+ match = True
+ for (k,v) in criteria.iteritems():
+ if getattr(rule, k, False) != v:
+ match = False
+ if match:
+ db.security_group_rule_destroy(context, rule['id'])
+ self._trigger_refresh_security_group(security_group)
+ return True
+ raise exception.ApiError("No rule for the specified parameters.")
+
+ # TODO(soren): This has only been tested with Boto as the client.
+ # Unfortunately, it seems Boto is using an old API
+ # for these operations, so support for newer API versions
+ # is sketchy.
+ def authorize_security_group_ingress(self, context, group_name, **kwargs):
+ self._ensure_default_security_group(context)
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+
+ values = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ values['parent_group_id'] = security_group.id
+
+ if self._security_group_rule_exists(security_group, values):
+ raise exception.ApiError('This rule already exists in group %s' %
+ group_name)
+
+ security_group_rule = db.security_group_rule_create(context, values)
+
+ self._trigger_refresh_security_group(security_group)
+
return True
+
+ def _get_source_project_id(self, context, source_security_group_owner_id):
+ if source_security_group_owner_id:
+ # Parse user:project for source group.
+ source_parts = source_security_group_owner_id.split(':')
+
+ # If no project name specified, assume it's same as user name.
+ # Since we're looking up by project name, the user name is not
+ # used here. It's only read for EC2 API compatibility.
+ if len(source_parts) == 2:
+ source_project_id = source_parts[1]
+ else:
+ source_project_id = source_parts[0]
+ else:
+ source_project_id = context.project.id
+
+ return source_project_id
+
+
+ def create_security_group(self, context, group_name, group_description):
+ self._ensure_default_security_group(context)
+ if db.security_group_exists(context, context.project.id, group_name):
+ raise exception.ApiError('group %s already exists' % group_name)
+
+ group = {'user_id' : context.user.id,
+ 'project_id': context.project.id,
+ 'name': group_name,
+ 'description': group_description}
+ group_ref = db.security_group_create(context, group)
+
+ return {'securityGroupSet': [self._format_security_group(context,
+ group_ref)]}
+
+
def delete_security_group(self, context, group_name, **kwargs):
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+ db.security_group_destroy(context, security_group.id)
return True
+
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
- instance_ref = db.instance_get_by_ec2_id(context, instance_id[0])
+ ec2_id = instance_id[0]
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ instance_ref = db.instance_get_by_internal_id(context, internal_id)
return rpc.call('%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
{"method": "get_console_output",
@@ -326,7 +531,8 @@ class CloudController(object):
raise exception.ApiError("Volume status must be available")
if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached")
- instance_ref = db.instance_get_by_ec2_id(context, instance_id)
+ internal_id = ec2_id_to_internal_id(instance_id)
+ instance_ref = db.instance_get_by_internal_id(context, internal_id)
host = instance_ref['host']
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "attach_volume",
@@ -360,9 +566,11 @@ class CloudController(object):
# If the instance doesn't exist anymore,
# then we need to call detach blind
db.volume_detached(context)
+ internal_id = instance_ref['internal_id']
+ ec2_id = internal_id_to_ec2_id(internal_id)
return {'attachTime': volume_ref['attach_time'],
'device': volume_ref['mountpoint'],
- 'instanceId': instance_ref['ec2_id'],
+ 'instanceId': internal_id,
'requestId': context.request_id,
'status': volume_ref['attach_status'],
'volumeId': volume_ref['id']}
@@ -411,7 +619,9 @@ class CloudController(object):
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
- i['instanceId'] = instance['ec2_id']
+ internal_id = instance['internal_id']
+ ec2_id = internal_id_to_ec2_id(internal_id)
+ i['instanceId'] = ec2_id
i['imageId'] = instance['image_id']
i['instanceState'] = {
'code': instance['state'],
@@ -464,9 +674,10 @@ class CloudController(object):
instance_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
- instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
+            internal_id = floating_ip_ref['fixed_ip']['instance']['internal_id']
+ ec2_id = internal_id_to_ec2_id(internal_id)
address_rv = {'public_ip': address,
- 'instance_id': instance_id}
+ 'instance_id': ec2_id}
if context.user.is_admin():
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
@@ -498,8 +709,9 @@ class CloudController(object):
"floating_address": floating_ip_ref['address']}})
return {'releaseResponse': ["Address released."]}
- def associate_address(self, context, instance_id, public_ip, **kwargs):
- instance_ref = db.instance_get_by_ec2_id(context, instance_id)
+ def associate_address(self, context, ec2_id, public_ip, **kwargs):
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ instance_ref = db.instance_get_by_internal_id(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
@@ -531,6 +743,18 @@ class CloudController(object):
"network_id": network_ref['id']}})
return db.queue_get_for(context, FLAGS.network_topic, host)
+ def _ensure_default_security_group(self, context):
+ try:
+ db.security_group_get_by_name(context,
+ context.project.id,
+ 'default')
+ except exception.NotFound:
+ values = { 'name' : 'default',
+ 'description' : 'default',
+ 'user_id' : context.user.id,
+ 'project_id' : context.project.id }
+ group = db.security_group_create(context, values)
+
def run_instances(self, context, **kwargs):
instance_type = kwargs.get('instance_type', 'm1.small')
if instance_type not in INSTANCE_TYPES:
@@ -578,8 +802,17 @@ class CloudController(object):
kwargs['key_name'])
key_data = key_pair_ref['public_key']
- # TODO: Get the real security group of launch in here
- security_group = "default"
+ security_group_arg = kwargs.get('security_group', ["default"])
+ if not type(security_group_arg) is list:
+ security_group_arg = [security_group_arg]
+
+ security_groups = []
+ self._ensure_default_security_group(context)
+ for security_group_name in security_group_arg:
+ group = db.security_group_get_by_name(context,
+ context.project.id,
+ security_group_name)
+ security_groups.append(group['id'])
reservation_id = utils.generate_uid('r')
base_options = {}
@@ -593,12 +826,12 @@ class CloudController(object):
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
- base_options['security_group'] = security_group
- base_options['instance_type'] = instance_type
+
base_options['display_name'] = kwargs.get('display_name')
base_options['display_description'] = kwargs.get('display_description')
type_data = INSTANCE_TYPES[instance_type]
+ base_options['instance_type'] = instance_type
base_options['memory_mb'] = type_data['memory_mb']
base_options['vcpus'] = type_data['vcpus']
base_options['local_gb'] = type_data['local_gb']
@@ -607,10 +840,16 @@ class CloudController(object):
instance_ref = db.instance_create(context, base_options)
inst_id = instance_ref['id']
+ for security_group_id in security_groups:
+ db.instance_add_security_group(context, inst_id,
+ security_group_id)
+
inst = {}
inst['mac_address'] = utils.generate_mac()
inst['launch_index'] = num
- inst['hostname'] = instance_ref['ec2_id']
+ internal_id = instance_ref['internal_id']
+ ec2_id = internal_id_to_ec2_id(internal_id)
+ inst['hostname'] = ec2_id
db.instance_update(context, inst_id, inst)
# TODO(vish): This probably should be done in the scheduler
# or in compute as a call. The network should be
@@ -636,11 +875,18 @@ class CloudController(object):
def terminate_instances(self, context, instance_id, **kwargs):
+ """Terminate each instance in instance_id, which is a list of ec2 ids.
+
+ instance_id is a kwarg so its name cannot be modified.
+ """
+ ec2_id_list = instance_id
logging.debug("Going to start terminating instances")
- for id_str in instance_id:
+ for id_str in ec2_id_list:
+ internal_id = ec2_id_to_internal_id(id_str)
logging.debug("Going to try and terminate %s" % id_str)
try:
- instance_ref = db.instance_get_by_ec2_id(context, id_str)
+ instance_ref = db.instance_get_by_internal_id(context,
+ internal_id)
except exception.NotFound:
logging.warning("Instance %s was not found during terminate"
% id_str)
@@ -689,7 +935,7 @@ class CloudController(object):
cloud.reboot(id_str, context=context)
return True
- def update_instance(self, context, instance_id, **kwargs):
+ def update_instance(self, context, ec2_id, **kwargs):
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
@@ -697,7 +943,8 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
db_context = {}
- inst = db.instance_get_by_ec2_id(db_context, instance_id)
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ inst = db.instance_get_by_internal_id(db_context, internal_id)
db.instance_update(db_context, inst['id'], kwargs)
return True
diff --git a/nova/api/ec2/images.py b/nova/api/ec2/images.py
index cb54cdda2..f0a43dad6 100644
--- a/nova/api/ec2/images.py
+++ b/nova/api/ec2/images.py
@@ -69,6 +69,9 @@ def list(context, filter_list=[]):
optionally filtered by a list of image_id """
+ if FLAGS.connection_type == 'fake':
+ return [{ 'imageId' : 'bar'}]
+
# FIXME: send along the list of only_images to check for
response = conn(context).make_request(
method='GET',
diff --git a/nova/api/rackspace/__init__.py b/nova/api/openstack/__init__.py
index 89a4693ad..5e81ba2bd 100644
--- a/nova/api/rackspace/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -17,7 +17,7 @@
# under the License.
"""
-WSGI middleware for Rackspace API controllers.
+WSGI middleware for OpenStack API controllers.
"""
import json
@@ -31,30 +31,30 @@ import webob
from nova import flags
from nova import utils
from nova import wsgi
-from nova.api.rackspace import faults
-from nova.api.rackspace import backup_schedules
-from nova.api.rackspace import flavors
-from nova.api.rackspace import images
-from nova.api.rackspace import ratelimiting
-from nova.api.rackspace import servers
-from nova.api.rackspace import sharedipgroups
+from nova.api.openstack import faults
+from nova.api.openstack import backup_schedules
+from nova.api.openstack import flavors
+from nova.api.openstack import images
+from nova.api.openstack import ratelimiting
+from nova.api.openstack import servers
+from nova.api.openstack import sharedipgroups
from nova.auth import manager
FLAGS = flags.FLAGS
flags.DEFINE_string('nova_api_auth',
- 'nova.api.rackspace.auth.BasicApiAuthManager',
- 'The auth mechanism to use for the Rackspace API implemenation')
+ 'nova.api.openstack.auth.BasicApiAuthManager',
+ 'The auth mechanism to use for the OpenStack API implemenation')
class API(wsgi.Middleware):
- """WSGI entry point for all Rackspace API requests."""
+ """WSGI entry point for all OpenStack API requests."""
def __init__(self):
app = AuthMiddleware(RateLimitingMiddleware(APIRouter()))
super(API, self).__init__(app)
class AuthMiddleware(wsgi.Middleware):
- """Authorize the rackspace API request or return an HTTP Forbidden."""
+ """Authorize the openstack API request or return an HTTP Forbidden."""
def __init__(self, application):
self.auth_driver = utils.import_class(FLAGS.nova_api_auth)()
@@ -145,7 +145,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
class APIRouter(wsgi.Router):
"""
- Routes requests on the Rackspace API to the appropriate controller
+ Routes requests on the OpenStack API to the appropriate controller
and method.
"""
diff --git a/nova/api/rackspace/_id_translator.py b/nova/api/openstack/_id_translator.py
index 333aa8434..333aa8434 100644
--- a/nova/api/rackspace/_id_translator.py
+++ b/nova/api/openstack/_id_translator.py
diff --git a/nova/api/rackspace/auth.py b/nova/api/openstack/auth.py
index c45156ebd..4c909293e 100644
--- a/nova/api/rackspace/auth.py
+++ b/nova/api/openstack/auth.py
@@ -11,7 +11,7 @@ from nova import db
from nova import flags
from nova import manager
from nova import utils
-from nova.api.rackspace import faults
+from nova.api.openstack import faults
FLAGS = flags.FLAGS
@@ -19,7 +19,7 @@ class Context(object):
pass
class BasicApiAuthManager(object):
- """ Implements a somewhat rudimentary version of Rackspace Auth"""
+ """ Implements a somewhat rudimentary version of OpenStack Auth"""
def __init__(self, host=None, db_driver=None):
if not host:
diff --git a/nova/api/rackspace/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index cb83023bc..76ad6ef87 100644
--- a/nova/api/rackspace/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -19,8 +19,7 @@ import time
from webob import exc
from nova import wsgi
-from nova.api.rackspace import _id_translator
-from nova.api.rackspace import faults
+from nova.api.openstack import faults
import nova.image.service
class Controller(wsgi.Controller):
diff --git a/nova/api/rackspace/context.py b/nova/api/openstack/context.py
index 77394615b..77394615b 100644
--- a/nova/api/rackspace/context.py
+++ b/nova/api/openstack/context.py
diff --git a/nova/api/rackspace/faults.py b/nova/api/openstack/faults.py
index 32e5c866f..32e5c866f 100644
--- a/nova/api/rackspace/faults.py
+++ b/nova/api/openstack/faults.py
diff --git a/nova/api/rackspace/flavors.py b/nova/api/openstack/flavors.py
index 916449854..793984a5d 100644
--- a/nova/api/rackspace/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -17,13 +17,13 @@
from webob import exc
-from nova.api.rackspace import faults
+from nova.api.openstack import faults
from nova.compute import instance_types
from nova import wsgi
-import nova.api.rackspace
+import nova.api.openstack
class Controller(wsgi.Controller):
- """Flavor controller for the Rackspace API."""
+ """Flavor controller for the OpenStack API."""
_serialization_metadata = {
'application/xml': {
@@ -41,7 +41,7 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all flavors in detail."""
items = [self.show(req, id)['flavor'] for id in self._all_ids()]
- items = nova.api.rackspace.limited(items, req)
+ items = nova.api.openstack.limited(items, req)
return dict(flavors=items)
def show(self, req, id):
diff --git a/nova/api/rackspace/images.py b/nova/api/openstack/images.py
index 4a7dd489c..aa438739c 100644
--- a/nova/api/rackspace/images.py
+++ b/nova/api/openstack/images.py
@@ -17,11 +17,15 @@
from webob import exc
+from nova import flags
+from nova import utils
from nova import wsgi
-from nova.api.rackspace import _id_translator
-import nova.api.rackspace
+import nova.api.openstack
import nova.image.service
-from nova.api.rackspace import faults
+from nova.api.openstack import faults
+
+
+FLAGS = flags.FLAGS
class Controller(wsgi.Controller):
@@ -35,9 +39,7 @@ class Controller(wsgi.Controller):
}
def __init__(self):
- self._service = nova.image.service.ImageService.load()
- self._id_translator = _id_translator.RackspaceAPIIdTranslator(
- "image", self._service.__class__.__name__)
+ self._service = utils.import_object(FLAGS.image_service)
def index(self, req):
"""Return all public images in brief."""
@@ -47,17 +49,12 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all public images in detail."""
data = self._service.index()
- data = nova.api.rackspace.limited(data, req)
- for img in data:
- img['id'] = self._id_translator.to_rs_id(img['id'])
+ data = nova.api.openstack.limited(data, req)
return dict(images=data)
def show(self, req, id):
"""Return data about the given image id."""
- opaque_id = self._id_translator.from_rs_id(id)
- img = self._service.show(opaque_id)
- img['id'] = id
- return dict(image=img)
+ return dict(image=self._service.show(id))
def delete(self, req, id):
# Only public images are supported for now.
diff --git a/nova/api/rackspace/notes.txt b/nova/api/openstack/notes.txt
index e133bf5ea..2330f1002 100644
--- a/nova/api/rackspace/notes.txt
+++ b/nova/api/openstack/notes.txt
@@ -10,11 +10,11 @@ image ids are URIs.
LocalImageService(ImageService):
image ids are random strings.
-RackspaceAPITranslationStore:
+OpenstackAPITranslationStore:
translates RS server/images/flavor/etc ids into formats required
by a given ImageService strategy.
-api.rackspace.images.Controller:
+api.openstack.images.Controller:
uses an ImageService strategy behind the scenes to do its fetching; it just
converts int image id into a strategy-specific image id.
diff --git a/nova/api/rackspace/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index f843bac0f..f843bac0f 100644
--- a/nova/api/rackspace/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
diff --git a/nova/api/rackspace/servers.py b/nova/api/openstack/servers.py
index 0606d14bb..1a0792bf8 100644
--- a/nova/api/rackspace/servers.py
+++ b/nova/api/openstack/servers.py
@@ -25,30 +25,15 @@ from nova import rpc
from nova import utils
from nova import wsgi
from nova.api import cloud
-from nova.api.rackspace import _id_translator
-from nova.api.rackspace import context
-from nova.api.rackspace import faults
+from nova.api.openstack import context
+from nova.api.openstack import faults
from nova.compute import instance_types
from nova.compute import power_state
-import nova.api.rackspace
+import nova.api.openstack
import nova.image.service
FLAGS = flags.FLAGS
-flags.DEFINE_string('rs_network_manager', 'nova.network.manager.FlatManager',
- 'Networking for rackspace')
-
-def _instance_id_translator():
- """ Helper method for initializing an id translator for Rackspace instance
- ids """
- return _id_translator.RackspaceAPIIdTranslator( "instance", 'nova')
-
-def _image_service():
- """ Helper method for initializing the image id translator """
- service = nova.image.service.ImageService.load()
- return (service, _id_translator.RackspaceAPIIdTranslator(
- "image", service.__class__.__name__))
-
def _filter_params(inst_dict):
""" Extracts all updatable parameters for a server update request """
keys = dict(name='name', admin_pass='adminPass')
@@ -93,7 +78,7 @@ def _entity_inst(inst):
return dict(server=dict(id=inst['id'], name=inst['server_name']))
class Controller(wsgi.Controller):
- """ The Server API controller for the Openstack API """
+ """ The Server API controller for the OpenStack API """
_serialization_metadata = {
'application/xml': {
@@ -125,17 +110,14 @@ class Controller(wsgi.Controller):
"""
user_id = req.environ['nova.context']['user']['id']
instance_list = self.db_driver.instance_get_all_by_user(None, user_id)
- limited_list = nova.api.rackspace.limited(instance_list, req)
+ limited_list = nova.api.openstack.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return _entity_list(res)
def show(self, req, id):
""" Returns server details by server id """
- inst_id_trans = _instance_id_translator()
- inst_id = inst_id_trans.from_rs_id(id)
-
user_id = req.environ['nova.context']['user']['id']
- inst = self.db_driver.instance_get_by_ec2_id(None, inst_id)
+ inst = self.db_driver.instance_get_by_internal_id(None, int(id))
if inst:
if inst.user_id == user_id:
return _entity_detail(inst)
@@ -143,11 +125,8 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Destroys a server """
- inst_id_trans = _instance_id_translator()
- inst_id = inst_id_trans.from_rs_id(id)
-
user_id = req.environ['nova.context']['user']['id']
- instance = self.db_driver.instance_get_by_ec2_id(None, inst_id)
+ instance = self.db_driver.instance_get_by_internal_id(None, int(id))
if instance and instance['user_id'] == user_id:
self.db_driver.instance_destroy(None, id)
return faults.Fault(exc.HTTPAccepted())
@@ -160,10 +139,10 @@ class Controller(wsgi.Controller):
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- try:
- inst = self._build_server_instance(req, env)
- except Exception, e:
- return faults.Fault(exc.HTTPUnprocessableEntity())
+        try:
+            inst = self._build_server_instance(req, env)
+        except Exception, e:
+            return faults.Fault(exc.HTTPUnprocessableEntity())
rpc.cast(
FLAGS.compute_topic, {
@@ -173,8 +152,6 @@ class Controller(wsgi.Controller):
def update(self, req, id):
""" Updates the server name or password """
- inst_id_trans = _instance_id_translator()
- inst_id = inst_id_trans.from_rs_id(id)
user_id = req.environ['nova.context']['user']['id']
inst_dict = self._deserialize(req.body, req)
@@ -182,32 +159,33 @@ class Controller(wsgi.Controller):
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
- instance = self.db_driver.instance_get_by_ec2_id(None, inst_id)
+ instance = self.db_driver.instance_get_by_internal_id(None, int(id))
if not instance or instance.user_id != user_id:
return faults.Fault(exc.HTTPNotFound())
- self.db_driver.instance_update(None, id,
+ self.db_driver.instance_update(None, int(id),
_filter_params(inst_dict['server']))
return faults.Fault(exc.HTTPNoContent())
def action(self, req, id):
""" multi-purpose method used to reboot, rebuild, and
resize a server """
+ user_id = req.environ['nova.context']['user']['id']
input_dict = self._deserialize(req.body, req)
try:
reboot_type = input_dict['reboot']['type']
except Exception:
raise faults.Fault(webob.exc.HTTPNotImplemented())
- opaque_id = _instance_id_translator().from_rs_id(id)
- cloud.reboot(opaque_id)
+        inst_ref = self.db_driver.instance_get_by_internal_id(None, int(id))
+ if not inst_ref or (inst_ref and not inst_ref.user_id == user_id):
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ cloud.reboot(id)
def _build_server_instance(self, req, env):
"""Build instance data structure and save it to the data store."""
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
inst = {}
- inst_id_trans = _instance_id_translator()
-
user_id = req.environ['nova.context']['user']['id']
flavor_id = env['server']['flavorId']
@@ -217,17 +195,15 @@ class Controller(wsgi.Controller):
if v['flavorid'] == flavor_id][0]
image_id = env['server']['imageId']
+ img_service = utils.import_object(FLAGS.image_service)
- img_service, image_id_trans = _image_service()
-
- opaque_image_id = image_id_trans.to_rs_id(image_id)
- image = img_service.show(opaque_image_id)
+ image = img_service.show(image_id)
if not image:
raise Exception, "Image not found"
inst['server_name'] = env['server']['name']
- inst['image_id'] = opaque_image_id
+ inst['image_id'] = image_id
inst['user_id'] = user_id
inst['launch_time'] = ltime
inst['mac_address'] = utils.generate_mac()
@@ -258,8 +234,7 @@ class Controller(wsgi.Controller):
inst['local_gb'] = flavor['local_gb']
ref = self.db_driver.instance_create(None, inst)
- inst['id'] = inst_id_trans.to_rs_id(ref.ec2_id)
-
+ inst['id'] = ref.internal_id
# TODO(dietz): this isn't explicitly necessary, but the networking
# calls depend on an object with a project_id property, and therefore
# should be cleaned up later
@@ -270,10 +245,10 @@ class Controller(wsgi.Controller):
#TODO(dietz) is this necessary?
inst['launch_index'] = 0
- inst['hostname'] = ref.ec2_id
+ inst['hostname'] = str(ref.internal_id)
self.db_driver.instance_update(None, inst['id'], inst)
- network_manager = utils.import_object(FLAGS.rs_network_manager)
+ network_manager = utils.import_object(FLAGS.network_manager)
address = network_manager.allocate_fixed_ip(api_context,
inst['id'])
diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py
index 4d2d0ede1..4d2d0ede1 100644
--- a/nova/api/rackspace/sharedipgroups.py
+++ b/nova/api/openstack/sharedipgroups.py
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
new file mode 100644
index 000000000..09d15018b
--- /dev/null
+++ b/nova/auth/dbdriver.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Auth driver using the DB as its backend.
+"""
+
+import logging
+import sys
+
+from nova import exception
+from nova import db
+
+
+class DbDriver(object):
+ """DB Auth driver
+
+ Defines enter and exit and therefore supports the with/as syntax.
+ """
+
+ def __init__(self):
+        """Initialize the DB-backed auth driver."""
+ pass
+        # no setup required; the db module is used directly by each method
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+ def get_user(self, uid):
+ """Retrieve user by id"""
+ return self._db_user_to_auth_user(db.user_get({}, uid))
+
+ def get_user_from_access_key(self, access):
+ """Retrieve user by access key"""
+ return self._db_user_to_auth_user(db.user_get_by_access_key({}, access))
+
+ def get_project(self, pid):
+ """Retrieve project by id"""
+ return self._db_project_to_auth_projectuser(db.project_get({}, pid))
+
+ def get_users(self):
+ """Retrieve list of users"""
+ return [self._db_user_to_auth_user(user) for user in db.user_get_all({})]
+
+ def get_projects(self, uid=None):
+ """Retrieve list of projects"""
+ if uid:
+ result = db.project_get_by_user({}, uid)
+ else:
+ result = db.project_get_all({})
+ return [self._db_project_to_auth_projectuser(proj) for proj in result]
+
+ def create_user(self, name, access_key, secret_key, is_admin):
+ """Create a user"""
+ values = { 'id' : name,
+ 'access_key' : access_key,
+ 'secret_key' : secret_key,
+ 'is_admin' : is_admin
+ }
+ try:
+ user_ref = db.user_create({}, values)
+ return self._db_user_to_auth_user(user_ref)
+ except exception.Duplicate, e:
+ raise exception.Duplicate('User %s already exists' % name)
+
+ def _db_user_to_auth_user(self, user_ref):
+ return { 'id' : user_ref['id'],
+ 'name' : user_ref['id'],
+ 'access' : user_ref['access_key'],
+ 'secret' : user_ref['secret_key'],
+ 'admin' : user_ref['is_admin'] }
+
+ def _db_project_to_auth_projectuser(self, project_ref):
+ return { 'id' : project_ref['id'],
+ 'name' : project_ref['name'],
+ 'project_manager_id' : project_ref['project_manager'],
+ 'description' : project_ref['description'],
+ 'member_ids' : [member['id'] for member in project_ref['members']] }
+
+ def create_project(self, name, manager_uid,
+ description=None, member_uids=None):
+ """Create a project"""
+ manager = db.user_get({}, manager_uid)
+ if not manager:
+ raise exception.NotFound("Project can't be created because "
+ "manager %s doesn't exist" % manager_uid)
+
+ # description is a required attribute
+ if description is None:
+ description = name
+
+ # First, we ensure that all the given users exist before we go
+ # on to create the project. This way we won't have to destroy
+ # the project again because a user turns out to be invalid.
+ members = set([manager])
+ if member_uids != None:
+ for member_uid in member_uids:
+ member = db.user_get({}, member_uid)
+ if not member:
+ raise exception.NotFound("Project can't be created "
+ "because user %s doesn't exist"
+ % member_uid)
+ members.add(member)
+
+ values = { 'id' : name,
+ 'name' : name,
+ 'project_manager' : manager['id'],
+ 'description': description }
+
+ try:
+ project = db.project_create({}, values)
+ except exception.Duplicate:
+ raise exception.Duplicate("Project can't be created because "
+ "project %s already exists" % name)
+
+ for member in members:
+ db.project_add_member({}, project['id'], member['id'])
+
+ # This looks silly, but ensures that the members element has been
+ # correctly populated
+ project_ref = db.project_get({}, project['id'])
+ return self._db_project_to_auth_projectuser(project_ref)
+
+ def modify_project(self, project_id, manager_uid=None, description=None):
+ """Modify an existing project"""
+ if not manager_uid and not description:
+ return
+ values = {}
+ if manager_uid:
+ manager = db.user_get({}, manager_uid)
+ if not manager:
+ raise exception.NotFound("Project can't be modified because "
+ "manager %s doesn't exist" %
+ manager_uid)
+ values['project_manager'] = manager['id']
+ if description:
+ values['description'] = description
+
+ db.project_update({}, project_id, values)
+
+ def add_to_project(self, uid, project_id):
+ """Add user to project"""
+ user, project = self._validate_user_and_project(uid, project_id)
+ db.project_add_member({}, project['id'], user['id'])
+
+ def remove_from_project(self, uid, project_id):
+ """Remove user from project"""
+ user, project = self._validate_user_and_project(uid, project_id)
+ db.project_remove_member({}, project['id'], user['id'])
+
+ def is_in_project(self, uid, project_id):
+ """Check if user is in project"""
+ user, project = self._validate_user_and_project(uid, project_id)
+ return user in project.members
+
+ def has_role(self, uid, role, project_id=None):
+ """Check if user has role
+
+ If project is specified, it checks for local role, otherwise it
+ checks for global role
+ """
+
+ return role in self.get_user_roles(uid, project_id)
+
+ def add_role(self, uid, role, project_id=None):
+ """Add role for user (or user and project)"""
+ if not project_id:
+ db.user_add_role({}, uid, role)
+ return
+ db.user_add_project_role({}, uid, project_id, role)
+
+ def remove_role(self, uid, role, project_id=None):
+ """Remove role for user (or user and project)"""
+ if not project_id:
+ db.user_remove_role({}, uid, role)
+ return
+ db.user_remove_project_role({}, uid, project_id, role)
+
+ def get_user_roles(self, uid, project_id=None):
+ """Retrieve list of roles for user (or user and project)"""
+ if project_id is None:
+ roles = db.user_get_roles({}, uid)
+ return roles
+ else:
+ roles = db.user_get_roles_for_project({}, uid, project_id)
+ return roles
+
+ def delete_user(self, id):
+ """Delete a user"""
+ user = db.user_get({}, id)
+ db.user_delete({}, user['id'])
+
+ def delete_project(self, project_id):
+ """Delete a project"""
+ db.project_delete({}, project_id)
+
+ def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
+ """Modify an existing user"""
+ if not access_key and not secret_key and admin is None:
+ return
+ values = {}
+ if access_key:
+ values['access_key'] = access_key
+ if secret_key:
+ values['secret_key'] = secret_key
+ if admin is not None:
+ values['is_admin'] = admin
+ db.user_update({}, uid, values)
+
+ def _validate_user_and_project(self, user_id, project_id):
+ user = db.user_get({}, user_id)
+ if not user:
+ raise exception.NotFound('User "%s" not found' % user_id)
+ project = db.project_get({}, project_id)
+ if not project:
+ raise exception.NotFound('Project "%s" not found' % project_id)
+ return user, project
+
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 67ea06b6c..9c499c98d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -69,7 +69,7 @@ flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
-flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
+flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
@@ -640,7 +640,10 @@ class AuthManager(object):
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
+ try:
+ (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
+ except exception.NotFound:
+ vpn_ip = None
if vpn_ip:
configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 4c6d2f06f..f36e14aa2 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -64,10 +64,15 @@ class ComputeManager(manager.Manager):
@defer.inlineCallbacks
@exception.wrap_exception
+ def refresh_security_group(self, context, security_group_id, **_kwargs):
+ yield self.driver.refresh_security_group(security_group_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
"""Launch a new instance with specified options."""
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref['ec2_id'] in self.driver.list_instances():
+ if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
project_id = instance_ref['project_id']
@@ -129,7 +134,7 @@ class ComputeManager(manager.Manager):
raise exception.Error(
'trying to reboot a non-running'
'instance: %s (state: %s excepted: %s)' %
- (instance_ref['ec2_id'],
+ (instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING))
@@ -151,7 +156,7 @@ class ComputeManager(manager.Manager):
if FLAGS.connection_type == 'libvirt':
fname = os.path.abspath(os.path.join(FLAGS.instances_path,
- instance_ref['ec2_id'],
+ instance_ref['internal_id'],
'console.log'))
with open(fname, 'r') as f:
output = f.read()
diff --git a/nova/db/api.py b/nova/db/api.py
index a9f34ef04..a655e6a8a 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -280,9 +280,9 @@ def instance_get_floating_address(context, instance_id):
return IMPL.instance_get_floating_address(context, instance_id)
-def instance_get_by_ec2_id(context, ec2_id):
+def instance_get_by_internal_id(context, internal_id):
-    """Get an instance by ec2 id."""
+    """Get an instance by its internal id."""
- return IMPL.instance_get_by_ec2_id(context, ec2_id)
+ return IMPL.instance_get_by_internal_id(context, internal_id)
def instance_is_vpn(context, instance_id):
@@ -304,6 +304,11 @@ def instance_update(context, instance_id, values):
return IMPL.instance_update(context, instance_id, values)
+def instance_add_security_group(context, instance_id, security_group_id):
+ """Associate the given security group with the given instance"""
+ return IMPL.instance_add_security_group(context, instance_id, security_group_id)
+
+
###################
@@ -437,7 +442,11 @@ def network_update(context, network_id, values):
def project_get_network(context, project_id):
- """Return the network associated with the project."""
+ """Return the network associated with the project.
+
+ Raises NotFound if no such network can be found.
+
+ """
return IMPL.project_get_network(context, project_id)
@@ -570,3 +579,187 @@ def volume_update(context, volume_id, values):
"""
return IMPL.volume_update(context, volume_id, values)
+
+
+####################
+
+
+def security_group_get_all(context):
+ """Get all security groups"""
+ return IMPL.security_group_get_all(context)
+
+
+def security_group_get(context, security_group_id):
+ """Get security group by its internal id"""
+ return IMPL.security_group_get(context, security_group_id)
+
+
+def security_group_get_by_name(context, project_id, group_name):
+ """Returns a security group with the specified name from a project"""
+ return IMPL.security_group_get_by_name(context, project_id, group_name)
+
+
+def security_group_get_by_project(context, project_id):
+ """Get all security groups belonging to a project"""
+ return IMPL.security_group_get_by_project(context, project_id)
+
+
+def security_group_get_by_instance(context, instance_id):
+ """Get security groups to which the instance is assigned"""
+ return IMPL.security_group_get_by_instance(context, instance_id)
+
+
+def security_group_exists(context, project_id, group_name):
+ """Indicates if a group name exists in a project"""
+ return IMPL.security_group_exists(context, project_id, group_name)
+
+
+def security_group_create(context, values):
+ """Create a new security group"""
+ return IMPL.security_group_create(context, values)
+
+
+def security_group_destroy(context, security_group_id):
+ """Deletes a security group"""
+ return IMPL.security_group_destroy(context, security_group_id)
+
+
+def security_group_destroy_all(context):
+    """Deletes all security groups"""
+ return IMPL.security_group_destroy_all(context)
+
+
+####################
+
+
+def security_group_rule_create(context, values):
+    """Create a new security group rule"""
+ return IMPL.security_group_rule_create(context, values)
+
+
+def security_group_rule_get_by_security_group(context, security_group_id):
+    """Get all rules for a given security group"""
+ return IMPL.security_group_rule_get_by_security_group(context, security_group_id)
+
+def security_group_rule_destroy(context, security_group_rule_id):
+ """Deletes a security group rule"""
+ return IMPL.security_group_rule_destroy(context, security_group_rule_id)
+
+
+###################
+
+
+def user_get(context, id):
+ """Get user by id"""
+ return IMPL.user_get(context, id)
+
+
+def user_get_by_uid(context, uid):
+ """Get user by uid"""
+ return IMPL.user_get_by_uid(context, uid)
+
+
+def user_get_by_access_key(context, access_key):
+ """Get user by access key"""
+ return IMPL.user_get_by_access_key(context, access_key)
+
+
+def user_create(context, values):
+ """Create a new user"""
+ return IMPL.user_create(context, values)
+
+
+def user_delete(context, id):
+ """Delete a user"""
+ return IMPL.user_delete(context, id)
+
+
+def user_get_all(context):
+    """Get all users"""
+ return IMPL.user_get_all(context)
+
+
+def user_add_role(context, user_id, role):
+ """Add another global role for user"""
+ return IMPL.user_add_role(context, user_id, role)
+
+
+def user_remove_role(context, user_id, role):
+ """Remove global role from user"""
+ return IMPL.user_remove_role(context, user_id, role)
+
+
+def user_get_roles(context, user_id):
+ """Get global roles for user"""
+ return IMPL.user_get_roles(context, user_id)
+
+
+def user_add_project_role(context, user_id, project_id, role):
+ """Add project role for user"""
+ return IMPL.user_add_project_role(context, user_id, project_id, role)
+
+
+def user_remove_project_role(context, user_id, project_id, role):
+ """Remove project role from user"""
+ return IMPL.user_remove_project_role(context, user_id, project_id, role)
+
+
+def user_get_roles_for_project(context, user_id, project_id):
+ """Return list of roles a user holds on project"""
+ return IMPL.user_get_roles_for_project(context, user_id, project_id)
+
+
+def user_update(context, user_id, values):
+ """Update user"""
+ return IMPL.user_update(context, user_id, values)
+
+
+def project_get(context, id):
+ """Get project by id"""
+ return IMPL.project_get(context, id)
+
+
+def project_create(context, values):
+ """Create a new project"""
+ return IMPL.project_create(context, values)
+
+
+def project_add_member(context, project_id, user_id):
+ """Add user to project"""
+ return IMPL.project_add_member(context, project_id, user_id)
+
+
+def project_get_all(context):
+ """Get all projects"""
+ return IMPL.project_get_all(context)
+
+
+def project_get_by_user(context, user_id):
+ """Get all projects of which the given user is a member"""
+ return IMPL.project_get_by_user(context, user_id)
+
+
+def project_remove_member(context, project_id, user_id):
+ """Remove the given user from the given project"""
+ return IMPL.project_remove_member(context, project_id, user_id)
+
+
+def project_update(context, project_id, values):
+    """Update the given project"""
+ return IMPL.project_update(context, project_id, values)
+
+
+def project_delete(context, project_id):
+ """Delete project"""
+ return IMPL.project_delete(context, project_id)
+
+
+###################
+
+
+def host_get_networks(context, host):
+ """Return all networks for which the given host is the designated
+ network host
+ """
+ return IMPL.host_get_networks(context, host)
+
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 6868eb176..14714d4b1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -29,8 +29,11 @@ from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import joinedload, joinedload_all
-from sqlalchemy.sql import exists, func
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.sql import exists
+from sqlalchemy.sql import func
+from sqlalchemy.orm.exc import NoResultFound
FLAGS = flags.FLAGS
@@ -240,7 +243,7 @@ def service_create(context, values):
def service_update(context, service_id, values):
session = get_session()
with session.begin():
- service_ref = session_get(context, service_id, session=session)
+ service_ref = service_get(context, service_id, session=session)
for (key, value) in values.iteritems():
service_ref[key] = value
service_ref.save(session=session)
@@ -450,7 +453,6 @@ def fixed_ip_create(_context, values):
fixed_ip_ref.save()
return fixed_ip_ref['address']
-
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
@@ -461,7 +463,6 @@ def fixed_ip_disassociate(context, address):
fixed_ip_ref.instance = None
fixed_ip_ref.save(session=session)
-
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
@@ -525,6 +526,9 @@ def fixed_ip_update(context, address, values):
###################
+#TODO(gundlach): instance_create and volume_create are nearly identical
+#and should be refactored. I expect there are other copy-and-paste
+#functions between the two of them as well.
@require_context
def instance_create(context, values):
instance_ref = models.Instance()
@@ -533,10 +537,11 @@ def instance_create(context, values):
session = get_session()
with session.begin():
- while instance_ref.ec2_id == None:
- ec2_id = utils.generate_uid(instance_ref.__prefix__)
- if not instance_ec2_id_exists(context, ec2_id, session=session):
- instance_ref.ec2_id = ec2_id
+ while instance_ref.internal_id == None:
+ internal_id = utils.generate_uid(instance_ref.__prefix__)
+ if not instance_internal_id_exists(context, internal_id,
+ session=session):
+ instance_ref.internal_id = internal_id
instance_ref.save(session=session)
return instance_ref
@@ -569,11 +574,13 @@ def instance_get(context, instance_id, session=None):
if is_admin_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(id=instance_id
).filter_by(deleted=can_read_deleted(context)
).first()
elif is_user_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
).filter_by(id=instance_id
).filter_by(deleted=False
@@ -589,6 +596,7 @@ def instance_get_all(context):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -598,6 +606,7 @@ def instance_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(deleted=can_read_deleted(context)
).filter_by(user_id=user_id
).all()
@@ -610,6 +619,7 @@ def instance_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(project_id=project_id
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -622,12 +632,14 @@ def instance_get_all_by_reservation(context, reservation_id):
if is_admin_context(context):
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(reservation_id=reservation_id
).filter_by(deleted=can_read_deleted(context)
).all()
elif is_user_context(context):
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
).filter_by(reservation_id=reservation_id
).filter_by(deleted=False
@@ -635,31 +647,35 @@ def instance_get_all_by_reservation(context, reservation_id):
@require_context
-def instance_get_by_ec2_id(context, ec2_id):
+def instance_get_by_internal_id(context, internal_id):
session = get_session()
if is_admin_context(context):
result = session.query(models.Instance
- ).filter_by(ec2_id=ec2_id
+ ).options(joinedload('security_groups')
+ ).filter_by(internal_id=internal_id
).filter_by(deleted=can_read_deleted(context)
).first()
elif is_user_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
- ).filter_by(ec2_id=ec2_id
+ ).filter_by(internal_id=internal_id
).filter_by(deleted=False
).first()
if not result:
- raise exception.NotFound('Instance %s not found' % (ec2_id))
+ raise exception.NotFound('Instance %s not found' % (internal_id))
return result
@require_context
-def instance_ec2_id_exists(context, ec2_id, session=None):
+def instance_internal_id_exists(context, internal_id, session=None):
if not session:
session = get_session()
- return session.query(exists().where(models.Instance.id==ec2_id)).one()[0]
+ return session.query(
+ exists().where(models.Instance.internal_id==internal_id)
+ ).one()[0]
@require_context
@@ -714,6 +730,18 @@ def instance_update(context, instance_id, values):
instance_ref.save(session=session)
+def instance_add_security_group(context, instance_id, security_group_id):
+ """Associate the given security group with the given instance"""
+ session = get_session()
+ with session.begin():
+ instance_ref = instance_get(context, instance_id, session=session)
+ security_group_ref = security_group_get(context,
+ security_group_id,
+ session=session)
+ instance_ref.security_groups += [security_group_ref]
+ instance_ref.save(session=session)
+
+
###################
@@ -1181,6 +1209,7 @@ def volume_get(context, volume_id, session=None):
@require_admin_context
def volume_get_all(context):
+ session = get_session()
return session.query(models.Volume
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -1266,3 +1295,383 @@ def volume_update(context, volume_id, values):
for (key, value) in values.iteritems():
volume_ref[key] = value
volume_ref.save(session=session)
+
+
+###################
+
+
+@require_context
+def security_group_get_all(context):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(deleted=can_read_deleted(context)
+ ).options(joinedload_all('rules')
+ ).all()
+
+
+@require_context
+def security_group_get(context, security_group_id, session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroup
+ ).filter_by(deleted=can_read_deleted(context),
+ ).filter_by(id=security_group_id
+ ).options(joinedload_all('rules')
+ ).first()
+ else:
+ result = session.query(models.SecurityGroup
+ ).filter_by(deleted=False
+ ).filter_by(id=security_group_id
+ ).filter_by(project_id=context.project_id
+ ).options(joinedload_all('rules')
+ ).first()
+ if not result:
+ raise exception.NotFound("No secuity group with id %s" %
+ security_group_id)
+ return result
+
+
+@require_context
+def security_group_get_by_name(context, project_id, group_name):
+ session = get_session()
+ result = session.query(models.SecurityGroup
+ ).filter_by(project_id=project_id
+ ).filter_by(name=group_name
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).options(joinedload_all('instances')
+ ).first()
+ if not result:
+ raise exception.NotFound(
+ 'No security group named %s for project: %s' \
+ % (group_name, project_id))
+ return result
+
+
+@require_context
+def security_group_get_by_project(context, project_id):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).all()
+
+
+@require_context
+def security_group_get_by_instance(context, instance_id):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).join(models.SecurityGroup.instances
+ ).filter_by(id=instance_id
+ ).filter_by(deleted=False
+ ).all()
+
+
+@require_context
+def security_group_exists(context, project_id, group_name):
+ try:
+ group = security_group_get_by_name(context, project_id, group_name)
+ return group != None
+ except exception.NotFound:
+ return False
+
+
+@require_context
+def security_group_create(context, values):
+ security_group_ref = models.SecurityGroup()
+    # FIXME(devcamcar): Unless I do this, accessing rules fails with a lazy
+    # load exception once save() is called. This will get cleaned up in next orm pass.
+ security_group_ref.rules
+ for (key, value) in values.iteritems():
+ security_group_ref[key] = value
+ security_group_ref.save()
+ return security_group_ref
+
+
+@require_context
+def security_group_destroy(context, security_group_id):
+ session = get_session()
+ with session.begin():
+ # TODO(vish): do we have to use sql here?
+ session.execute('update security_groups set deleted=1 where id=:id',
+ {'id': security_group_id})
+ session.execute('update security_group_rules set deleted=1 '
+ 'where group_id=:id',
+ {'id': security_group_id})
+
+@require_context
+def security_group_destroy_all(context, session=None):
+ if not session:
+ session = get_session()
+ with session.begin():
+ # TODO(vish): do we have to use sql here?
+ session.execute('update security_groups set deleted=1')
+ session.execute('update security_group_rules set deleted=1')
+
+
+###################
+
+
+@require_context
+def security_group_rule_get(context, security_group_rule_id, session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule
+ ).filter_by(deleted=can_read_deleted(context)
+ ).filter_by(id=security_group_rule_id
+ ).first()
+ else:
+ # TODO(vish): Join to group and check for project_id
+ result = session.query(models.SecurityGroupIngressRule
+ ).filter_by(deleted=False
+ ).filter_by(id=security_group_rule_id
+ ).first()
+ if not result:
+ raise exception.NotFound("No secuity group rule with id %s" %
+ security_group_rule_id)
+ return result
+
+
+@require_context
+def security_group_rule_create(context, values):
+ security_group_rule_ref = models.SecurityGroupIngressRule()
+ for (key, value) in values.iteritems():
+ security_group_rule_ref[key] = value
+ security_group_rule_ref.save()
+ return security_group_rule_ref
+
+@require_context
+def security_group_rule_destroy(context, security_group_rule_id):
+ session = get_session()
+ with session.begin():
+ security_group_rule = security_group_rule_get(context,
+ security_group_rule_id,
+ session=session)
+ security_group_rule.delete(session=session)
+
+
+###################
+
+@require_admin_context
+def user_get(context, id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.User
+ ).filter_by(id=id
+ ).filter_by(deleted=can_read_deleted(context)
+ ).first()
+
+ if not result:
+ raise exception.NotFound('No user for id %s' % id)
+
+ return result
+
+
+@require_admin_context
+def user_get_by_access_key(context, access_key, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.User
+ ).filter_by(access_key=access_key
+ ).filter_by(deleted=can_read_deleted(context)
+ ).first()
+
+ if not result:
+ raise exception.NotFound('No user for id %s' % id)
+
+ return result
+
+
+@require_admin_context
+def user_create(_context, values):
+ user_ref = models.User()
+ for (key, value) in values.iteritems():
+ user_ref[key] = value
+ user_ref.save()
+ return user_ref
+
+
+@require_admin_context
+def user_delete(context, id):
+ session = get_session()
+ with session.begin():
+ session.execute('delete from user_project_association where user_id=:id',
+ {'id': id})
+ session.execute('delete from user_role_association where user_id=:id',
+ {'id': id})
+ session.execute('delete from user_project_role_association where user_id=:id',
+ {'id': id})
+ user_ref = user_get(context, id, session=session)
+ session.delete(user_ref)
+
+
+def user_get_all(context):
+ session = get_session()
+ return session.query(models.User
+ ).filter_by(deleted=can_read_deleted(context)
+ ).all()
+
+
+def project_create(_context, values):
+ project_ref = models.Project()
+ for (key, value) in values.iteritems():
+ project_ref[key] = value
+ project_ref.save()
+ return project_ref
+
+
+def project_add_member(context, project_id, user_id):
+ session = get_session()
+ with session.begin():
+ project_ref = project_get(context, project_id, session=session)
+ user_ref = user_get(context, user_id, session=session)
+
+ project_ref.members += [user_ref]
+ project_ref.save(session=session)
+
+
+def project_get(context, id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Project
+ ).filter_by(deleted=False
+ ).filter_by(id=id
+ ).options(joinedload_all('members')
+ ).first()
+
+ if not result:
+ raise exception.NotFound("No project with id %s" % id)
+
+ return result
+
+
+def project_get_all(context):
+ session = get_session()
+ return session.query(models.Project
+ ).filter_by(deleted=can_read_deleted(context)
+ ).options(joinedload_all('members')
+ ).all()
+
+
+def project_get_by_user(context, user_id):
+ session = get_session()
+ user = session.query(models.User
+ ).filter_by(deleted=can_read_deleted(context)
+ ).options(joinedload_all('projects')
+ ).first()
+ return user.projects
+
+
+def project_remove_member(context, project_id, user_id):
+ session = get_session()
+ project = project_get(context, project_id, session=session)
+ user = user_get(context, user_id, session=session)
+
+ if user in project.members:
+ project.members.remove(user)
+ project.save(session=session)
+
+
+def user_update(context, user_id, values):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ for (key, value) in values.iteritems():
+ user_ref[key] = value
+ user_ref.save(session=session)
+
+
+def project_update(context, project_id, values):
+ session = get_session()
+ with session.begin():
+ project_ref = project_get(context, project_id, session=session)
+ for (key, value) in values.iteritems():
+ project_ref[key] = value
+ project_ref.save(session=session)
+
+
+def project_delete(context, id):
+ session = get_session()
+ with session.begin():
+ session.execute('delete from user_project_association where project_id=:id',
+ {'id': id})
+ session.execute('delete from user_project_role_association where project_id=:id',
+ {'id': id})
+ project_ref = project_get(context, id, session=session)
+ session.delete(project_ref)
+
+
+def user_get_roles(context, user_id):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ return [role.role for role in user_ref['roles']]
+
+
+def user_get_roles_for_project(context, user_id, project_id):
+ session = get_session()
+ with session.begin():
+ res = session.query(models.UserProjectRoleAssociation
+ ).filter_by(user_id=user_id
+ ).filter_by(project_id=project_id
+ ).all()
+ return [association.role for association in res]
+
+def user_remove_project_role(context, user_id, project_id, role):
+ session = get_session()
+ with session.begin():
+ session.execute('delete from user_project_role_association where ' + \
+ 'user_id=:user_id and project_id=:project_id and ' + \
+ 'role=:role', { 'user_id' : user_id,
+ 'project_id' : project_id,
+ 'role' : role })
+
+
+def user_remove_role(context, user_id, role):
+ session = get_session()
+ with session.begin():
+ res = session.query(models.UserRoleAssociation
+ ).filter_by(user_id=user_id
+ ).filter_by(role=role
+ ).all()
+ for role in res:
+ session.delete(role)
+
+
+def user_add_role(context, user_id, role):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ models.UserRoleAssociation(user=user_ref, role=role).save(session=session)
+
+
+def user_add_project_role(context, user_id, project_id, role):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ project_ref = project_get(context, project_id, session=session)
+ models.UserProjectRoleAssociation(user_id=user_ref['id'],
+ project_id=project_ref['id'],
+ role=role).save(session=session)
+
+
+###################
+
+
+
+@require_admin_context
+def host_get_networks(context, host):
+ session = get_session()
+ with session.begin():
+ return session.query(models.Network
+ ).filter_by(deleted=False
+ ).filter_by(host=host
+ ).all()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 6c64d553d..eed8f0578 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -27,7 +27,9 @@ import datetime
from sqlalchemy.orm import relationship, backref, exc, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
+from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.schema import ForeignKeyConstraint
from nova.db.sqlalchemy.session import get_session
@@ -60,7 +62,13 @@ class NovaBase(object):
if not session:
session = get_session()
session.add(self)
- session.flush()
+ try:
+ session.flush()
+ except IntegrityError, e:
+ if str(e).endswith('is not unique'):
+ raise exception.Duplicate(str(e))
+ else:
+ raise
def delete(self, session=None):
"""Delete this object"""
@@ -144,7 +152,7 @@ class Instance(BASE, NovaBase):
__tablename__ = 'instances'
__prefix__ = 'i'
id = Column(Integer, primary_key=True)
- ec2_id = Column(String(10), unique=True)
+ internal_id = Column(Integer, unique=True)
admin_pass = Column(String(255))
@@ -161,7 +169,7 @@ class Instance(BASE, NovaBase):
@property
def name(self):
- return self.ec2_id
+ return "instance-%d" % self.internal_id
image_id = Column(String(255))
kernel_id = Column(String(255))
@@ -179,7 +187,6 @@ class Instance(BASE, NovaBase):
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(Text)
- security_group = Column(String(255))
state = Column(Integer)
state_description = Column(String(255))
@@ -281,10 +288,66 @@ class ExportDevice(BASE, NovaBase):
'ExportDevice.deleted==False)')
+class SecurityGroupInstanceAssociation(BASE, NovaBase):
+ __tablename__ = 'security_group_instance_association'
+ id = Column(Integer, primary_key=True)
+ security_group_id = Column(Integer, ForeignKey('security_groups.id'))
+ instance_id = Column(Integer, ForeignKey('instances.id'))
+
+
+class SecurityGroup(BASE, NovaBase):
+ """Represents a security group"""
+ __tablename__ = 'security_groups'
+ id = Column(Integer, primary_key=True)
+
+ name = Column(String(255))
+ description = Column(String(255))
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+
+ instances = relationship(Instance,
+ secondary="security_group_instance_association",
+ primaryjoin="and_(SecurityGroup.id == SecurityGroupInstanceAssociation.security_group_id,"
+ "SecurityGroup.deleted == False)",
+ secondaryjoin="and_(SecurityGroupInstanceAssociation.instance_id == Instance.id,"
+ "Instance.deleted == False)",
+ backref='security_groups')
+
+ @property
+ def user(self):
+ return auth.manager.AuthManager().get_user(self.user_id)
+
+ @property
+ def project(self):
+ return auth.manager.AuthManager().get_project(self.project_id)
+
+
+class SecurityGroupIngressRule(BASE, NovaBase):
+ """Represents a rule in a security group"""
+ __tablename__ = 'security_group_rules'
+ id = Column(Integer, primary_key=True)
+
+ parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
+ parent_group = relationship("SecurityGroup", backref="rules",
+ foreign_keys=parent_group_id,
+ primaryjoin="and_(SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,"
+ "SecurityGroupIngressRule.deleted == False)")
+
+ protocol = Column(String(5)) # "tcp", "udp", or "icmp"
+ from_port = Column(Integer)
+ to_port = Column(Integer)
+ cidr = Column(String(255))
+
+    # Note: This is not the parent SecurityGroup. It is the SecurityGroup
+    # to which we are granting access.
+ group_id = Column(Integer, ForeignKey('security_groups.id'))
+
+
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh"""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
+
name = Column(String(255))
user_id = Column(String(255))
@@ -361,6 +424,67 @@ class FixedIp(BASE, NovaBase):
return self.address
+class User(BASE, NovaBase):
+ """Represents a user"""
+ __tablename__ = 'users'
+ id = Column(String(255), primary_key=True)
+
+ name = Column(String(255))
+ access_key = Column(String(255))
+ secret_key = Column(String(255))
+
+ is_admin = Column(Boolean)
+
+
+class Project(BASE, NovaBase):
+ """Represents a project"""
+ __tablename__ = 'projects'
+ id = Column(String(255), primary_key=True)
+ name = Column(String(255))
+ description = Column(String(255))
+
+ project_manager = Column(String(255), ForeignKey(User.id))
+
+ members = relationship(User,
+ secondary='user_project_association',
+ backref='projects')
+
+
+class UserProjectRoleAssociation(BASE, NovaBase):
+ __tablename__ = 'user_project_role_association'
+ user_id = Column(String(255), primary_key=True)
+ user = relationship(User,
+ primaryjoin=user_id==User.id,
+ foreign_keys=[User.id],
+ uselist=False)
+
+ project_id = Column(String(255), primary_key=True)
+ project = relationship(Project,
+ primaryjoin=project_id==Project.id,
+ foreign_keys=[Project.id],
+ uselist=False)
+
+ role = Column(String(255), primary_key=True)
+ ForeignKeyConstraint(['user_id',
+ 'project_id'],
+ ['user_project_association.user_id',
+ 'user_project_association.project_id'])
+
+
+class UserRoleAssociation(BASE, NovaBase):
+ __tablename__ = 'user_role_association'
+ user_id = Column(String(255), ForeignKey('users.id'), primary_key=True)
+ user = relationship(User, backref='roles')
+ role = Column(String(255), primary_key=True)
+
+
+class UserProjectAssociation(BASE, NovaBase):
+ __tablename__ = 'user_project_association'
+ user_id = Column(String(255), ForeignKey(User.id), primary_key=True)
+ project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
+
+
+
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip"""
__tablename__ = 'floating_ips'
@@ -379,9 +503,10 @@ class FloatingIp(BASE, NovaBase):
def register_models():
"""Register Models and create metadata"""
from sqlalchemy import create_engine
- models = (Service, Instance, Volume, ExportDevice,
- FixedIp, FloatingIp, Network,
- AuthToken) # , Image, Host)
+ models = (Service, Instance, Volume, ExportDevice, FixedIp,
+ FloatingIp, Network, SecurityGroup,
+ SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
+ AuthToken, User, Project) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 69a205378..826754f6a 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -36,7 +36,8 @@ def get_session(autocommit=True, expire_on_commit=False):
if not _MAKER:
if not _ENGINE:
_ENGINE = create_engine(FLAGS.sql_connection, echo=False)
- _MAKER = sessionmaker(bind=_ENGINE,
- autocommit=autocommit,
- expire_on_commit=expire_on_commit)
- return _MAKER()
+ _MAKER = (sessionmaker(bind=_ENGINE,
+ autocommit=autocommit,
+ expire_on_commit=expire_on_commit))
+ session = _MAKER()
+ return session
diff --git a/nova/exception.py b/nova/exception.py
index b8894758f..f157fab2d 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -69,6 +69,9 @@ class NotEmpty(Error):
class Invalid(Error):
pass
+class InvalidInputException(Error):
+ pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 068025249..835973810 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -22,6 +22,7 @@ import logging
import Queue as queue
from carrot.backends import base
+from eventlet import greenthread
class Message(base.BaseMessage):
@@ -38,6 +39,7 @@ class Exchange(object):
def publish(self, message, routing_key=None):
logging.debug('(%s) publish (key: %s) %s',
self.name, routing_key, message)
+ routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
logging.debug('Publishing to route %s', f)
@@ -94,6 +96,18 @@ class Backend(object):
self._exchanges[exchange].bind(self._queues[queue].push,
routing_key)
+ def declare_consumer(self, queue, callback, *args, **kwargs):
+ self.current_queue = queue
+ self.current_callback = callback
+
+ def consume(self, *args, **kwargs):
+ while True:
+ item = self.get(self.current_queue)
+ if item:
+ self.current_callback(item)
+ raise StopIteration()
+ greenthread.sleep(0)
+
def get(self, queue, no_ack=False):
if not queue in self._queues or not self._queues[queue].size():
return None
diff --git a/nova/flags.py b/nova/flags.py
index c32cdd7a4..ab80e83fb 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -222,6 +222,10 @@ DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
+# The service to use for image search and retrieval
+DEFINE_string('image_service', 'nova.image.service.LocalImageService',
+ 'The service to use for retrieving and searching for images.')
+
DEFINE_string('host', socket.gethostname(),
'name of this node')
diff --git a/nova/image/service.py b/nova/image/service.py
index 1a7a258b7..5276e1312 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -16,39 +16,218 @@
# under the License.
import cPickle as pickle
+import httplib
+import json
+import logging
import os.path
import random
import string
+import urlparse
-class ImageService(object):
- """Provides storage and retrieval of disk image objects."""
+import webob.exc
- @staticmethod
- def load():
- """Factory method to return image service."""
- #TODO(gundlach): read from config.
- class_ = LocalImageService
- return class_()
+from nova import utils
+from nova import flags
+from nova import exception
+
+
+FLAGS = flags.FLAGS
+
+
+flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Teller service resides')
+flags.DEFINE_string('glance_teller_port', '9191',
+ 'Port for Glance\'s Teller service')
+flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Parallax service resides')
+flags.DEFINE_string('glance_parallax_port', '9292',
+ 'Port for Glance\'s Parallax service')
+
+
+class BaseImageService(object):
+
+ """Base class for providing image search and retrieval services"""
def index(self):
"""
Return a dict from opaque image id to image data.
"""
+ raise NotImplementedError
def show(self, id):
"""
Returns a dict containing image data for the given opaque image id.
+
+ :raises NotFound if the image does not exist
+ """
+ raise NotImplementedError
+
+ def create(self, data):
+ """
+ Store the image data and return the new image id.
+
+ :raises AlreadyExists if the image already exist.
+
+ """
+ raise NotImplementedError
+
+ def update(self, image_id, data):
+ """Replace the contents of the given image with the new data.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ raise NotImplementedError
+
+ def delete(self, image_id):
+ """
+ Delete the given image.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ raise NotImplementedError
+
+
+class TellerClient(object):
+
+ def __init__(self):
+ self.address = FLAGS.glance_teller_address
+ self.port = FLAGS.glance_teller_port
+ url = urlparse.urlparse(self.address)
+ self.netloc = url.netloc
+ self.connection_type = {'http': httplib.HTTPConnection,
+ 'https': httplib.HTTPSConnection}[url.scheme]
+
+
+class ParallaxClient(object):
+
+ def __init__(self):
+ self.address = FLAGS.glance_parallax_address
+ self.port = FLAGS.glance_parallax_port
+ url = urlparse.urlparse(self.address)
+ self.netloc = url.netloc
+ self.connection_type = {'http': httplib.HTTPConnection,
+ 'https': httplib.HTTPSConnection}[url.scheme]
+
+ def get_images(self):
+ """
+ Returns a list of image data mappings from Parallax
+ """
+ try:
+ c = self.connection_type(self.netloc, self.port)
+ c.request("GET", "images")
+ res = c.getresponse()
+ if res.status == 200:
+ # Parallax returns a JSONified dict(images=image_list)
+ data = json.loads(res.read())['images']
+ return data
+ else:
+ logging.warn("Parallax returned HTTP error %d from "
+ "request for /images", res.status_int)
+ return []
+ finally:
+ c.close()
+
+ def get_image_metadata(self, image_id):
+ """
+ Returns a mapping of image metadata from Parallax
+ """
+ try:
+ c = self.connection_type(self.netloc, self.port)
+ c.request("GET", "images/%s" % image_id)
+ res = c.getresponse()
+ if res.status == 200:
+ # Parallax returns a JSONified dict(image=image_info)
+ data = json.loads(res.read())['image']
+ return data
+ else:
+ # TODO(jaypipes): log the error?
+ return None
+ finally:
+ c.close()
+
+ def add_image_metadata(self, image_metadata):
+ """
+ Tells parallax about an image's metadata
"""
+ pass
+
+ def update_image_metadata(self, image_id, image_metadata):
+ """
+ Updates Parallax's information about an image
+ """
+ pass
+
+ def delete_image_metadata(self, image_id):
+ """
+ Deletes Parallax's information about an image
+ """
+ pass
-class GlanceImageService(ImageService):
+class GlanceImageService(BaseImageService):
+
"""Provides storage and retrieval of disk image objects within Glance."""
- # TODO(gundlach): once Glance has an API, build this.
- pass
+ def __init__(self):
+ self.teller = TellerClient()
+ self.parallax = ParallaxClient()
+
+ def index(self):
+ """
+ Calls out to Parallax for a list of images available
+ """
+ images = self.parallax.get_images()
+ return images
+
+ def show(self, id):
+ """
+ Returns a dict containing image data for the given opaque image id.
+ """
+ image = self.parallax.get_image_metadata(id)
+ if image:
+ return image
+ raise exception.NotFound
-class LocalImageService(ImageService):
- """Image service storing images to local disk."""
+ def create(self, data):
+ """
+ Store the image data and return the new image id.
+
+ :raises AlreadyExists if the image already exist.
+
+ """
+ return self.parallax.add_image_metadata(data)
+
+ def update(self, image_id, data):
+ """Replace the contents of the given image with the new data.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ self.parallax.update_image_metadata(image_id, data)
+
+ def delete(self, image_id):
+ """
+ Delete the given image.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ self.parallax.delete_image_metadata(image_id)
+
+ def delete_all(self):
+ """
+ Clears out all images
+ """
+ pass
+
+
+class LocalImageService(BaseImageService):
+
+ """Image service storing images to local disk.
+
+ It assumes that image_ids are integers."""
def __init__(self):
self._path = "/tmp/nova/images"
@@ -57,34 +236,50 @@ class LocalImageService(ImageService):
except OSError: # exists
pass
- def _path_to(self, image_id=''):
- return os.path.join(self._path, image_id)
+ def _path_to(self, image_id):
+ return os.path.join(self._path, str(image_id))
def _ids(self):
"""The list of all image ids."""
- return os.listdir(self._path)
+ return [int(i) for i in os.listdir(self._path)]
def index(self):
return [ self.show(id) for id in self._ids() ]
def show(self, id):
- return pickle.load(open(self._path_to(id)))
+ try:
+ return pickle.load(open(self._path_to(id)))
+ except IOError:
+ raise exception.NotFound
def create(self, data):
"""
Store the image data and return the new image id.
"""
- id = ''.join(random.choice(string.letters) for _ in range(20))
+ id = random.randint(0, 2**32-1)
data['id'] = id
self.update(id, data)
return id
def update(self, image_id, data):
"""Replace the contents of the given image with the new data."""
- pickle.dump(data, open(self._path_to(image_id), 'w'))
+ try:
+ pickle.dump(data, open(self._path_to(image_id), 'w'))
+ except IOError:
+ raise exception.NotFound
def delete(self, image_id):
"""
Delete the given image. Raises OSError if the image does not exist.
"""
- os.unlink(self._path_to(image_id))
+ try:
+ os.unlink(self._path_to(image_id))
+ except IOError:
+ raise exception.NotFound
+
+ def delete_all(self):
+ """
+ Clears out all images in local directory
+ """
+ for id in self._ids():
+ os.unlink(self._path_to(id))
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index a3a0d9a37..0891c02b1 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -28,6 +28,11 @@ from nova import flags
from nova import utils
+def _bin_file(script):
+    """Return the absolute path to script in the bin directory"""
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
@@ -38,7 +43,9 @@ flags.DEFINE_string('networks_path', utils.abspath('../networks'),
flags.DEFINE_string('public_interface', 'vlan1',
'Interface for public IP addresses')
flags.DEFINE_string('bridge_dev', 'eth0',
- 'network device for bridges')
+ 'network device for bridges')
+flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
+ 'location of nova-dhcpbridge')
flags.DEFINE_string('routing_source_ip', '127.0.0.1',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
@@ -139,16 +146,16 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
_execute("sudo brctl addif %s %s" % (bridge, interface))
- if net_attrs:
- _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
- (bridge,
- net_attrs['gateway'],
- net_attrs['broadcast'],
- net_attrs['netmask']))
- else:
- _execute("sudo ifconfig %s up" % bridge)
- _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
- _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
+ if net_attrs:
+ _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
+ (bridge,
+ net_attrs['gateway'],
+ net_attrs['broadcast'],
+ net_attrs['netmask']))
+ else:
+ _execute("sudo ifconfig %s up" % bridge)
+ _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
+ _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
def get_dhcp_hosts(context, network_id):
@@ -172,9 +179,14 @@ def update_dhcp(context, network_id):
signal causing it to reload, otherwise spawn a new instance
"""
network_ref = db.network_get(context, network_id)
- with open(_dhcp_file(network_ref['vlan'], 'conf'), 'w') as f:
+
+ conffile = _dhcp_file(network_ref['vlan'], 'conf')
+ with open(conffile, 'w') as f:
f.write(get_dhcp_hosts(context, network_id))
+ # Make sure dnsmasq can actually read it (it setuid()s to "nobody")
+ os.chmod(conffile, 0644)
+
pid = _dnsmasq_pid_for(network_ref['vlan'])
# if dnsmasq is already running, then tell it to reload
@@ -182,7 +194,7 @@ def update_dhcp(context, network_id):
# TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
# correct dnsmasq process
try:
- os.kill(pid, signal.SIGHUP)
+ _execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Hupping dnsmasq threw %s", exc)
@@ -243,7 +255,7 @@ def _dnsmasq_cmd(net):
' --except-interface=lo',
' --dhcp-range=%s,static,120s' % net['dhcp_start'],
' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'),
- ' --dhcp-script=%s' % _bin_file('nova-dhcpbridge'),
+ ' --dhcp-script=%s' % FLAGS.dhcpbridge,
' --leasefile-ro']
return ''.join(cmd)
@@ -254,7 +266,7 @@ def _stop_dnsmasq(network):
if pid:
try:
- os.kill(pid, signal.SIGTERM)
+ _execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Killing dnsmasq threw %s", exc)
@@ -262,12 +274,10 @@ def _stop_dnsmasq(network):
def _dhcp_file(vlan, kind):
"""Return path to a pid, leases or conf file for a vlan"""
- return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
-
+ if not os.path.exists(FLAGS.networks_path):
+ os.makedirs(FLAGS.networks_path)
-def _bin_file(script):
- """Return the absolute path to scipt in the bin directory"""
- return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+ return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
def _dnsmasq_pid_for(vlan):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a95c46fc9..a1d10c119 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -74,6 +74,12 @@ class NetworkManager(manager.Manager):
self.driver = utils.import_object(network_driver)
super(NetworkManager, self).__init__(*args, **kwargs)
+ def init_host(self):
+ # Set up networking for the projects for which we're already
+ # the designated network host.
+ for network in self.db.host_get_networks(None, self.host):
+ self._on_set_network_host(None, network['id'])
+
def set_network_host(self, context, network_id):
"""Safely sets the host of the network"""
logging.debug("setting network host")
@@ -247,7 +253,7 @@ class VlanManager(NetworkManager):
now = datetime.datetime.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
- num = self.db.fixed_ip_disassociate_all_by_timeout(self,
+ num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
@@ -257,6 +263,7 @@ class VlanManager(NetworkManager):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
+ super(VlanManager, self).init_host()
self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
@@ -383,6 +390,7 @@ class VlanManager(NetworkManager):
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'],
network_ref)
+ self.driver.update_dhcp(context, network_id)
@property
def _bottom_reserved_ips(self):
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index def1b8167..c01b041bb 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -191,14 +191,14 @@ class Image(object):
if kernel_id == 'true':
image_type = 'kernel'
except:
- pass
+ kernel_id = None
try:
ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
if ramdisk_id == 'true':
image_type = 'ramdisk'
except:
- pass
+ ramdisk_id = None
info = {
'imageId': image_id,
@@ -209,6 +209,12 @@ class Image(object):
'imageType' : image_type
}
+ if kernel_id:
+ info['kernelId'] = kernel_id
+
+ if ramdisk_id:
+ info['ramdiskId'] = ramdisk_id
+
def write_state(state):
info['imageState'] = state
with open(os.path.join(image_path, 'info.json'), "w") as f:
diff --git a/nova/process.py b/nova/process.py
index b3cad894b..13cb90e82 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -113,7 +113,7 @@ class BackRelayWithInput(protocol.ProcessProtocol):
if self.started_deferred:
self.started_deferred.callback(self)
if self.process_input:
- self.transport.write(self.process_input)
+ self.transport.write(str(self.process_input))
self.transport.closeStdin()
def get_process_output(executable, args=None, env=None, path=None,
diff --git a/nova/rpc.py b/nova/rpc.py
index fe52ad35f..447ad3b93 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -28,6 +28,7 @@ import uuid
from carrot import connection as carrot_connection
from carrot import messaging
+from eventlet import greenthread
from twisted.internet import defer
from twisted.internet import task
@@ -107,6 +108,14 @@ class Consumer(messaging.Consumer):
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
+ def attach_to_eventlet(self):
+ """Only needed for unit tests!"""
+ def fetch_repeatedly():
+ while True:
+ self.fetch(enable_callbacks=True)
+ greenthread.sleep(0.1)
+ greenthread.spawn(fetch_repeatedly)
+
def attach_to_twisted(self):
"""Attach a callback to twisted that fires 10 times a second"""
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
diff --git a/nova/server.py b/nova/server.py
index d4563bfe0..c58a15041 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -106,6 +106,7 @@ def serve(name, main):
def daemonize(args, name, main):
"""Does the work of daemonizing the process"""
logging.getLogger('amqplib').setLevel(logging.WARN)
+ files_to_keep = []
if FLAGS.daemonize:
logger = logging.getLogger()
formatter = logging.Formatter(
@@ -114,12 +115,14 @@ def daemonize(args, name, main):
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
+ files_to_keep.append(syslog.socket)
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
+ files_to_keep.append(logfile.stream)
stdin, stdout, stderr = None, None, None
else:
stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
@@ -139,6 +142,7 @@ def daemonize(args, name, main):
stdout=stdout,
stderr=stderr,
uid=FLAGS.uid,
- gid=FLAGS.gid
+ gid=FLAGS.gid,
+ files_preserve=files_to_keep
):
main(args)
diff --git a/nova/service.py b/nova/service.py
index a6c186896..115e0ff32 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -52,11 +52,17 @@ class Service(object, service.Service):
self.host = host
self.binary = binary
self.topic = topic
- manager_class = utils.import_class(manager)
- self.manager = manager_class(host=host, *args, **kwargs)
+ self.manager_class_name = manager
+ super(Service, self).__init__(*args, **kwargs)
+ self.saved_args, self.saved_kwargs = args, kwargs
+
+
+ def startService(self): # pylint: disable-msg C0103
+ manager_class = utils.import_class(self.manager_class_name)
+ self.manager = manager_class(host=self.host, *self.saved_args,
+ **self.saved_kwargs)
self.manager.init_host()
self.model_disconnected = False
- super(Service, self).__init__(*args, **kwargs)
try:
service_ref = db.service_get_by_args(None,
self.host,
diff --git a/nova/test.py b/nova/test.py
index 493754e83..5cf2abd53 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -94,6 +94,7 @@ class TrialTestCase(unittest.TestCase):
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
+ db.security_group_destroy_all(None)
super(TrialTestCase, self).tearDown()
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index fc1ab9ae2..f051e2390 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -27,7 +27,8 @@ import webob.dec
import nova.exception
from nova import api
-from nova.tests.api.test_helper import *
+from nova.tests.api.fakes import APIStub
+
class Test(unittest.TestCase):
@@ -43,9 +44,9 @@ class Test(unittest.TestCase):
req = webob.Request.blank(url, environ_keys)
return req.get_response(api.API())
- def test_rackspace(self):
- self.stubs.Set(api.rackspace, 'API', APIStub)
- result = self._request('/v1.0/cloud', 'rs')
+ def test_openstack(self):
+ self.stubs.Set(api.openstack, 'API', APIStub)
+ result = self._request('/v1.0/cloud', 'api')
self.assertEqual(result.body, "/cloud")
def test_ec2(self):
@@ -55,12 +56,12 @@ class Test(unittest.TestCase):
def test_not_found(self):
self.stubs.Set(api.ec2, 'API', APIStub)
- self.stubs.Set(api.rackspace, 'API', APIStub)
+ self.stubs.Set(api.openstack, 'API', APIStub)
result = self._request('/test/cloud', 'ec2')
self.assertNotEqual(result.body, "/cloud")
def test_query_api_versions(self):
- result = self._request('/', 'rs')
+ result = self._request('/', 'api')
self.assertTrue('CURRENT' in result.body)
def test_metadata(self):
diff --git a/nova/tests/api/test_helper.py b/nova/tests/api/fakes.py
index d0a2cc027..d0a2cc027 100644
--- a/nova/tests/api/test_helper.py
+++ b/nova/tests/api/fakes.py
diff --git a/nova/tests/api/rackspace/__init__.py b/nova/tests/api/openstack/__init__.py
index bfd0f87a7..b534897f5 100644
--- a/nova/tests/api/rackspace/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -17,9 +17,9 @@
import unittest
-from nova.api.rackspace import limited
-from nova.api.rackspace import RateLimitingMiddleware
-from nova.tests.api.test_helper import *
+from nova.api.openstack import limited
+from nova.api.openstack import RateLimitingMiddleware
+from nova.tests.api.fakes import APIStub
from webob import Request
@@ -82,7 +82,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
class LimiterTest(unittest.TestCase):
- def testLimiter(self):
+ def test_limiter(self):
items = range(2000)
req = Request.blank('/')
self.assertEqual(limited(items, req), items[ :1000])
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
new file mode 100644
index 000000000..34bc1f2a9
--- /dev/null
+++ b/nova/tests/api/openstack/fakes.py
@@ -0,0 +1,205 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import json
+import random
+import string
+
+import webob
+import webob.dec
+
+from nova import auth
+from nova import utils
+from nova import flags
+from nova import exception as exc
+import nova.api.openstack.auth
+from nova.image import service
+from nova.wsgi import Router
+
+
+FLAGS = flags.FLAGS
+
+
+class Context(object):
+ pass
+
+
+class FakeRouter(Router):
+ def __init__(self):
+ pass
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ res = webob.Response()
+ res.status = '200'
+ res.headers['X-Test-Success'] = 'True'
+ return res
+
+
+def fake_auth_init(self):
+ self.db = FakeAuthDatabase()
+ self.context = Context()
+ self.auth = FakeAuthManager()
+ self.host = 'foo'
+
+
+@webob.dec.wsgify
+def fake_wsgi(self, req):
+ req.environ['nova.context'] = dict(user=dict(id=1))
+ if req.body:
+ req.environ['inst_dict'] = json.loads(req.body)
+ return self.application
+
+
+def stub_out_key_pair_funcs(stubs):
+ def key_pair(context, user_id):
+ return [dict(name='key', public_key='public_key')]
+ stubs.Set(nova.db.api, 'key_pair_get_all_by_user',
+ key_pair)
+
+
+def stub_out_image_service(stubs):
+ def fake_image_show(meh, id):
+ return dict(kernelId=1, ramdiskId=1)
+
+ stubs.Set(nova.image.service.LocalImageService, 'show', fake_image_show)
+
+def stub_out_auth(stubs):
+ def fake_auth_init(self, app):
+ self.application = app
+
+ stubs.Set(nova.api.openstack.AuthMiddleware,
+ '__init__', fake_auth_init)
+ stubs.Set(nova.api.openstack.AuthMiddleware,
+ '__call__', fake_wsgi)
+
+
+def stub_out_rate_limiting(stubs):
+ def fake_rate_init(self, app):
+ super(nova.api.openstack.RateLimitingMiddleware, self).__init__(app)
+ self.application = app
+
+ stubs.Set(nova.api.openstack.RateLimitingMiddleware,
+ '__init__', fake_rate_init)
+
+ stubs.Set(nova.api.openstack.RateLimitingMiddleware,
+ '__call__', fake_wsgi)
+
+
+def stub_out_networking(stubs):
+ def get_my_ip():
+ return '127.0.0.1'
+ stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
+ FLAGS.FAKE_subdomain = 'api'
+
+
+def stub_out_glance(stubs):
+
+ class FakeParallaxClient:
+
+ def __init__(self):
+ self.fixtures = {}
+
+ def fake_get_images(self):
+ return self.fixtures
+
+ def fake_get_image_metadata(self, image_id):
+ for k, f in self.fixtures.iteritems():
+ if k == image_id:
+ return f
+ return None
+
+ def fake_add_image_metadata(self, image_data):
+ id = ''.join(random.choice(string.letters) for _ in range(20))
+ image_data['id'] = id
+ self.fixtures[id] = image_data
+ return id
+
+ def fake_update_image_metadata(self, image_id, image_data):
+
+ if image_id not in self.fixtures.keys():
+ raise exc.NotFound
+
+ self.fixtures[image_id].update(image_data)
+
+ def fake_delete_image_metadata(self, image_id):
+
+ if image_id not in self.fixtures.keys():
+ raise exc.NotFound
+
+ del self.fixtures[image_id]
+
+ def fake_delete_all(self):
+ self.fixtures = {}
+
+ fake_parallax_client = FakeParallaxClient()
+ stubs.Set(nova.image.service.ParallaxClient, 'get_images',
+ fake_parallax_client.fake_get_images)
+ stubs.Set(nova.image.service.ParallaxClient, 'get_image_metadata',
+ fake_parallax_client.fake_get_image_metadata)
+ stubs.Set(nova.image.service.ParallaxClient, 'add_image_metadata',
+ fake_parallax_client.fake_add_image_metadata)
+ stubs.Set(nova.image.service.ParallaxClient, 'update_image_metadata',
+ fake_parallax_client.fake_update_image_metadata)
+ stubs.Set(nova.image.service.ParallaxClient, 'delete_image_metadata',
+ fake_parallax_client.fake_delete_image_metadata)
+ stubs.Set(nova.image.service.GlanceImageService, 'delete_all',
+ fake_parallax_client.fake_delete_all)
+
+
+class FakeAuthDatabase(object):
+ data = {}
+
+ @staticmethod
+ def auth_get_token(context, token_hash):
+ return FakeAuthDatabase.data.get(token_hash, None)
+
+ @staticmethod
+ def auth_create_token(context, token):
+ token['created_at'] = datetime.datetime.now()
+ FakeAuthDatabase.data[token['token_hash']] = token
+
+ @staticmethod
+ def auth_destroy_token(context, token):
+ if FakeAuthDatabase.data.has_key(token['token_hash']):
+ del FakeAuthDatabase.data['token_hash']
+
+
+class FakeAuthManager(object):
+ auth_data = {}
+
+ def add_user(self, key, user):
+ FakeAuthManager.auth_data[key] = user
+
+ def get_user(self, uid):
+ for k, v in FakeAuthManager.auth_data.iteritems():
+ if v['uid'] == uid:
+ return v
+ return None
+
+ def get_user_from_access_key(self, key):
+ return FakeAuthManager.auth_data.get(key, None)
+
+
+class FakeRateLimiter(object):
+ def __init__(self, application):
+ self.application = application
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ return self.application
diff --git a/nova/tests/api/rackspace/auth.py b/nova/tests/api/openstack/test_auth.py
index 56677c2f4..d2ba80243 100644
--- a/nova/tests/api/rackspace/auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -6,26 +6,26 @@ import webob
import webob.dec
import nova.api
-import nova.api.rackspace.auth
+import nova.api.openstack.auth
from nova import auth
-from nova.tests.api.rackspace import test_helper
+from nova.tests.api.openstack import fakes
class Test(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- self.stubs.Set(nova.api.rackspace.auth.BasicApiAuthManager,
- '__init__', test_helper.fake_auth_init)
- test_helper.FakeAuthManager.auth_data = {}
- test_helper.FakeAuthDatabase.data = {}
- test_helper.stub_out_rate_limiting(self.stubs)
- test_helper.stub_for_testing(self.stubs)
+ self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
+ '__init__', fakes.fake_auth_init)
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_networking(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
- test_helper.fake_data_store = {}
+ fakes.fake_data_store = {}
def test_authorize_user(self):
- f = test_helper.FakeAuthManager()
+ f = fakes.FakeAuthManager()
f.add_user('derp', { 'uid': 1, 'name':'herp' } )
req = webob.Request.blank('/v1.0/')
@@ -39,7 +39,7 @@ class Test(unittest.TestCase):
self.assertEqual(result.headers['X-Storage-Url'], "")
def test_authorize_token(self):
- f = test_helper.FakeAuthManager()
+ f = fakes.FakeAuthManager()
f.add_user('derp', { 'uid': 1, 'name':'herp' } )
req = webob.Request.blank('/v1.0/')
@@ -55,8 +55,8 @@ class Test(unittest.TestCase):
self.assertEqual(result.headers['X-Storage-Url'], "")
token = result.headers['X-Auth-Token']
- self.stubs.Set(nova.api.rackspace, 'APIRouter',
- test_helper.FakeRouter)
+ self.stubs.Set(nova.api.openstack, 'APIRouter',
+ fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
result = req.get_response(nova.api.API())
@@ -74,10 +74,10 @@ class Test(unittest.TestCase):
return { 'token_hash':token_hash,
'created_at':datetime.datetime(1990, 1, 1) }
- self.stubs.Set(test_helper.FakeAuthDatabase, 'auth_destroy_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
destroy_token_mock)
- self.stubs.Set(test_helper.FakeAuthDatabase, 'auth_get_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token',
bad_token)
req = webob.Request.blank('/v1.0/')
diff --git a/nova/tests/api/rackspace/testfaults.py b/nova/tests/api/openstack/test_faults.py
index b2931bc98..70a811469 100644
--- a/nova/tests/api/rackspace/testfaults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -3,7 +3,7 @@ import webob
import webob.dec
import webob.exc
-from nova.api.rackspace import faults
+from nova.api.openstack import faults
class TestFaults(unittest.TestCase):
diff --git a/nova/tests/api/rackspace/flavors.py b/nova/tests/api/openstack/test_flavors.py
index d25a2e2be..8dd4d1f29 100644
--- a/nova/tests/api/rackspace/flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -16,21 +16,23 @@
# under the License.
import unittest
+
import stubout
+import webob
import nova.api
-from nova.api.rackspace import flavors
-from nova.tests.api.rackspace import test_helper
-from nova.tests.api.test_helper import *
+from nova.api.openstack import flavors
+from nova.tests.api.openstack import fakes
+
class FlavorsTest(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- test_helper.FakeAuthManager.auth_data = {}
- test_helper.FakeAuthDatabase.data = {}
- test_helper.stub_for_testing(self.stubs)
- test_helper.stub_out_rate_limiting(self.stubs)
- test_helper.stub_out_auth(self.stubs)
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
new file mode 100644
index 000000000..505fea3e2
--- /dev/null
+++ b/nova/tests/api/openstack/test_images.py
@@ -0,0 +1,141 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import unittest
+
+import stubout
+
+from nova import exception
+from nova import utils
+from nova.api.openstack import images
+from nova.tests.api.openstack import fakes
+
+
+class BaseImageServiceTests():
+
+ """Tests to run against all image service implementations"""
+
+ def test_create(self):
+
+ fixture = {'name': 'test image',
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'serverId': None,
+ 'progress': None}
+
+ num_images = len(self.service.index())
+
+ id = self.service.create(fixture)
+
+ self.assertNotEquals(None, id)
+ self.assertEquals(num_images + 1, len(self.service.index()))
+
+ def test_create_and_show_non_existing_image(self):
+
+ fixture = {'name': 'test image',
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'serverId': None,
+ 'progress': None}
+
+ num_images = len(self.service.index())
+
+ id = self.service.create(fixture)
+
+ self.assertNotEquals(None, id)
+
+ self.assertRaises(exception.NotFound,
+ self.service.show,
+ 'bad image id')
+
+ def test_update(self):
+
+ fixture = {'name': 'test image',
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'serverId': None,
+ 'progress': None}
+
+ id = self.service.create(fixture)
+
+ fixture['status'] = 'in progress'
+
+ self.service.update(id, fixture)
+ new_image_data = self.service.show(id)
+ self.assertEquals('in progress', new_image_data['status'])
+
+ def test_delete(self):
+
+ fixtures = [
+ {'name': 'test image 1',
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'serverId': None,
+ 'progress': None},
+ {'name': 'test image 2',
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'serverId': None,
+ 'progress': None}]
+
+ ids = []
+ for fixture in fixtures:
+ new_id = self.service.create(fixture)
+ ids.append(new_id)
+
+ num_images = len(self.service.index())
+ self.assertEquals(2, num_images)
+
+ self.service.delete(ids[0])
+
+ num_images = len(self.service.index())
+ self.assertEquals(1, num_images)
+
+
+class LocalImageServiceTest(unittest.TestCase,
+ BaseImageServiceTests):
+
+ """Tests the local image service"""
+
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+ self.service = utils.import_object('nova.image.service.LocalImageService')
+
+ def tearDown(self):
+ self.service.delete_all()
+ self.stubs.UnsetAll()
+
+
+class GlanceImageServiceTest(unittest.TestCase,
+ BaseImageServiceTests):
+
+ """Tests the Glance image service"""
+
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+ fakes.stub_out_glance(self.stubs)
+ self.service = utils.import_object('nova.image.service.GlanceImageService')
+
+ def tearDown(self):
+ self.service.delete_all()
+ self.stubs.UnsetAll()
diff --git a/nova/api/rackspace/ratelimiting/tests.py b/nova/tests/api/openstack/test_ratelimiting.py
index 4c9510917..ad9e67454 100644
--- a/nova/api/rackspace/ratelimiting/tests.py
+++ b/nova/tests/api/openstack/test_ratelimiting.py
@@ -4,7 +4,7 @@ import time
import unittest
import webob
-import nova.api.rackspace.ratelimiting as ratelimiting
+import nova.api.openstack.ratelimiting as ratelimiting
class LimiterTest(unittest.TestCase):
diff --git a/nova/tests/api/rackspace/servers.py b/nova/tests/api/openstack/test_servers.py
index 69ad2c1d3..d1ee533b6 100644
--- a/nova/tests/api/rackspace/servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -19,22 +19,26 @@ import json
import unittest
import stubout
+import webob
from nova import db
from nova import flags
-import nova.api.rackspace
-from nova.api.rackspace import servers
+import nova.api.openstack
+from nova.api.openstack import servers
import nova.db.api
from nova.db.sqlalchemy.models import Instance
import nova.rpc
-from nova.tests.api.test_helper import *
-from nova.tests.api.rackspace import test_helper
+from nova.tests.api.openstack import fakes
+
FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
def return_server(context, id):
return stub_instance(id)
+
def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
@@ -45,19 +49,19 @@ def stub_instance(id, user_id=1):
user_id=user_id
)
+
class ServersTest(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- test_helper.FakeAuthManager.auth_data = {}
- test_helper.FakeAuthDatabase.data = {}
- test_helper.stub_for_testing(self.stubs)
- test_helper.stub_out_rate_limiting(self.stubs)
- test_helper.stub_out_auth(self.stubs)
- test_helper.stub_out_id_translator(self.stubs)
- test_helper.stub_out_key_pair_funcs(self.stubs)
- test_helper.stub_out_image_service(self.stubs)
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_ec2_id', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get_by_internal_id', return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
@@ -68,7 +72,7 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(nova.api.API())
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], '1')
+ self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_list(self):
@@ -89,7 +93,7 @@ class ServersTest(unittest.TestCase):
def instance_create(context, inst):
class Foo(object):
- ec2_id = 1
+ internal_id = 1
return Foo()
def fake_method(*args, **kwargs):
@@ -108,10 +112,9 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
- self.stubs.Set(nova.network.manager.FlatManager, 'allocate_fixed_ip',
+ self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
- test_helper.stub_out_id_translator(self.stubs)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality = {}
@@ -241,5 +244,6 @@ class ServersTest(unittest.TestCase):
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
+
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/api/rackspace/sharedipgroups.py b/nova/tests/api/openstack/test_sharedipgroups.py
index 1906b54f5..d199951d8 100644
--- a/nova/tests/api/rackspace/sharedipgroups.py
+++ b/nova/tests/api/openstack/test_sharedipgroups.py
@@ -15,11 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import stubout
import unittest
-from nova.api.rackspace import sharedipgroups
-from nova.tests.api.test_helper import *
+import stubout
+
+from nova.api.openstack import sharedipgroups
+
class SharedIpGroupsTest(unittest.TestCase):
def setUp(self):
@@ -36,6 +37,3 @@ class SharedIpGroupsTest(unittest.TestCase):
def test_delete_shared_ip_group(self):
pass
-
-
-
diff --git a/nova/tests/api/rackspace/images.py b/nova/tests/api/rackspace/images.py
deleted file mode 100644
index 4c9987e8b..000000000
--- a/nova/tests/api/rackspace/images.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import stubout
-import unittest
-
-from nova.api.rackspace import images
-from nova.tests.api.test_helper import *
-
-class ImagesTest(unittest.TestCase):
- def setUp(self):
- self.stubs = stubout.StubOutForTesting()
-
- def tearDown(self):
- self.stubs.UnsetAll()
-
- def test_get_image_list(self):
- pass
-
- def test_delete_image(self):
- pass
-
- def test_create_image(self):
- pass
-
-
diff --git a/nova/tests/api/rackspace/test_helper.py b/nova/tests/api/rackspace/test_helper.py
deleted file mode 100644
index 2cf154f63..000000000
--- a/nova/tests/api/rackspace/test_helper.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import datetime
-import json
-
-import webob
-import webob.dec
-
-from nova import auth
-from nova import utils
-from nova import flags
-import nova.api.rackspace.auth
-import nova.api.rackspace._id_translator
-from nova.image import service
-from nova.wsgi import Router
-
-FLAGS = flags.FLAGS
-
-class Context(object):
- pass
-
-class FakeRouter(Router):
- def __init__(self):
- pass
-
- @webob.dec.wsgify
- def __call__(self, req):
- res = webob.Response()
- res.status = '200'
- res.headers['X-Test-Success'] = 'True'
- return res
-
-def fake_auth_init(self):
- self.db = FakeAuthDatabase()
- self.context = Context()
- self.auth = FakeAuthManager()
- self.host = 'foo'
-
-@webob.dec.wsgify
-def fake_wsgi(self, req):
- req.environ['nova.context'] = dict(user=dict(id=1))
- if req.body:
- req.environ['inst_dict'] = json.loads(req.body)
- return self.application
-
-def stub_out_key_pair_funcs(stubs):
- def key_pair(context, user_id):
- return [dict(name='key', public_key='public_key')]
- stubs.Set(nova.db.api, 'key_pair_get_all_by_user',
- key_pair)
-
-def stub_out_image_service(stubs):
- def fake_image_show(meh, id):
- return dict(kernelId=1, ramdiskId=1)
-
- stubs.Set(nova.image.service.LocalImageService, 'show', fake_image_show)
-
-def stub_out_id_translator(stubs):
- class FakeTranslator(object):
- def __init__(self, id_type, service_name):
- pass
-
- def to_rs_id(self, id):
- return id
-
- def from_rs_id(self, id):
- return id
-
- stubs.Set(nova.api.rackspace._id_translator,
- 'RackspaceAPIIdTranslator', FakeTranslator)
-
-def stub_out_auth(stubs):
- def fake_auth_init(self, app):
- self.application = app
-
- stubs.Set(nova.api.rackspace.AuthMiddleware,
- '__init__', fake_auth_init)
- stubs.Set(nova.api.rackspace.AuthMiddleware,
- '__call__', fake_wsgi)
-
-def stub_out_rate_limiting(stubs):
- def fake_rate_init(self, app):
- super(nova.api.rackspace.RateLimitingMiddleware, self).__init__(app)
- self.application = app
-
- stubs.Set(nova.api.rackspace.RateLimitingMiddleware,
- '__init__', fake_rate_init)
-
- stubs.Set(nova.api.rackspace.RateLimitingMiddleware,
- '__call__', fake_wsgi)
-
-def stub_for_testing(stubs):
- def get_my_ip():
- return '127.0.0.1'
- stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
- FLAGS.FAKE_subdomain = 'rs'
-
-class FakeAuthDatabase(object):
- data = {}
-
- @staticmethod
- def auth_get_token(context, token_hash):
- return FakeAuthDatabase.data.get(token_hash, None)
-
- @staticmethod
- def auth_create_token(context, token):
- token['created_at'] = datetime.datetime.now()
- FakeAuthDatabase.data[token['token_hash']] = token
-
- @staticmethod
- def auth_destroy_token(context, token):
- if FakeAuthDatabase.data.has_key(token['token_hash']):
- del FakeAuthDatabase.data['token_hash']
-
-class FakeAuthManager(object):
- auth_data = {}
-
- def add_user(self, key, user):
- FakeAuthManager.auth_data[key] = user
-
- def get_user(self, uid):
- for k, v in FakeAuthManager.auth_data.iteritems():
- if v['uid'] == uid:
- return v
- return None
-
- def get_user_from_access_key(self, key):
- return FakeAuthManager.auth_data.get(key, None)
-
-class FakeRateLimiter(object):
- def __init__(self, application):
- self.application = application
-
- @webob.dec.wsgify
- def __call__(self, req):
- return self.application
diff --git a/nova/tests/api/wsgi_test.py b/nova/tests/api/test_wsgi.py
index 9425b01d0..9425b01d0 100644
--- a/nova/tests/api/wsgi_test.py
+++ b/nova/tests/api/test_wsgi.py
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index c040cdad3..7ab27e000 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -91,6 +91,9 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
self.app = api.API()
+
+ def expect_http(self, host=None, is_secure=False):
+ """Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -100,9 +103,6 @@ class ApiEc2TestCase(test.BaseTestCase):
path='/services/Cloud')
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
-
- def expect_http(self, host=None, is_secure=False):
- """Returns a new EC2 connection"""
http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
@@ -138,3 +138,185 @@ class ApiEc2TestCase(test.BaseTestCase):
self.assertEquals(len(results), 1)
self.manager.delete_project(project)
self.manager.delete_user(user)
+
+ def test_get_all_security_groups(self):
+ """Test that we can retrieve security groups"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEquals(len(rv), 1)
+ self.assertEquals(rv[0].name, 'default')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ def test_create_delete_security_group(self):
+ """Test that we can create a security group"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEquals(len(rv), 2)
+ self.assertTrue(security_group_name in [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """
+ Test that we can add and remove CIDR based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(int(group.rules[0].from_port), 80)
+ self.assertEquals(int(group.rules[0].to_port), 81)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """
+ Test that we can grant and revoke another security group access
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]),
+ '%s-%s' % (other_security_group_name, 'fake'))
+
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 1955bb417..99f7ab599 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -75,8 +75,9 @@ class user_and_project_generator(object):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
-class AuthManagerTestCase(test.TrialTestCase):
+class AuthManagerTestCase(object):
def setUp(self):
+ FLAGS.auth_driver = self.auth_driver
super(AuthManagerTestCase, self).setUp()
self.flags(connection_type='fake')
self.manager = manager.AuthManager()
@@ -320,6 +321,12 @@ class AuthManagerTestCase(test.TrialTestCase):
self.assertEqual('secret', user.secret)
self.assertTrue(user.is_admin())
+class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
+ auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+
+class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase):
+ auth_driver = 'nova.auth.dbdriver.DbDriver'
+
if __name__ == "__main__":
# TODO: Implement use_fake as an option
diff --git a/nova/tests/bundle/1mb.manifest.xml b/nova/tests/bundle/1mb.manifest.xml
index dc3315957..01648a544 100644
--- a/nova/tests/bundle/1mb.manifest.xml
+++ b/nova/tests/bundle/1mb.manifest.xml
@@ -1 +1 @@
-<?xml version="1.0" ?><manifest><version>2007-10-10</version><bundler><name>euca-tools</name><version>1.2</version><release>31337</release></bundler><machine_configuration><architecture>x86_64</architecture></machine_configuration><image><name>1mb</name><user>42</user><type>machine</type><digest algorithm="SHA1">da39a3ee5e6b4b0d3255bfef95601890afd80709</digest><size>1048576</size><bundled_size>1136</bundled_size><ec2_encrypted_key algorithm="AES-128-CBC">33a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d9915</ec2_encrypted_key><user_encrypted_key algorithm="AES-128-CBC">4c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa3023</user_encrypted_key><ec2_encrypted_iv>14bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb3</ec2_encrypted_iv><user_encrypted_iv>8f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b</user_encrypted_iv><parts count="2"><part index="0"><filename>1mb.part.0</filename><digest algorithm="SHA1">c4413423cf7a57e71187e19bfd5cd4b514a64283</digest></part><part index="1"><filename>1mb.part.1</filename><digest 
algorithm="SHA1">9d4262e6589393d09a11a0332af169887bc2e57d</digest></part></parts></image><signature>4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e</signature></manifest> \ No newline at end of file
+<?xml version="1.0" ?><manifest><version>2007-10-10</version><bundler><name>euca-tools</name><version>1.2</version><release>31337</release></bundler><machine_configuration><architecture>x86_64</architecture><kernel_id>aki-test</kernel_id><ramdisk_id>ari-test</ramdisk_id></machine_configuration><image><name>1mb</name><user>42</user><type>machine</type><digest algorithm="SHA1">da39a3ee5e6b4b0d3255bfef95601890afd80709</digest><size>1048576</size><bundled_size>1136</bundled_size><ec2_encrypted_key algorithm="AES-128-CBC">33a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d9915</ec2_encrypted_key><user_encrypted_key algorithm="AES-128-CBC">4c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa3023</user_encrypted_key><ec2_encrypted_iv>14bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb3</ec2_encrypted_iv><user_encrypted_iv>8f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b</user_encrypted_iv><parts count="2"><part index="0"><filename>1mb.part.0</filename><digest algorithm="SHA1">c4413423cf7a57e71187e19bfd5cd4b514a64283</digest></part><part index="1"><filename>1mb.part.1</filename><digest 
algorithm="SHA1">9d4262e6589393d09a11a0332af169887bc2e57d</digest></part></parts></image><signature>4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e</signature></manifest>
diff --git a/nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml b/nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
new file mode 100644
index 000000000..73d7ace00
--- /dev/null
+++ b/nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
@@ -0,0 +1 @@
+<?xml version="1.0" ?><manifest><version>2007-10-10</version><bundler><name>euca-tools</name><version>1.2</version><release>31337</release></bundler><machine_configuration><architecture>x86_64</architecture></machine_configuration><image><name>1mb</name><user>42</user><type>machine</type><digest algorithm="SHA1">da39a3ee5e6b4b0d3255bfef95601890afd80709</digest><size>1048576</size><bundled_size>1136</bundled_size><ec2_encrypted_key algorithm="AES-128-CBC">33a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d9915</ec2_encrypted_key><user_encrypted_key algorithm="AES-128-CBC">4c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa3023</user_encrypted_key><ec2_encrypted_iv>14bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb3</ec2_encrypted_iv><user_encrypted_iv>8f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b</user_encrypted_iv><parts count="2"><part index="0"><filename>1mb.part.0</filename><digest algorithm="SHA1">c4413423cf7a57e71187e19bfd5cd4b514a64283</digest></part><part index="1"><filename>1mb.part.1</filename><digest 
algorithm="SHA1">9d4262e6589393d09a11a0332af169887bc2e57d</digest></part></parts></image><signature>4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e</signature></manifest>
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index ae7dea1db..8e5881edb 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from base64 import b64decode
import json
import logging
from M2Crypto import BIO
@@ -63,11 +64,17 @@ class CloudTestCase(test.TrialTestCase):
self.cloud = cloud.CloudController()
# set up a service
- self.compute = utils.import_class(FLAGS.compute_manager)
+ self.compute = utils.import_class(FLAGS.compute_manager)()
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.compute)
- self.compute_consumer.attach_to_twisted()
+ self.compute_consumer.attach_to_eventlet()
+ self.network = utils.import_class(FLAGS.network_manager)()
+ self.network_consumer = rpc.AdapterConsumer(connection=self.conn,
+ topic=FLAGS.network_topic,
+ proxy=self.network)
+ self.network_consumer.attach_to_eventlet()
+
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
@@ -85,15 +92,17 @@ class CloudTestCase(test.TrialTestCase):
return cloud._gen_key(self.context, self.context.user.id, name)
def test_console_output(self):
- if FLAGS.connection_type == 'fake':
- logging.debug("Can't test instances without a real virtual env.")
- return
- instance_id = 'foo'
- inst = yield self.compute.run_instance(instance_id)
- output = yield self.cloud.get_console_output(self.context, [instance_id])
- logging.debug(output)
- self.assert_(output)
- rv = yield self.compute.terminate_instance(instance_id)
+ image_id = FLAGS.default_image
+ instance_type = FLAGS.default_instance_type
+ max_count = 1
+ kwargs = {'image_id': image_id,
+ 'instance_type': instance_type,
+ 'max_count': max_count }
+ rv = yield self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ output = yield self.cloud.get_console_output(context=self.context, instance_id=[instance_id])
+ self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
+ rv = yield self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
@@ -236,7 +245,8 @@ class CloudTestCase(test.TrialTestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create({}, {})
- self.cloud.update_instance(self.context, inst['ec2_id'],
+ ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get({}, inst['id'])
self.assertEqual('c00l 1m4g3', inst['display_name'])
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 8f4754650..4bbef8832 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -24,7 +24,7 @@ flags.DECLARE('volume_driver', 'nova.volume.manager')
FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
-FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index 1d6b9e826..872f1ab23 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -133,13 +133,22 @@ class ObjectStoreTestCase(test.TrialTestCase):
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
+ self.do_test_images('1mb.manifest.xml', True,
+ 'image_bucket1', 'i-testing1')
+
+ def test_images_no_kernel_or_ramdisk(self):
+ self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
+ False, 'image_bucket2', 'i-testing2')
+
+ def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
+ image_bucket, image_name):
"Test the image API."
self.context.user = self.auth_manager.get_user('user1')
self.context.project = self.auth_manager.get_project('proj1')
# create a bucket for our bundle
- objectstore.bucket.Bucket.create('image_bucket', self.context)
- bucket = objectstore.bucket.Bucket('image_bucket')
+ objectstore.bucket.Bucket.create(image_bucket, self.context)
+ bucket = objectstore.bucket.Bucket(image_bucket)
# upload an image manifest/parts
bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
@@ -147,18 +156,28 @@ class ObjectStoreTestCase(test.TrialTestCase):
bucket[os.path.basename(path)] = open(path, 'rb').read()
# register an image
- image.Image.register_aws_image('i-testing',
- 'image_bucket/1mb.manifest.xml',
+ image.Image.register_aws_image(image_name,
+ '%s/%s' % (image_bucket, manifest_file),
self.context)
# verify image
- my_img = image.Image('i-testing')
+ my_img = image.Image(image_name)
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)
sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
+ if expect_kernel_and_ramdisk:
+ # Verify the default kernel and ramdisk are set
+ self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
+ self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
+ else:
+ # Verify that the default kernel and ramdisk (the ones from FLAGS)
+ # don't get embedded in the metadata
+ self.assertFalse('kernelId' in my_img.metadata)
+ self.assertFalse('ramdiskId' in my_img.metadata)
+
# verify image permissions
self.context.user = self.auth_manager.get_user('user2')
self.context.project = self.auth_manager.get_project('proj2')
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index f6ee19756..80100fc2f 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -118,10 +118,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute2.startService()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
compute1.kill()
@@ -133,10 +135,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute2.startService()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance()
@@ -154,10 +158,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
+ compute2.startService()
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
@@ -185,10 +191,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-volume',
'volume',
FLAGS.volume_manager)
+ volume1.startService()
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
+ volume2.startService()
volume_id1 = self._create_volume()
volume1.create_volume(self.context, volume_id1)
volume_id2 = self._create_volume()
@@ -206,10 +214,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-volume',
'volume',
FLAGS.volume_manager)
+ volume1.startService()
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
+ volume2.startService()
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 06f80e82c..6afeec377 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue
import mox
+from twisted.application.app import startApplication
+
from nova import exception
from nova import flags
from nova import rpc
@@ -96,6 +98,7 @@ class ServiceTestCase(test.BaseTestCase):
self.mox.ReplayAll()
app = service.Service.create(host=host, binary=binary)
+ startApplication(app, False)
self.assert_(app)
# We're testing sort of weird behavior in how report_state decides
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
index 2aab16809..684347473 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/virt_unittest.py
@@ -14,36 +14,72 @@
# License for the specific language governing permissions and limitations
# under the License.
+from xml.etree.ElementTree import fromstring as xml_to_tree
+from xml.dom.minidom import parseString as xml_to_dom
+
+from nova import db
from nova import flags
from nova import test
+from nova.api import context
+from nova.api.ec2 import cloud
+from nova.auth import manager
+
+# Needed to get FLAGS.instances_path defined:
+from nova.compute import manager as compute_manager
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
-
class LibvirtConnTestCase(test.TrialTestCase):
+ def setUp(self):
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ FLAGS.instances_path = ''
+
def test_get_uri_and_template(self):
- class MockDataModel(object):
- def __init__(self):
- self.datamodel = { 'name' : 'i-cafebabe',
- 'memory_kb' : '1024000',
- 'basepath' : '/some/path',
- 'bridge_name' : 'br100',
- 'mac_address' : '02:12:34:46:56:67',
- 'vcpus' : 2 }
+ ip = '10.11.12.13'
+
+ instance = { 'internal_id' : 1,
+ 'memory_kb' : '1024000',
+ 'basepath' : '/some/path',
+ 'bridge_name' : 'br100',
+ 'mac_address' : '02:12:34:46:56:67',
+ 'vcpus' : 2,
+ 'project_id' : 'fake',
+ 'bridge' : 'br101',
+ 'instance_type' : 'm1.small'}
+
+ instance_ref = db.instance_create(None, instance)
+ network_ref = db.project_get_network(None, self.project.id)
+
+ fixed_ip = { 'address' : ip,
+ 'network_id' : network_ref['id'] }
+
+ fixed_ip_ref = db.fixed_ip_create(None, fixed_ip)
+ db.fixed_ip_update(None, ip, { 'allocated' : True,
+ 'instance_id' : instance_ref['id'] })
type_uri_map = { 'qemu' : ('qemu:///system',
- [lambda s: '<domain type=\'qemu\'>' in s,
- lambda s: 'type>hvm</type' in s,
- lambda s: 'emulator>/usr/bin/kvm' not in s]),
+ [(lambda t: t.find('.').get('type'), 'qemu'),
+ (lambda t: t.find('./os/type').text, 'hvm'),
+ (lambda t: t.find('./devices/emulator'), None)]),
'kvm' : ('qemu:///system',
- [lambda s: '<domain type=\'kvm\'>' in s,
- lambda s: 'type>hvm</type' in s,
- lambda s: 'emulator>/usr/bin/qemu<' not in s]),
+ [(lambda t: t.find('.').get('type'), 'kvm'),
+ (lambda t: t.find('./os/type').text, 'hvm'),
+ (lambda t: t.find('./devices/emulator'), None)]),
'uml' : ('uml:///system',
- [lambda s: '<domain type=\'uml\'>' in s,
- lambda s: 'type>uml</type' in s]),
- }
+ [(lambda t: t.find('.').get('type'), 'uml'),
+ (lambda t: t.find('./os/type').text, 'uml')]),
+ }
+
+ common_checks = [(lambda t: t.find('.').tag, 'domain'),
+ (lambda t: \
+ t.find('./devices/interface/filterref/parameter') \
+ .get('name'), 'IP'),
+ (lambda t: \
+ t.find('./devices/interface/filterref/parameter') \
+ .get('value'), '10.11.12.13')]
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
@@ -52,9 +88,17 @@ class LibvirtConnTestCase(test.TrialTestCase):
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
- for i, check in enumerate(checks):
- xml = conn.toXml(MockDataModel())
- self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
+ xml = conn.to_xml(instance_ref)
+ tree = xml_to_tree(xml)
+ for i, (check, expected_result) in enumerate(checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed check %d' % (xml, i))
+
+ for i, (check, expected_result) in enumerate(common_checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -67,3 +111,142 @@ class LibvirtConnTestCase(test.TrialTestCase):
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
+
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+
+class NWFilterTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NWFilterTestCase, self).setUp()
+
+ class Mock(object):
+ pass
+
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.APIRequestContext(self.user, self.project)
+
+ self.fake_libvirt_connection = Mock()
+
+ self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+
+
+ def test_cidr_rule_nwfilter_xml(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.create_security_group(self.context,
+ 'testgroup',
+ 'test group description')
+ cloud_controller.authorize_security_group_ingress(self.context,
+ 'testgroup',
+ from_port='80',
+ to_port='81',
+ ip_protocol='tcp',
+ cidr_ip='0.0.0.0/0')
+
+
+ security_group = db.security_group_get_by_name(self.context,
+ 'fake',
+ 'testgroup')
+
+ xml = self.fw.security_group_to_nwfilter_xml(security_group.id)
+
+ dom = xml_to_dom(xml)
+ self.assertEqual(dom.firstChild.tagName, 'filter')
+
+ rules = dom.getElementsByTagName('rule')
+ self.assertEqual(len(rules), 1)
+
+ # It's supposed to allow inbound traffic.
+ self.assertEqual(rules[0].getAttribute('action'), 'accept')
+ self.assertEqual(rules[0].getAttribute('direction'), 'in')
+
+ # Must be lower priority than the base filter (which blocks everything)
+ self.assertTrue(int(rules[0].getAttribute('priority')) < 1000)
+
+ ip_conditions = rules[0].getElementsByTagName('tcp')
+ self.assertEqual(len(ip_conditions), 1)
+ self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0')
+ self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
+ self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
+ self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')
+
+
+ self.teardown_security_group()
+
+ def teardown_security_group(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.delete_security_group(self.context, 'testgroup')
+
+
+ def setup_and_return_security_group(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.create_security_group(self.context,
+ 'testgroup',
+ 'test group description')
+ cloud_controller.authorize_security_group_ingress(self.context,
+ 'testgroup',
+ from_port='80',
+ to_port='81',
+ ip_protocol='tcp',
+ cidr_ip='0.0.0.0/0')
+
+ return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
+
+ def test_creates_base_rule_first(self):
+ # These come pre-defined by libvirt
+ self.defined_filters = ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']
+
+ self.recursive_depends = {}
+ for f in self.defined_filters:
+ self.recursive_depends[f] = []
+
+ def _filterDefineXMLMock(xml):
+ dom = xml_to_dom(xml)
+ name = dom.firstChild.getAttribute('name')
+ self.recursive_depends[name] = []
+ for f in dom.getElementsByTagName('filterref'):
+ ref = f.getAttribute('filter')
+ self.assertTrue(ref in self.defined_filters,
+ ('%s referenced filter that does ' +
+ 'not yet exist: %s') % (name, ref))
+ dependencies = [ref] + self.recursive_depends[ref]
+ self.recursive_depends[name] += dependencies
+
+ self.defined_filters.append(name)
+ return True
+
+ self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
+
+ instance_ref = db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake'})
+ inst_id = instance_ref['id']
+
+ def _ensure_all_called(_):
+ instance_filter = 'nova-instance-%s' % instance_ref['name']
+ secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
+ for required in [secgroup_filter, 'allow-dhcp-server',
+ 'no-arp-spoofing', 'no-ip-spoofing',
+ 'no-mac-spoofing']:
+ self.assertTrue(required in self.recursive_depends[instance_filter],
+ "Instance's filter does not include %s" % required)
+
+ self.security_group = self.setup_and_return_security_group()
+
+ db.instance_add_security_group(self.context, inst_id, self.security_group.id)
+ instance = db.instance_get(self.context, inst_id)
+
+ d = self.fw.setup_nwfilters_for_instance(instance)
+ d.addCallback(_ensure_all_called)
+ d.addCallback(lambda _:self.teardown_security_group())
+
+ return d
diff --git a/nova/utils.py b/nova/utils.py
index d18dd9843..10b27ffec 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -126,7 +126,13 @@ def runthis(prompt, cmd, check_exit_code = True):
def generate_uid(topic, size=8):
- return '%s-%s' % (topic, ''.join([random.choice('01234567890abcdefghijklmnopqrstuvwxyz') for x in xrange(size)]))
+ if topic == "i":
+ # Instances have integer internal ids.
+ return random.randint(0, 2**32-1)
+ else:
+ characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+ choices = [random.choice(characters) for x in xrange(size)]
+ return '%s-%s' % (topic, ''.join(choices))
def generate_mac():
diff --git a/nova/virt/images.py b/nova/virt/images.py
index a60bcc4c1..dc50764d9 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -29,6 +29,7 @@ from nova import flags
from nova import process
from nova.auth import manager
from nova.auth import signer
+from nova.objectstore import image
FLAGS = flags.FLAGS
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index 11df301f6..87b92b84a 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -10,7 +10,6 @@ auto eth0
iface eth0 inet static
address %(address)s
netmask %(netmask)s
- network %(network)s
broadcast %(broadcast)s
gateway %(gateway)s
dns-nameservers %(dns)s
diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template
index 17bd79b7c..2538b1ade 100644
--- a/nova/virt/libvirt.qemu.xml.template
+++ b/nova/virt/libvirt.qemu.xml.template
@@ -20,6 +20,10 @@
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
</interface>
<serial type="file">
<source path='%(basepath)s/console.log'/>
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template
index c039d6d90..bb8b47911 100644
--- a/nova/virt/libvirt.uml.xml.template
+++ b/nova/virt/libvirt.uml.xml.template
@@ -14,6 +14,10 @@
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
</interface>
<console type="file">
<source path='%(basepath)s/console.log'/>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index f6d8aace6..6ef5aa472 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -25,14 +25,16 @@ import logging
import os
import shutil
+import IPy
from twisted.internet import defer
from twisted.internet import task
+from twisted.internet import threads
from nova import db
from nova import exception
from nova import flags
from nova import process
from nova import utils
from nova.auth import manager
from nova.compute import disk
from nova.compute import instance_types
@@ -60,6 +63,9 @@ flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
+flags.DEFINE_bool('allow_project_net_traffic',
+ True,
+ 'Whether to allow in project network traffic')
def get_connection(read_only):
@@ -134,7 +140,7 @@ class LibvirtConnection(object):
d.addCallback(lambda _: self._cleanup(instance))
# FIXME: What does this comment mean?
# TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
+ # WE'LL save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
timer = task.LoopingCall(f=None)
def _wait_for_shutdown():
@@ -214,6 +220,7 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
+ yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
@@ -285,7 +292,6 @@ class LibvirtConnection(object):
address = db.instance_get_fixed_address(None, inst['id'])
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
- 'network': network_ref['network'],
'netmask': network_ref['netmask'],
'gateway': network_ref['gateway'],
'broadcast': network_ref['broadcast'],
@@ -317,6 +323,9 @@ class LibvirtConnection(object):
network = db.instance_get_fixed_by_instance(None, inst['id'])
# FIXME(vish): stick this in db
instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
+ ip_address = db.instance_get_fixed_address({}, instance['id'])
+ # Assume that the gateway also acts as the dhcp server.
+ dhcp_server = network['gateway']
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -324,7 +333,9 @@ class LibvirtConnection(object):
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
'bridge_name': network['bridge'],
- 'mac_address': instance['mac_address']}
+ 'mac_address': instance['mac_address'],
+ 'ip_address': ip_address,
+ 'dhcp_server': dhcp_server }
libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
@@ -438,3 +449,195 @@ class LibvirtConnection(object):
"""
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
+
+
+ def refresh_security_group(self, security_group_id):
+ fw = NWFilterFirewall(self._conn)
+ fw.ensure_security_group_filter(security_group_id)
+
+
+class NWFilterFirewall(object):
+ """
+ This class implements a network filtering mechanism versatile
+ enough for EC2 style Security Group filtering by leveraging
+ libvirt's nwfilter.
+
+ First, all instances get a filter ("nova-base-filter") applied.
+ This filter drops all incoming ipv4 and ipv6 connections.
+ Outgoing connections are never blocked.
+
+ Second, every security group maps to a nwfilter filter(*).
+ NWFilters can be updated at runtime and changes are applied
+ immediately, so changes to security groups can be applied at
+ runtime (as mandated by the spec).
+
+ Security group rules are named "nova-secgroup-<id>" where <id>
+ is the internal id of the security group. They're applied only on
+ hosts that have instances in the security group in question.
+
+ Updates to security groups are done by updating the data model
+ (in response to API calls) followed by a request sent to all
+ the nodes with instances in the security group to refresh the
+ security group.
+
+ Each instance has its own NWFilter, which references the above
+ mentioned security group NWFilters. This was done because
+ interfaces can only reference one filter while filters can
+ reference multiple other filters. This has the added benefit of
+ actually being able to add and remove security groups from an
+ instance at run time. This functionality is not exposed anywhere,
+ though.
+
+ Outstanding questions:
+
+ The name is unique, so would there be any good reason to sync
+ the uuid across the nodes (by assigning it from the datamodel)?
+
+
+ (*) This sentence brought to you by the redundancy department of
+ redundancy.
+ """
+
+ def __init__(self, get_connection):
+ self._conn = get_connection
+
+
+ nova_base_filter = '''<filter name='nova-base' chain='root'>
+ <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
+ <filterref filter='no-mac-spoofing'/>
+ <filterref filter='no-ip-spoofing'/>
+ <filterref filter='no-arp-spoofing'/>
+ <filterref filter='allow-dhcp-server'/>
+ <filterref filter='nova-allow-dhcp-server'/>
+ <filterref filter='nova-base-ipv4'/>
+ <filterref filter='nova-base-ipv6'/>
+ </filter>'''
+
+ nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in' priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def nova_base_ipv4_filter(self):
+ retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction,action,priority in [('out','accept', 399),
+ ('inout','drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+
+ def nova_base_ipv6_filter(self):
+ retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction,action,priority in [('out','accept',399),
+ ('inout','drop',400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s-ipv6 />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+
+ def nova_project_filter(self, project, net, mask):
+ retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
+ for protocol in ['tcp', 'udp', 'icmp']:
+ retval += """<rule action='accept' direction='in' priority='200'>
+ <%s srcipaddr='%s' srcipmask='%s' />
+ </rule>""" % (protocol, net, mask)
+ retval += '</filter>'
+ return retval
+
+
+ def _define_filter(self, xml):
+ if callable(xml):
+ xml = xml()
+ d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
+ return d
+
+
+ @staticmethod
+ def _get_net_and_mask(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.netmask())
+
+ @defer.inlineCallbacks
+ def setup_nwfilters_for_instance(self, instance):
+ """
+ Creates an NWFilter for the given instance. In the process,
+ it makes sure the filters for the security groups as well as
+ the base filter are all in place.
+ """
+
+ yield self._define_filter(self.nova_base_ipv4_filter)
+ yield self._define_filter(self.nova_base_ipv6_filter)
+ yield self._define_filter(self.nova_dhcp_filter)
+ yield self._define_filter(self.nova_base_filter)
+
+ nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" +
+ " <filterref filter='nova-base' />\n"
+ ) % instance['name']
+
+ if FLAGS.allow_project_net_traffic:
+ network_ref = db.project_get_network({}, instance['project_id'])
+ net, mask = self._get_net_and_mask(network_ref['cidr'])
+ project_filter = self.nova_project_filter(instance['project_id'],
+ net, mask)
+ yield self._define_filter(project_filter)
+
+ nwfilter_xml += (" <filterref filter='nova-project-%s' />\n"
+ ) % instance['project_id']
+
+ for security_group in instance.security_groups:
+ yield self.ensure_security_group_filter(security_group['id'])
+
+ nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
+ ) % security_group['id']
+ nwfilter_xml += "</filter>"
+
+ yield self._define_filter(nwfilter_xml)
+ return
+
+ def ensure_security_group_filter(self, security_group_id):
+ return self._define_filter(
+ self.security_group_to_nwfilter_xml(security_group_id))
+
+
+ def security_group_to_nwfilter_xml(self, security_group_id):
+ security_group = db.security_group_get({}, security_group_id)
+ rule_xml = ""
+ for rule in security_group.rules:
+ rule_xml += "<rule action='accept' direction='in' priority='300'>"
+ if rule.cidr:
+ net, mask = self._get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port))
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,)
+ return xml
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index 0d06b1fce..118e0b687 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -42,10 +42,12 @@ from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
+from nova import db
from nova import flags
from nova import process
from nova import utils
from nova.auth.manager import AuthManager
+from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
@@ -113,32 +115,24 @@ class XenAPIConnection(object):
raise Exception('Attempted to create non-unique name %s' %
instance.name)
- if 'bridge_name' in instance.datamodel:
- network_ref = \
- yield self._find_network_with_bridge(
- instance.datamodel['bridge_name'])
- else:
- network_ref = None
-
- if 'mac_address' in instance.datamodel:
- mac_address = instance.datamodel['mac_address']
- else:
- mac_address = ''
+ network = db.project_get_network(None, instance.project_id)
+ network_ref = \
+ yield self._find_network_with_bridge(network.bridge)
- user = AuthManager().get_user(instance.datamodel['user_id'])
- project = AuthManager().get_project(instance.datamodel['project_id'])
+ user = AuthManager().get_user(instance.user_id)
+ project = AuthManager().get_project(instance.project_id)
vdi_uuid = yield self._fetch_image(
- instance.datamodel['image_id'], user, project, True)
+ instance.image_id, user, project, True)
kernel = yield self._fetch_image(
- instance.datamodel['kernel_id'], user, project, False)
+ instance.kernel_id, user, project, False)
ramdisk = yield self._fetch_image(
- instance.datamodel['ramdisk_id'], user, project, False)
+ instance.ramdisk_id, user, project, False)
vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid)
vm_ref = yield self._create_vm(instance, kernel, ramdisk)
yield self._create_vbd(vm_ref, vdi_ref, 0, True)
if network_ref:
- yield self._create_vif(vm_ref, network_ref, mac_address)
+ yield self._create_vif(vm_ref, network_ref, instance.mac_address)
logging.debug('Starting VM %s...', vm_ref)
yield self._call_xenapi('VM.start', vm_ref, False, False)
logging.info('Spawning VM %s created %s.', instance.name, vm_ref)
@@ -148,8 +142,9 @@ class XenAPIConnection(object):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
- mem = str(long(instance.datamodel['memory_kb']) * 1024)
- vcpus = str(instance.datamodel['vcpus'])
+ instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
+ vcpus = str(instance_type['vcpus'])
rec = {
'name_label': instance.name,
'name_description': '',
diff --git a/run_tests.py b/run_tests.py
index 4121f4c06..0b27ec6cf 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -63,7 +63,8 @@ from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
+from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
diff --git a/run_tests.sh b/run_tests.sh
index 6ea40d95e..ec727d094 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -h, --help Print this usage message"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
@@ -14,20 +15,12 @@ function usage {
exit
}
-function process_options {
- array=$1
- elements=${#array[@]}
- for (( x=0;x<$elements;x++)); do
- process_option ${array[${x}]}
- done
-}
-
function process_option {
- option=$1
- case $option in
+ case "$1" in
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
+ -f|--force) let force=1;;
esac
}
@@ -35,9 +28,11 @@ venv=.nova-venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
-options=("$@")
+force=0
-process_options $options
+for arg in "$@"; do
+ process_option $arg
+done
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
@@ -45,6 +40,12 @@ if [ $never_venv -eq 1 ]; then
exit
fi
+# Remove the virtual environment if --force used
+if [ $force -eq 1 ]; then
+ echo "Cleaning virtualenv..."
+ rm -rf ${venv}
+fi
+
if [ -e ${venv} ]; then
${with_venv} python run_tests.py $@
else
diff --git a/tools/pip-requires b/tools/pip-requires
index 1e2707be7..6c3940372 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -20,3 +20,4 @@ zope.interface==3.6.1
mox==0.5.0
-f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz
greenlet==0.3.1
+nose