summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorTodd Willey <todd@rubidine.com>2010-07-14 23:46:23 -0400
committerTodd Willey <todd@rubidine.com>2010-07-14 23:46:23 -0400
commit69bb10a9f3258be06e34dbb2b051ed2bdd31c1d7 (patch)
treedd9339d9f1ea371b5e5722112120c9f4cf729f21 /nova
parent702391e5d3f3cee5fe1d5e34d175f0fe0b5d0d7a (diff)
parent1624e2aa51d6a77fbcbbf75f756aa88d27d1c474 (diff)
Merge branch 'master' into vpnredis
Conflicts: nova/auth/users.py
Diffstat (limited to 'nova')
-rw-r--r--nova/adminclient.py7
-rw-r--r--nova/auth/fakeldap.py62
-rw-r--r--nova/auth/users.py11
-rw-r--r--nova/cloudpipe/pipelib.py5
-rw-r--r--nova/compute/disk.py30
-rw-r--r--nova/compute/interfaces.template18
-rw-r--r--nova/compute/linux_net.py12
-rw-r--r--nova/compute/model.py231
-rw-r--r--nova/compute/network.py120
-rw-r--r--nova/compute/node.py48
-rw-r--r--nova/crypto.py21
-rw-r--r--nova/datastore.py197
-rw-r--r--nova/endpoint/admin.py23
-rwxr-xr-xnova/endpoint/api.py3
-rw-r--r--nova/endpoint/cloud.py85
-rw-r--r--nova/flags.py3
-rw-r--r--nova/rpc.py28
-rw-r--r--nova/tests/api_unittest.py2
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/network_unittest.py141
-rw-r--r--nova/tests/objectstore_unittest.py2
-rw-r--r--nova/tests/storage_unittest.py17
-rw-r--r--nova/tests/validator_unittest.py3
-rw-r--r--nova/utils.py17
-rw-r--r--nova/volume/storage.py87
25 files changed, 719 insertions, 456 deletions
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 480e907c9..3d239fb1d 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -28,7 +28,8 @@ import boto
from boto.ec2.regioninfo import RegionInfo
class UserInfo(object):
- """ Information about a Nova user
+ """
+ Information about a Nova user, as parsed through SAX
fields include:
username
accesskey
@@ -46,11 +47,9 @@ class UserInfo(object):
def __repr__(self):
return 'UserInfo:%s' % self.username
- # this is needed by the sax parser, so ignore the ugly name
def startElement(self, name, attrs, connection):
return None
- # this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
if name == 'username':
self.username = str(value)
@@ -63,7 +62,7 @@ class UserInfo(object):
class HostInfo(object):
"""
- Information about a Nova Host:
+ Information about a Nova Host, as parsed through SAX:
Disk stats
Running Instances
Memory stats
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index e27ac57bb..116fcbb78 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -34,15 +34,19 @@ SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
+
class NO_SUCH_OBJECT(Exception):
pass
+
class OBJECT_CLASS_VIOLATION(Exception):
pass
+
def initialize(uri):
return FakeLDAP()
+
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
@@ -67,6 +71,7 @@ def _match_query(query, attrs):
(k, sep, v) = inner.partition('=')
return _match(k, v, attrs)
+
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
@@ -83,6 +88,7 @@ def _paren_groups(source):
result.append(source[start:pos+1])
return result
+
def _match(k, v, attrs):
"""Match a given key and value against an attribute list."""
if k not in attrs:
@@ -96,6 +102,7 @@ def _match(k, v, attrs):
return True
return False
+
def _subs(value):
"""Returns a list of subclass strings.
@@ -109,6 +116,32 @@ def _subs(value):
return [value] + subs[value]
return [value]
+
+def _from_json(encoded):
+ """Convert attribute values from json representation.
+
+ Args:
+ encoded -- a json encoded string
+
+ Returns a list of strings
+
+ """
+ return [str(x) for x in json.loads(encoded)]
+
+
+def _to_json(unencoded):
+ """Convert attribute values into json representation.
+
+ Args:
+ unencoded -- an unencoded string or list of strings. If it
+ is a single string, it will be converted into a list.
+
+ Returns a json string
+
+ """
+ return json.dumps(list(unencoded))
+
+
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
@@ -125,7 +158,7 @@ class FakeLDAP(object):
"""Add an object with the specified attributes at dn."""
key = "%s%s" % (self.__redis_prefix, dn)
- value_dict = dict([(k, self.__to_json(v)) for k, v in attr])
+ value_dict = dict([(k, _to_json(v)) for k, v in attr])
datastore.Redis.instance().hmset(key, value_dict)
def delete_s(self, dn):
@@ -145,12 +178,12 @@ class FakeLDAP(object):
key = "%s%s" % (self.__redis_prefix, dn)
for cmd, k, v in attrs:
- values = self.__from_json(redis.hget(key, k))
+ values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
else:
values.remove(v)
- values = redis.hset(key, k, self.__to_json(values))
+ values = redis.hset(key, k, _to_json(values))
def search_s(self, dn, scope, query=None, fields=None):
"""Search for all matching objects under dn using the query.
@@ -171,7 +204,7 @@ class FakeLDAP(object):
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
- attrs = dict([(k, self.__from_json(v))
+ attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
if not query or _match_query(query, attrs):
@@ -188,25 +221,4 @@ class FakeLDAP(object):
def __redis_prefix(self):
return 'ldap:'
- def __from_json(self, encoded):
- """Convert attribute values from json representation.
-
- Args:
- encoded -- a json encoded string
-
- Returns a list of strings
- """
- return [str(x) for x in json.loads(encoded)]
-
- def __to_json(self, unencoded):
- """Convert attribute values into json representation.
-
- Args:
- unencoded -- an unencoded string or list of strings. If it
- is a single string, it will be converted into a list.
-
- Returns a json string
-
- """
- return json.dumps(list(unencoded))
diff --git a/nova/auth/users.py b/nova/auth/users.py
index 29d10affd..2ac4bb6da 100644
--- a/nova/auth/users.py
+++ b/nova/auth/users.py
@@ -101,10 +101,17 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
+
flags.DEFINE_integer('vpn_start_port', 8000,
'Start port for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_end_port', 9999,
'End port for the cloudpipe VPN servers')
+
+flags.DEFINE_string('credential_cert_subject',
+ '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
+ 'OU=NovaDev/CN=%s-%s',
+ 'Subject for certificate for users')
+
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
@@ -590,7 +597,7 @@ class UserManager(object):
def __cert_subject(self, uid):
# FIXME(ja) - this should be pulled from a global configuration
- return "/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
+ return FLAGS.credential_cert_subject % (uid, utils.isotime())
class LDAPWrapper(object):
@@ -779,7 +786,7 @@ class LDAPWrapper(object):
def __create_group(self, group_dn, name, uid,
description, member_uids = None):
- if self.group_exists(name):
+ if self.group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 09da71c64..5f6ccf82e 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -76,12 +76,13 @@ class CloudPipe(object):
zippy.close()
def setup_keypair(self, user_id, project_id):
- key_name = '%s-key' % project_id
+ key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
try:
private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
- os.makedirs(key_dir)
+ if not os.path.exists(key_dir):
+ os.makedirs(key_dir)
with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
f.write(private_key)
except:
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e7090dad3..b6398f41e 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -36,7 +36,7 @@ from nova import exception
def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
"""Takes a single partition represented by infile and writes a bootable
drive image into outfile.
-
+
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
@@ -87,12 +87,14 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
% (infile, outfile, sector_size, primary_first))
@defer.inlineCallbacks
-def inject_key(key, image, partition=None, execute=None):
- """Injects a ssh key into a disk image.
- It adds the specified key to /root/.ssh/authorized_keys
+def inject_data(image, key=None, net=None, partition=None, execute=None):
+ """Injects a ssh key and optionally net data into a disk image.
+
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
+
If partition is not specified it mounts the image as a single partition.
+
"""
out, err = yield execute('sudo losetup -f --show %s' % image)
if err:
@@ -119,15 +121,17 @@ def inject_key(key, image, partition=None, execute=None):
raise exception.Error('Failed to mount filesystem: %s' % err)
try:
- # inject key file
- yield _inject_into_fs(key, tmpdir, execute=execute)
+ if key:
+ # inject key file
+ yield _inject_key_into_fs(key, tmpdir, execute=execute)
+ if net:
+ yield _inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
yield execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
- # TODO(termie): scary, is there any thing we can check here?
- yield execute('rm -rf %s' % tmpdir)
+ yield execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
yield execute('sudo kpartx -d %s' % device)
@@ -136,11 +140,17 @@ def inject_key(key, image, partition=None, execute=None):
yield execute('sudo losetup -d %s' % device)
@defer.inlineCallbacks
-def _inject_into_fs(key, fs, execute=None):
+def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- yield execute('sudo bash -c "cat >> %s"' % keyfile, '\n' + key + '\n')
+ yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
+@defer.inlineCallbacks
+def _inject_net_into_fs(net, fs, execute=None):
+ netfile = os.path.join(os.path.join(os.path.join(
+ fs, 'etc'), 'network'), 'interfaces')
+ yield execute('sudo tee %s' % netfile, net)
diff --git a/nova/compute/interfaces.template b/nova/compute/interfaces.template
new file mode 100644
index 000000000..11df301f6
--- /dev/null
+++ b/nova/compute/interfaces.template
@@ -0,0 +1,18 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto eth0
+iface eth0 inet static
+ address %(address)s
+ netmask %(netmask)s
+ network %(network)s
+ broadcast %(broadcast)s
+ gateway %(gateway)s
+ dns-nameservers %(dns)s
+
+
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index 0bd5ce007..c9e5bb1a7 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -62,6 +62,9 @@ def remove_rule(cmd):
def bind_public_ip(ip, interface):
runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface))
+
+def unbind_public_ip(ip, interface):
+ runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface))
def vlan_create(net):
""" create a vlan on on a bridge device unless vlan already exists """
@@ -95,10 +98,10 @@ def dnsmasq_cmd(net):
' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
' --listen-address=%s' % net.dhcp_listen_address,
' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % (net.dhcp_range_start),
- ' --dhcp-lease-max=61',
+ ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start),
' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
- ' --dhcp-leasefile=%s' % dhcp_file(net['vlan'], 'leases')]
+ ' --dhcp-script=%s' % bin_file('dhcpleasor.py'),
+ ' --leasefile-ro']
return ''.join(cmd)
def hostDHCP(network, host, mac):
@@ -154,6 +157,9 @@ def dhcp_file(vlan, kind):
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
+def bin_file(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
def dnsmasq_pid_for(network):
""" the pid for prior dnsmasq instance for a vlan,
returns None if no pid file exists
diff --git a/nova/compute/model.py b/nova/compute/model.py
index ad1f97a0a..cc5f74b3d 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -57,19 +57,6 @@ from nova import utils
FLAGS = flags.FLAGS
-class ConnectionError(exception.Error):
- pass
-
-
-def absorb_connection_error(fn):
- def _wrapper(*args, **kwargs):
- try:
- return fn(*args, **kwargs)
- except redis.exceptions.ConnectionError, ce:
- raise ConnectionError(str(ce))
- return _wrapper
-
-
# TODO(todd): Implement this at the class level for Instance
class InstanceDirectory(object):
"""an api for interacting with the global state of instances"""
@@ -81,7 +68,7 @@ class InstanceDirectory(object):
def __getitem__(self, item):
return self.get(item)
- @absorb_connection_error
+ @datastore.absorb_connection_error
def by_project(self, project):
"""returns a list of instance objects for a project"""
for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
@@ -105,12 +92,12 @@ class InstanceDirectory(object):
"""returns the instance a volume is attached to"""
pass
- @absorb_connection_error
+ @datastore.absorb_connection_error
def exists(self, instance_id):
return datastore.Redis.instance().sismember('instances', instance_id)
@property
- @absorb_connection_error
+ @datastore.absorb_connection_error
def all(self):
"""returns a list of all instances"""
for instance_id in datastore.Redis.instance().smembers('instances'):
@@ -121,196 +108,8 @@ class InstanceDirectory(object):
instance_id = utils.generate_uid('i')
return self.get(instance_id)
-class BasicModel(object):
- """
- All Redis-backed data derives from this class.
-
- You MUST specify an identifier() property that returns a unique string
- per instance.
-
- You MUST have an initializer that takes a single argument that is a value
- returned by identifier() to load a new class with.
-
- You may want to specify a dictionary for default_state().
-
- You may also specify override_type at the class left to use a key other
- than __class__.__name__.
-
- You override save and destroy calls to automatically build and destroy
- associations.
- """
-
- override_type = None
-
- @absorb_connection_error
- def __init__(self):
- self.initial_state = {}
- self.state = datastore.Redis.instance().hgetall(self.__redis_key)
- if self.state:
- self.initial_state = self.state
- else:
- self.state = self.default_state()
-
- def default_state(self):
- """You probably want to define this in your subclass"""
- return {}
-
- @classmethod
- def _redis_name(cls):
- return self.override_type or cls.__name__
-
- @classmethod
- def lookup(cls, identifier):
- rv = cls(identifier)
- if rv.is_new_record():
- return None
- else:
- return rv
-
- @classmethod
- @absorb_connection_error
- def all(cls):
- """yields all objects in the store"""
- redis_set = cls._redis_set_name(cls.__name__)
- for identifier in datastore.Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- @absorb_connection_error
- def associated_to(cls, foreign_type, foreign_id):
- redis_set = cls._redis_association_name(foreign_type, foreign_id)
- for identifier in datastore.Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- def _redis_set_name(cls, kls_name):
- # stupidly pluralize (for compatiblity with previous codebase)
- return kls_name.lower() + "s"
- @classmethod
- def _redis_association_name(cls, foreign_type, foreign_id):
- return cls._redis_set_name("%s:%s:%s" %
- (foreign_type, foreign_id, cls.__name__))
-
- @property
- def identifier(self):
- """You DEFINITELY want to define this in your subclass"""
- raise NotImplementedError("Your sublcass should define identifier")
-
- @property
- def __redis_key(self):
- return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
-
- def __repr__(self):
- return "<%s:%s>" % (self.__class__.__name__, self.identifier)
-
- def keys(self):
- return self.state.keys()
-
- def copy(self):
- copyDict = {}
- for item in self.keys():
- copyDict[item] = self[item]
- return copyDict
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def update(self, update_dict):
- return self.state.update(update_dict)
-
- def setdefault(self, item, default):
- return self.state.setdefault(item, default)
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
-
- def __delitem__(self, item):
- """We don't support this"""
- raise Exception("Silly monkey, models NEED all their properties.")
-
- def is_new_record(self):
- return self.initial_state == {}
-
- @absorb_connection_error
- def add_to_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().sadd(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def associate_with(self, foreign_type, foreign_id):
- # note the extra 's' on the end is for plurality
- # to match the old data without requiring a migration of any sort
- self.add_associated_model_to_its_set(foreign_type, foreign_id)
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- datastore.Redis.instance().sadd(redis_set, self.identifier)
-
- @absorb_connection_error
- def unassociate_with(self, foreign_type, foreign_id):
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- datastore.Redis.instance().srem(redis_set, self.identifier)
-
- def add_associated_model_to_its_set(self, my_type, my_id):
- table = globals()
- klsname = my_type.capitalize()
- if table.has_key(klsname):
- my_class = table[klsname]
- my_inst = my_class(my_id)
- my_inst.save()
- else:
- logging.warning("no model class for %s when building"
- " association from %s",
- klsname, self)
-
- @absorb_connection_error
- def save(self):
- """
- update the directory with the state from this model
- also add it to the index of items of the same type
- then set the initial_state = state so new changes are tracked
- """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- if self.is_new_record():
- self["create_time"] = utils.isotime()
- for key, val in self.state.iteritems():
- # if (not self.initial_state.has_key(key)
- # or self.initial_state[key] != val):
- datastore.Redis.instance().hset(self.__redis_key, key, val)
- self.add_to_index()
- self.initial_state = self.state
- return True
-
- @absorb_connection_error
- def destroy(self):
- """
- deletes all related records from datastore.
- does NOT do anything to running libvirt state.
- """
- logging.info("Destroying datamodel for %s %s",
- self.__class__.__name__, self.identifier)
- datastore.Redis.instance().delete(self.__redis_key)
- self.remove_from_index()
- return True
-
-
-class Instance(BasicModel):
+class Instance(datastore.BasicModel):
"""Wrapper around stored properties of an instance"""
def __init__(self, instance_id):
@@ -363,12 +162,11 @@ class Instance(BasicModel):
def destroy(self):
"""Destroy associations, then destroy the object"""
self.unassociate_with("project", self.project)
+ self.unassociate_with("node", self['node_name'])
return super(Instance, self).destroy()
-class Host(BasicModel):
- """
- A Host is the machine where a Daemon is running.
- """
+class Host(datastore.BasicModel):
+ """A Host is the machine where a Daemon is running."""
def __init__(self, hostname):
"""loads an instance from the datastore if exists"""
@@ -384,10 +182,8 @@ class Host(BasicModel):
return self.hostname
-class Daemon(BasicModel):
- """
- A Daemon is a job (compute, api, network, ...) that runs on a host.
- """
+class Daemon(datastore.BasicModel):
+ """A Daemon is a job (compute, api, network, ...) that runs on a host."""
def __init__(self, host_or_combined, binpath=None):
"""loads an instance from the datastore if exists"""
@@ -404,9 +200,9 @@ class Daemon(BasicModel):
super(Daemon, self).__init__()
def default_state(self):
- return {"hostname": self.hostname,
- "binary": self.binary,
- "updated_at": utils.isotime()
+ return {"hostname": self.hostname,
+ "binary": self.binary,
+ "updated_at": utils.isotime()
}
@property
@@ -429,8 +225,7 @@ class Daemon(BasicModel):
def heartbeat(self):
self['updated_at'] = utils.isotime()
- self.save()
- return True
+ return self.save()
@classmethod
def by_host(cls, hostname):
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 7b37cde6d..e5d3d18df 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -31,11 +31,10 @@ from nova import vendor
import IPy
from nova import datastore
-import nova.exception
-from nova.compute import exception
from nova import flags
-from nova.compute import model
from nova import utils
+from nova import exception
+from nova.compute import exception as compute_exception
from nova.auth import users
import linux_net
@@ -59,10 +58,30 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
+flags.DEFINE_boolean('simple_network', False,
+ 'Use simple networking instead of vlans')
+flags.DEFINE_string('simple_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
+ 'Available ips for simple network')
+flags.DEFINE_string('simple_network_template',
+ utils.abspath('compute/interfaces.template'),
+ 'Template file for simple network')
+flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('simple_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
+ 'Broadcast for simple network')
+flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('simple_network_dns', '8.8.4.4',
+ 'Dns for simple network')
+
logging.getLogger().setLevel(logging.DEBUG)
-class Vlan(model.BasicModel):
+class Vlan(datastore.BasicModel):
def __init__(self, project, vlan):
"""
Since we don't want to try and find a vlan by its identifier,
@@ -82,7 +101,7 @@ class Vlan(model.BasicModel):
return instance
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def lookup(cls, project):
set_name = cls._redis_set_name(cls.__name__)
vlan = datastore.Redis.instance().hget(set_name, project)
@@ -92,14 +111,14 @@ class Vlan(model.BasicModel):
return None
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def dict_by_project(cls):
"""a hash of project:vlan"""
set_name = cls._redis_set_name(cls.__name__)
return datastore.Redis.instance().hgetall(set_name)
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def dict_by_vlan(cls):
"""a hash of vlan:project"""
set_name = cls._redis_set_name(cls.__name__)
@@ -110,23 +129,23 @@ class Vlan(model.BasicModel):
return rv
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def all(cls):
set_name = cls._redis_set_name(cls.__name__)
for project,vlan in datastore.Redis.instance().hgetall(set_name):
yield cls(project, vlan)
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def save(self):
"""
Vlan saves state into a giant hash named "vlans", with keys of
- proejct_id and value of valn number. Therefore, we skip the
+ project_id and value of vlan number. Therefore, we skip the
default way of saving into "vlan:ID" and adding to a set of "vlans".
"""
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def destroy(self):
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hdel(set_name, self.project)
@@ -135,6 +154,7 @@ class Vlan(model.BasicModel):
vlan = int(self.vlan_id)
network = IPy.IP(FLAGS.private_range)
start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
+ # minus one for the gateway.
return "%s-%s" % (network[start],
network[start + FLAGS.network_size - 1])
@@ -144,7 +164,7 @@ class Vlan(model.BasicModel):
# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
-class BaseNetwork(model.BasicModel):
+class BaseNetwork(datastore.BasicModel):
override_type = 'network'
@property
@@ -188,18 +208,9 @@ class BaseNetwork(model.BasicModel):
return self.network.broadcast()
@property
- def gateway(self):
- return self.network[1]
-
- @property
def bridge_name(self):
return "br%s" % (self["vlan"])
- def range(self):
- # the .2 address is always CloudPipe
- for idx in range(3, len(self.network)-2):
- yield self.network[idx]
-
@property
def user(self):
return users.UserManager.instance().get_user(self['user_id'])
@@ -214,7 +225,7 @@ class BaseNetwork(model.BasicModel):
@property
def hosts(self):
- return datastore.Redis.instance().hgetall(self._hosts_key)
+ return datastore.Redis.instance().hgetall(self._hosts_key) or {}
def _add_host(self, _user_id, _project_id, host, target):
datastore.Redis.instance().hset(self._hosts_key, host, target)
@@ -241,14 +252,22 @@ class BaseNetwork(model.BasicModel):
self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
- raise exception.NoMoreAddresses()
+ raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+ (project_id, str(self.network)))
- def deallocate_ip(self, ip_str):
+ def lease_ip(self, ip_str):
+ logging.debug("Leasing allocated IP %s" % (ip_str))
+
+ def release_ip(self, ip_str):
if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
self.deexpress(address=ip_str)
self._rem_host(ip_str)
+ def deallocate_ip(self, ip_str):
+ # Do nothing for now, cleanup on ip release
+ pass
+
def list_addresses(self):
for address in self.hosts:
yield address
@@ -280,8 +299,6 @@ class BridgedNetwork(BaseNetwork):
def get_network_for_project(cls, user_id, project_id, security_group):
vlan = get_vlan_for_project(project_id)
network_str = vlan.subnet()
- logging.debug("creating network on vlan %s with network string %s",
- vlan.vlan_id, network_str)
return cls.create(user_id, project_id, security_group, vlan.vlan_id,
network_str)
@@ -307,7 +324,7 @@ class DHCPNetwork(BridgedNetwork):
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
- logging.debug("Initing DHCPNetwork object...")
+ # logging.debug("Initing DHCPNetwork object...")
self.dhcp_listen_address = self.network[1]
self.dhcp_range_start = self.network[3]
self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
@@ -351,7 +368,7 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
-class PublicAddress(model.BasicModel):
+class PublicAddress(datastore.BasicModel):
override_type = "address"
def __init__(self, address):
@@ -422,14 +439,14 @@ class PublicNetworkController(BaseNetwork):
def associate_address(self, public_ip, private_ip, instance_id):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
if addr.get('private_ip', None) == private_ip:
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') != 'available':
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
addr['instance_id'] = instance_id
addr.save()
@@ -437,10 +454,10 @@ class PublicNetworkController(BaseNetwork):
def disassociate_address(self, public_ip):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') == 'available':
- raise exception.AddressNotAssociated()
+ raise compute_exception.AddressNotAssociated()
self.deexpress(address=public_ip)
addr['private_ip'] = 'available'
addr['instance_id'] = 'available'
@@ -470,6 +487,7 @@ class PublicNetworkController(BaseNetwork):
def deexpress(self, address=None):
addr = self.get_host(address)
private_ip = addr['private_ip']
+ linux_net.unbind_public_ip(address, FLAGS.public_interface)
linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
% (address, private_ip))
linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
@@ -502,22 +520,42 @@ def get_vlan_for_project(project_id):
# NOTE(todd): This doesn't check for vlan id match, because
# it seems to be assumed that vlan<=>project is
# always a 1:1 mapping. It could be made way
- # sexier if it didn't fight agains the way
+ # sexier if it didn't fight against the way
# BasicModel worked and used associate_with
# to build connections to projects.
+ # NOTE(josh): This is here because we want to make sure we
+ # don't orphan any VLANs. It is basically
+ # garbage collection for after projects abandoned
+ # their reference.
vlan.project_id = project_id
vlan.save()
return vlan
else:
return Vlan.create(project_id, vnum)
- raise exception.AddressNotAllocated("Out of VLANs")
+ raise compute_exception.AddressNotAllocated("Out of VLANs")
def get_network_by_address(address):
+ logging.debug("Get Network By Address: %s" % address)
for project in users.UserManager.instance().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
+ logging.debug("Found %s in %s" % (address, project.id))
return net
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
+
+def allocate_simple_ip():
+ redis = datastore.Redis.instance()
+ if not redis.exists('ips') and not len(redis.keys('instances:*')):
+ for address in FLAGS.simple_network_ips:
+ redis.sadd('ips', address)
+ address = redis.spop('ips')
+ if not address:
+ raise exception.NoMoreAddresses()
+ return address
+
+def deallocate_simple_ip(address):
+ datastore.Redis.instance().sadd('ips', address)
+
def allocate_vpn_ip(user_id, project_id, mac):
return get_project_network(project_id).allocate_vpn_ip(mac)
@@ -527,6 +565,12 @@ def allocate_ip(user_id, project_id, mac):
def deallocate_ip(address):
return get_network_by_address(address).deallocate_ip(address)
+
+def release_ip(address):
+ return get_network_by_address(address).release_ip(address)
+
+def lease_ip(address):
+ return get_network_by_address(address).lease_ip(address)
def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
@@ -534,7 +578,7 @@ def get_project_network(project_id, security_group='default'):
# Refactor to still use the LDAP backend, but not User specific.
project = users.UserManager.instance().get_project(project_id)
if not project:
- raise nova.exception.Error("Project %s doesn't exist, uhoh." %
+ raise exception.Error("Project %s doesn't exist, uhoh." %
project_id)
return DHCPNetwork.get_network_for_project(project.project_manager_id,
project.id, security_group)
diff --git a/nova/compute/node.py b/nova/compute/node.py
index 7859d71c0..f41bc34ea 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -30,7 +30,6 @@ import base64
import json
import logging
import os
-import random
import shutil
import sys
@@ -58,7 +57,7 @@ from nova.objectstore import image # for image_path flag
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
- 'Network XML Template')
+ 'Libvirt XML Template')
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
flags.DEFINE_string('instances_path', utils.abspath('../instances'),
@@ -144,8 +143,7 @@ class Node(object, service.Service):
@defer.inlineCallbacks
def report_state(self, nodename, daemon):
- # TODO(termie): Termie has an idea for wrapping this connection failure
- # pattern to be more elegant. -todd
+ # TODO(termie): make this pattern be more elegant. -todd
try:
record = model.Daemon(nodename, daemon)
record.heartbeat()
@@ -164,9 +162,10 @@ class Node(object, service.Service):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
inst = self.instdir.get(instance_id)
- # TODO: Get the real security group of launch in here
- security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
+ if not FLAGS.simple_network:
+ # TODO: Get the real security group of launch in here
+ security_group = "default"
+ net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
inst['project_id'],
security_group).express()
inst['node_name'] = FLAGS.node_name
@@ -470,7 +469,7 @@ class Instance(object):
# ensure directories exist and are writable
yield self._pool.simpleExecute('mkdir -p %s' % basepath())
yield self._pool.simpleExecute('chmod 0777 %s' % basepath())
-
+
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
@@ -478,11 +477,11 @@ class Instance(object):
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
-
+
if FLAGS.fake_libvirt:
logging.info('fake_libvirt, nothing to do for create_image')
raise defer.returnValue(None);
-
+
if FLAGS.use_s3:
_fetch_file = self._fetch_s3_image
else:
@@ -495,12 +494,23 @@ class Instance(object):
if not os.path.exists(basepath('ramdisk')):
yield _fetch_file(data['ramdisk_id'], basepath('ramdisk'))
- execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, input=input, error_ok=1)
-
- if data['key_data']:
- logging.info('Injecting key data into image %s', data['image_id'])
- yield disk.inject_key(
- data['key_data'], basepath('disk-raw'), execute=execute)
+ execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd,
+ input=input,
+ error_ok=1)
+
+ key = data['key_data']
+ net = None
+ if FLAGS.simple_network:
+ with open(FLAGS.simple_network_template) as f:
+ net = f.read() % {'address': data['private_dns_name'],
+ 'network': FLAGS.simple_network_network,
+ 'netmask': FLAGS.simple_network_netmask,
+ 'gateway': FLAGS.simple_network_gateway,
+ 'broadcast': FLAGS.simple_network_broadcast,
+ 'dns': FLAGS.simple_network_dns}
+ if key or net:
+ logging.info('Injecting data into image %s', data['image_id'])
+ yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
if os.path.exists(basepath('disk')):
yield self._pool.simpleExecute('rm -f %s' % basepath('disk'))
@@ -509,7 +519,7 @@ class Instance(object):
* 1024 * 1024 * 1024)
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
-
+
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self):
@@ -520,7 +530,7 @@ class Instance(object):
self.set_state(Instance.NOSTATE, 'launching')
logging.info('self %s', self)
try:
- yield self._create_image(xml)
+ yield self._create_image(xml)
self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
# a callback to check for successful boot
@@ -543,8 +553,6 @@ class Instance(object):
timer.f = _wait_for_boot
timer.start(interval=0.5, now=True)
except Exception, ex:
- # FIXME(todd): this is just for debugging during testing
- print "FUUUUUUUUUUUUUUUUUUUUUU: %s" % ex
logging.debug(ex)
self.set_state(Instance.SHUTDOWN)
diff --git a/nova/crypto.py b/nova/crypto.py
index 413796ccc..fc6ed714f 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -23,10 +23,12 @@ Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
+import base64
import hashlib
import logging
import os
import shutil
+import struct
import tempfile
import time
import utils
@@ -86,14 +88,17 @@ def generate_key_pair(bits=1024):
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
- """requires lsh-utils"""
- convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
- + " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
- + " transport | lsh-export-key --openssh"
- (out, err) = utils.execute(convert, ssl_public_key)
- if err:
- raise exception.Error("Failed to generate key: %s", err)
- return '%s %s@%s\n' %(out.strip(), name, suffix)
+ rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
+ e, n = rsa_key.pub()
+
+ key_type = 'ssh-rsa'
+
+ key_data = struct.pack('>I', len(key_type))
+ key_data += key_type
+ key_data += '%s%s' % (e,n)
+
+ b64_blob = base64.b64encode(key_data)
+ return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
def generate_x509_cert(subject, bits=1024):
diff --git a/nova/datastore.py b/nova/datastore.py
index 5a9b80c62..7d6e00823 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -37,6 +37,7 @@ import time
from nova import vendor
import redis
+from nova import exception
from nova import flags
from nova import utils
@@ -305,3 +306,199 @@ def Keeper(prefix=''):
'sqlite': SqliteKeeper}
return KEEPERS[FLAGS.keeper_backend](prefix)
+class ConnectionError(exception.Error):
+ pass
+
+
+def absorb_connection_error(fn):
+ def _wrapper(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except redis.exceptions.ConnectionError, ce:
+ raise ConnectionError(str(ce))
+ return _wrapper
+
+
+class BasicModel(object):
+ """
+ All Redis-backed data derives from this class.
+
+ You MUST specify an identifier() property that returns a unique string
+ per instance.
+
+ You MUST have an initializer that takes a single argument that is a value
+ returned by identifier() to load a new class with.
+
+ You may want to specify a dictionary for default_state().
+
+ You may also specify override_type at the class left to use a key other
+ than __class__.__name__.
+
+ You override save and destroy calls to automatically build and destroy
+ associations.
+ """
+
+ override_type = None
+
+ @absorb_connection_error
+ def __init__(self):
+ self.initial_state = {}
+ self.state = Redis.instance().hgetall(self.__redis_key)
+ if self.state:
+ self.initial_state = self.state
+ else:
+ self.state = self.default_state()
+
+ def default_state(self):
+ """You probably want to define this in your subclass"""
+ return {}
+
+ @classmethod
+ def _redis_name(cls):
+ return self.override_type or cls.__name__
+
+ @classmethod
+ def lookup(cls, identifier):
+ rv = cls(identifier)
+ if rv.is_new_record():
+ return None
+ else:
+ return rv
+
+ @classmethod
+ @absorb_connection_error
+ def all(cls):
+ """yields all objects in the store"""
+ redis_set = cls._redis_set_name(cls.__name__)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ @absorb_connection_error
+ def associated_to(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ def _redis_set_name(cls, kls_name):
+ # stupidly pluralize (for compatiblity with previous codebase)
+ return kls_name.lower() + "s"
+
+ @classmethod
+ def _redis_association_name(cls, foreign_type, foreign_id):
+ return cls._redis_set_name("%s:%s:%s" %
+ (foreign_type, foreign_id, cls.__name__))
+
+ @property
+ def identifier(self):
+ """You DEFINITELY want to define this in your subclass"""
+ raise NotImplementedError("Your subclass should define identifier")
+
+ @property
+ def __redis_key(self):
+ return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
+
+ def __repr__(self):
+ return "<%s:%s>" % (self.__class__.__name__, self.identifier)
+
+ def keys(self):
+ return self.state.keys()
+
+ def copy(self):
+ copyDict = {}
+ for item in self.keys():
+ copyDict[item] = self[item]
+ return copyDict
+
+ def get(self, item, default):
+ return self.state.get(item, default)
+
+ def update(self, update_dict):
+ return self.state.update(update_dict)
+
+ def setdefault(self, item, default):
+ return self.state.setdefault(item, default)
+
+ def __getitem__(self, item):
+ return self.state[item]
+
+ def __setitem__(self, item, val):
+ self.state[item] = val
+ return self.state[item]
+
+ def __delitem__(self, item):
+ """We don't support this"""
+ raise Exception("Silly monkey, models NEED all their properties.")
+
+ def is_new_record(self):
+ return self.initial_state == {}
+
+ @absorb_connection_error
+ def add_to_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().sadd(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def associate_with(self, foreign_type, foreign_id):
+ # note the extra 's' on the end is for plurality
+ # to match the old data without requiring a migration of any sort
+ self.add_associated_model_to_its_set(foreign_type, foreign_id)
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().sadd(redis_set, self.identifier)
+
+ @absorb_connection_error
+ def unassociate_with(self, foreign_type, foreign_id):
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().srem(redis_set, self.identifier)
+
+ def add_associated_model_to_its_set(self, my_type, my_id):
+ table = globals()
+ klsname = my_type.capitalize()
+ if table.has_key(klsname):
+ my_class = table[klsname]
+ my_inst = my_class(my_id)
+ my_inst.save()
+ else:
+ logging.warning("no model class for %s when building"
+ " association from %s",
+ klsname, self)
+
+ @absorb_connection_error
+ def save(self):
+ """
+ update the directory with the state from this model
+ also add it to the index of items of the same type
+ then set the initial_state = state so new changes are tracked
+ """
+ # TODO(ja): implement hmset in redis-py and use it
+ # instead of multiple calls to hset
+ if self.is_new_record():
+ self["create_time"] = utils.isotime()
+ for key, val in self.state.iteritems():
+ Redis.instance().hset(self.__redis_key, key, val)
+ self.add_to_index()
+ self.initial_state = self.state
+ return True
+
+ @absorb_connection_error
+ def destroy(self):
+ """deletes all related records from datastore."""
+ logging.info("Destroying datamodel for %s %s",
+ self.__class__.__name__, self.identifier)
+ Redis.instance().delete(self.__redis_key)
+ self.remove_from_index()
+ return True
+
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index 4471a4ef4..ceab7d1f9 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -24,6 +24,9 @@ Admin API controller, exposed through http via the api worker.
import base64
+from nova.auth import users
+from nova.compute import model
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -62,28 +65,24 @@ class AdminController(object):
allowing project managers to administer project users.
"""
- def __init__(self, user_manager, host_manager):
- self.user_manager = user_manager
- self.host_manager = host_manager
-
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
- return user_dict(self.user_manager.get_user(name))
+ return user_dict(users.UserManager.instance().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in self.user_manager.get_users()] }
+ [user_dict(u) for u in users.UserManager.instance().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
- return user_dict(self.user_manager.create_user(name))
+ return user_dict(users.UserManager.instance().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -91,7 +90,7 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
- self.user_manager.delete_user(name)
+ users.UserManager.instance().delete_user(name)
return True
@@ -103,8 +102,8 @@ class AdminController(object):
"""
if project is None:
project = name
- project = self.user_manager.get_project(project)
- user = self.user_manager.get_user(name)
+ project = users.UserManager.instance().get_project(project)
+ user = users.UserManager.instance().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
@@ -117,9 +116,9 @@ class AdminController(object):
* DHCP servers running
* Iptables / bridges
"""
- return {'hostSet': [host_dict(h) for h in self.host_manager.all()]}
+ return {'hostSet': [host_dict(h) for h in model.Host.all()]}
@admin_only
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
- return host_dict(self.host_manager.lookup(name))
+ return host_dict(model.Host.lookup(name))
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index ece487b7b..86a4551ad 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -324,7 +324,7 @@ class APIRequestHandler(tornado.web.RequestHandler):
class APIServerApplication(tornado.web.Application):
- def __init__(self, user_manager, controllers):
+ def __init__(self, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
@@ -341,5 +341,4 @@ class APIServerApplication(tornado.web.Application):
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
- self.user_manager = user_manager
self.controllers = controllers
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index d6c164163..9dccc24dc 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -115,9 +115,9 @@ class CloudController(object):
def get_metadata(self, ip):
i = self.get_instance_by_ip(ip)
- mpi = self._get_mpi_data(i['project_id'])
if i is None:
return None
+ mpi = self._get_mpi_data(i['project_id'])
if i['key_name']:
keys = {
'0': {
@@ -170,6 +170,28 @@ class CloudController(object):
'zoneState': 'available'}]}
@rbac.allow('all')
+ def describe_regions(self, context, region_name=None, **kwargs):
+ # TODO(vish): region_name is an array. Support filtering
+ return {'regionInfo': [{'regionName': 'nova',
+ 'regionUrl': FLAGS.ec2_url}]}
+
+ @rbac.allow('all')
+ def describe_snapshots(self,
+ context,
+ snapshot_id=None,
+ owner=None,
+ restorable_by=None,
+ **kwargs):
+ return {'snapshotSet': [{'snapshotId': 'fixme',
+ 'volumeId': 'fixme',
+ 'status': 'fixme',
+ 'startTime': 'fixme',
+ 'progress': 'fixme',
+ 'ownerId': 'fixme',
+ 'volumeSize': 0,
+ 'description': 'fixme'}]}
+
+ @rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = context.user.get_key_pairs()
if not key_name is None:
@@ -178,7 +200,8 @@ class CloudController(object):
result = []
for key_pair in key_pairs:
# filter out the vpn keys
- if context.user.is_admin() or not key_pair.name.endswith('-key'):
+ suffix = FLAGS.vpn_key_suffix
+ if context.user.is_admin() or not key_pair.name.endswith(suffix):
result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
@@ -380,15 +403,17 @@ class CloudController(object):
def _format_instances(self, context, reservation_id = None):
reservations = {}
- for instance in self.instdir.all:
+ if context.user.is_admin():
+ instgenerator = self.instdir.all
+ else:
+ instgenerator = self.instdir.by_project(context.project.id)
+ for instance in instgenerator:
res_id = instance.get('reservation_id', 'Unknown')
if reservation_id != None and reservation_id != res_id:
continue
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
- if context.project.id != instance['project_id']:
- continue
i = {}
i['instance_id'] = instance.get('instance_id', None)
i['image_id'] = instance.get('image_id', None)
@@ -475,6 +500,14 @@ class CloudController(object):
# TODO - Strip the IP from the instance
return defer.succeed({'disassociateResponse': ["Address disassociated."]})
+ def release_ip(self, context, private_ip, **kwargs):
+ self.network.release_ip(private_ip)
+ return defer.succeed({'releaseResponse': ["Address released."]})
+
+ def lease_ip(self, context, private_ip, **kwargs):
+ self.network.lease_ip(private_ip)
+ return defer.succeed({'leaseResponse': ["Address leased."]})
+
@rbac.allow('projectmanager', 'sysadmin')
def run_instances(self, context, **kwargs):
# make sure user can access the image
@@ -493,11 +526,20 @@ class CloudController(object):
key_data = key_pair.public_key
# TODO: Get the real security group of launch in here
security_group = "default"
- bridge_name = network.BridgedNetwork.get_network_for_project(context.user.id, context.project.id, security_group)['bridge_name']
+ if FLAGS.simple_network:
+ bridge_name = FLAGS.simple_network_bridge
+ else:
+ net = network.BridgedNetwork.get_network_for_project(
+ context.user.id, context.project.id, security_group)
+ bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
inst = self.instdir.new()
# TODO(ja): add ari, aki
inst['image_id'] = kwargs['image_id']
+ if 'kernel_id' in kwargs:
+ inst['kernel_id'] = kwargs['kernel_id']
+ if 'ramdisk_id' in kwargs:
+ inst['ramdisk_id'] = kwargs['ramdisk_id']
inst['user_data'] = kwargs.get('user_data', '')
inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
inst['reservation_id'] = reservation_id
@@ -509,12 +551,19 @@ class CloudController(object):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
inst['bridge_name'] = bridge_name
- if inst['image_id'] == FLAGS.vpn_image_id:
- address = network.allocate_vpn_ip(
- inst['user_id'], inst['project_id'], mac=inst['mac_address'])
+ if FLAGS.simple_network:
+ address = network.allocate_simple_ip()
else:
- address = network.allocate_ip(
- inst['user_id'], inst['project_id'], mac=inst['mac_address'])
+ if inst['image_id'] == FLAGS.vpn_image_id:
+ address = network.allocate_vpn_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
+ else:
+ address = network.allocate_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
# TODO: allocate expresses on the router node
inst.save()
@@ -544,10 +593,13 @@ class CloudController(object):
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
- try:
- self.network.deallocate_ip(instance.get('private_dns_name', None))
- except Exception, _err:
- pass
+ if FLAGS.simple_network:
+ network.deallocate_simple_ip(instance.get('private_dns_name', None))
+ else:
+ try:
+ self.network.deallocate_ip(instance.get('private_dns_name', None))
+ except Exception, _err:
+ pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
@@ -609,9 +661,8 @@ class CloudController(object):
result = { 'image_id': image_id, 'launchPermission': [] }
if image['isPublic']:
result['launchPermission'].append({ 'group': 'all' })
-
return defer.succeed(result)
-
+
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
diff --git a/nova/flags.py b/nova/flags.py
index bf7b6e3a3..985f9ba04 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -74,6 +74,9 @@ DEFINE_string('default_instance_type',
'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('vpn_key_suffix',
+ '-key',
+ 'Suffix to add to project name for vpn key')
# UNUSED
DEFINE_string('node_availability_zone',
diff --git a/nova/rpc.py b/nova/rpc.py
index 54843973a..b0f6ef7f3 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -63,6 +63,10 @@ class Connection(connection.BrokerConnection):
cls._instance = cls(**params)
return cls._instance
+ @classmethod
+ def recreate(cls):
+ del cls._instance
+ return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
@@ -79,9 +83,22 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
- @exception.wrap_exception
def fetch(self, *args, **kwargs):
- super(Consumer, self).fetch(*args, **kwargs)
+ # TODO(vish): the logic for failed connections and logging should be
+ # refactored into some sort of connection manager object
+ try:
+ if getattr(self, 'failed_connection', False):
+ # attempt to reconnect
+ self.conn = Connection.recreate()
+ self.backend = self.conn.create_backend()
+ super(Consumer, self).fetch(*args, **kwargs)
+ if getattr(self, 'failed_connection', False):
+ logging.error("Reconnected to queue")
+ self.failed_connection = False
+ except Exception, ex:
+ if not getattr(self, 'failed_connection', False):
+ logging.exception("Failed to fetch message from queue")
+ self.failed_connection = True
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
@@ -115,9 +132,10 @@ class AdapterConsumer(TopicConsumer):
args = message_data.get('args', {})
message.ack()
if not method:
- # vish: we may not want to ack here, but that means that bad messages
- # stay in the queue indefinitely, so for now we just log the
- # message and send an error string back to the caller
+ # NOTE(vish): we may not want to ack here, but that means that bad
+ # messages stay in the queue indefinitely, so for now
+ # we just log the message and send an error string
+ # back to the caller
_log.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index a796ab41e..d82089e6f 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -159,7 +159,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
- self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud})
+ self.app = api.APIServerApplication({'Cloud': self.cloud})
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index d40172cfc..3f5d9ff54 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -28,5 +28,5 @@ FLAGS.fake_rabbit = True
FLAGS.fake_network = True
FLAGS.fake_users = True
#FLAGS.keeper_backend = 'sqlite'
-FLAGS.datastore_path = ':memory:'
+# FLAGS.datastore_path = ':memory:'
FLAGS.verbose = True
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f215c0b3f..bccaacfa7 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -18,6 +18,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import logging
import unittest
@@ -26,6 +27,8 @@ import IPy
from nova import flags
from nova import test
+from nova import exception
+from nova.compute.exception import NoMoreAddresses
from nova.compute import network
from nova.auth import users
from nova import utils
@@ -40,6 +43,7 @@ class NetworkTestCase(test.TrialTestCase):
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
+ self.dnsmasq = FakeDNSMasq()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
@@ -66,59 +70,128 @@ class NetworkTestCase(test.TrialTestCase):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
- self.assertEqual(True, address in self._get_project_addresses("project0"))
+ net = network.get_project_network("project0", "default")
+ self.assertEqual(True, is_in_project(address, "project0"))
+ mac = utils.generate_mac()
+ hostname = "test-host"
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+
+ # Doesn't go away until it's dhcp released
+ self.assertEqual(True, is_in_project(address, "project0"))
+
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
def test_range_allocation(self):
+ mac = utils.generate_mac()
+ secondmac = utils.generate_mac()
+ hostname = "test-host"
address = network.allocate_ip(
- "netuser", "project0", utils.generate_mac())
+ "netuser", "project0", mac)
secondaddress = network.allocate_ip(
- "netuser", "project1", utils.generate_mac())
- self.assertEqual(True,
- address in self._get_project_addresses("project0"))
- self.assertEqual(True,
- secondaddress in self._get_project_addresses("project1"))
- self.assertEqual(False, address in self._get_project_addresses("project1"))
+ "netuser", "project1", secondmac)
+ net = network.get_project_network("project0", "default")
+ secondnet = network.get_project_network("project1", "default")
+
+ self.assertEqual(True, is_in_project(address, "project0"))
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+ self.assertEqual(False, is_in_project(address, "project1"))
+
+ # Addresses are allocated before they're issued
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.issue_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
+
+ # First address release shouldn't affect the second
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+
rv = network.deallocate_ip(secondaddress)
- self.assertEqual(False,
- secondaddress in self._get_project_addresses("project1"))
+ self.dnsmasq.release_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+ self.assertEqual(False, is_in_project(secondaddress, "project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
+ hostname = "toomany-hosts"
for project in range(1,5):
project_id = "project%s" % (project)
+ mac = utils.generate_mac()
+ mac2 = utils.generate_mac()
+ mac3 = utils.generate_mac()
address = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac)
address2 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac2)
address3 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
- self.assertEqual(False,
- address in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address2 in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address3 in self._get_project_addresses("project0"))
+ "netuser", project_id, mac3)
+ self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address2, "project0"))
+ self.assertEqual(False, is_in_project(address3, "project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
+ net = network.get_project_network(project_id, "default")
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
+ net = network.get_project_network("project0", "default")
rv = network.deallocate_ip(secondaddress)
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- def test_too_many_projects(self):
- for i in range(0, 30):
- name = 'toomany-project%s' % i
- self.manager.create_project(name, 'netuser', name)
- address = network.allocate_ip(
- "netuser", name, utils.generate_mac())
- rv = network.deallocate_ip(address)
- self.manager.delete_project(name)
+ def test_release_before_deallocate(self):
+ pass
+
+ def test_deallocate_before_issued(self):
+ pass
+
+ def test_too_many_addresses(self):
+ """
+ Network size is 32, there are 5 addresses reserved for VPN.
+ So we should get 23 usable addresses
+ """
+ net = network.get_project_network("project0", "default")
+ hostname = "toomany-hosts"
+ macs = {}
+ addresses = {}
+ for i in range(0, 22):
+ macs[i] = utils.generate_mac()
+ addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
+ self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+ self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
+
+ for i in range(0, 22):
+ rv = network.deallocate_ip(addresses[i])
+ self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+def is_in_project(address, project_id):
+ return address in network.get_project_network(project_id).list_addresses()
+
+def _get_project_addresses(project_id):
+ project_addresses = []
+ for addr in network.get_project_network(project_id).list_addresses():
+ project_addresses.append(addr)
+ return project_addresses
+
+def binpath(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
+class FakeDNSMasq(object):
+ def issue_ip(self, mac, ip, hostname, interface):
+ cmd = "%s add %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("ISSUE_IP: %s, %s " % (out, err))
+
+ def release_ip(self, mac, ip, hostname, interface):
+ cmd = "%s del %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("RELEASE_IP: %s, %s " % (out, err))
- def _get_project_addresses(self, project_id):
- project_addresses = []
- for addr in network.get_project_network(project_id).list_addresses():
- project_addresses.append(addr)
- return project_addresses
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index 89c1d59c5..cee567c8b 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -28,7 +28,6 @@ import tempfile
from nova import vendor
from nova import flags
-from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
@@ -57,7 +56,6 @@ class ObjectStoreTestCase(test.BaseTestCase):
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 73215c5ca..36fcc6f19 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -38,10 +38,7 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage = None
self.flags(fake_libvirt=True,
fake_storage=True)
- if FLAGS.fake_storage:
- self.mystorage = storage.FakeBlockStore()
- else:
- self.mystorage = storage.BlockStore()
+ self.mystorage = storage.BlockStore()
def test_run_create_volume(self):
vol_size = '0'
@@ -65,6 +62,18 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage.create_volume,
vol_size, user_id, project_id)
+ def test_too_many_volumes(self):
+ vol_size = '1'
+ user_id = 'fake'
+ project_id = 'fake'
+ num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+ total_slots = FLAGS.slots_per_shelf * num_shelves
+ for i in xrange(total_slots):
+ self.mystorage.create_volume(vol_size, user_id, project_id)
+ self.assertRaises(storage.NoMoreVolumes,
+ self.mystorage.create_volume,
+ vol_size, user_id, project_id)
+
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"
diff --git a/nova/tests/validator_unittest.py b/nova/tests/validator_unittest.py
index 6ebce0994..eea1beccb 100644
--- a/nova/tests/validator_unittest.py
+++ b/nova/tests/validator_unittest.py
@@ -42,5 +42,4 @@ class ValidationTestCase(test.TrialTestCase):
@validate.typetest(instanceid=str, size=int, number_of_instances=int)
def type_case(instanceid, size, number_of_instances):
- print ("type_case was successfully executed")
- return True \ No newline at end of file
+ return True
diff --git a/nova/utils.py b/nova/utils.py
index 094de5d74..2982b5480 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -22,13 +22,13 @@
System-level utilities and helper functions.
"""
+import inspect
import logging
+import os
+import random
+import subprocess
import socket
import sys
-import os.path
-import inspect
-import subprocess
-import random
from datetime import datetime
from nova import flags
@@ -47,11 +47,12 @@ def fetchfile(url, target):
# fp.close()
execute("curl %s -o %s" % (url, target))
-
-def execute(cmd, input=None):
- #logging.debug("Running %s" % (cmd))
+def execute(cmd, input=None, addl_env=None):
+ env = os.environ.copy()
+ if addl_env:
+ env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
if input != None:
result = obj.communicate(input)
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index 273a6afd1..288ab76ba 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -26,9 +26,10 @@ Currently uses Ata-over-Ethernet.
import glob
import logging
-import random
+import os
import socket
-import subprocess
+import shutil
+import tempfile
import time
from nova import vendor
@@ -38,10 +39,8 @@ from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
-from nova import rpc
from nova import utils
from nova import validate
-from nova.compute import model
FLAGS = flags.FLAGS
@@ -54,16 +53,27 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this node')
-flags.DEFINE_integer('shelf_id',
- utils.last_octet(utils.get_my_ip()),
- 'AoE shelf_id for this node')
+flags.DEFINE_integer('first_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10,
+ 'AoE starting shelf_id for this node')
+flags.DEFINE_integer('last_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10 + 9,
+                     'AoE last shelf_id for this node')
+flags.DEFINE_string('aoe_export_dir',
+ '/var/lib/vblade-persist/vblades',
+ 'AoE directory where exports are created')
+flags.DEFINE_integer('slots_per_shelf',
+ 16,
+ 'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-# TODO(joshua) Index of volumes by project
+
+class NoMoreVolumes(exception.Error):
+ pass
def get_volume(volume_id):
""" Returns a redis-backed volume object """
@@ -84,9 +94,14 @@ class BlockStore(object):
super(BlockStore, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
+ FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
+ def __del__(self):
+ if FLAGS.fake_storage:
+ shutil.rmtree(FLAGS.aoe_export_dir)
+
def report_state(self):
#TODO: aggregate the state of the system
pass
@@ -140,19 +155,7 @@ class BlockStore(object):
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
-
-class FakeBlockStore(BlockStore):
- def __init__(self):
- super(FakeBlockStore, self).__init__()
-
- def _init_volume_group(self):
- pass
-
- def _restart_exports(self):
- pass
-
-
-class Volume(model.BasicModel):
+class Volume(datastore.BasicModel):
def __init__(self, volume_id=None):
self.volume_id = volume_id
@@ -182,7 +185,7 @@ class Volume(model.BasicModel):
vol['delete_on_termination'] = 'False'
vol.save()
vol.create_lv()
- vol.setup_export()
+ vol._setup_export()
# TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
# TODO(joshua
vol['status'] = "available"
@@ -195,7 +198,7 @@ class Volume(model.BasicModel):
self['mountpoint'] = mountpoint
self['status'] = "in-use"
self['attach_status'] = "attaching"
- self['attach_time'] = utils.utctime()
+ self['attach_time'] = utils.isotime()
self['delete_on_termination'] = 'False'
self.save()
@@ -234,15 +237,22 @@ class Volume(model.BasicModel):
def _delete_lv(self):
utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
- def setup_export(self):
+ def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
+ self._exec_export()
+
+ def _exec_export(self):
utils.runthis("Creating AOE export: %s",
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']))
+ (self['shelf_id'],
+ self['blade_id'],
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ self['volume_id']))
def _remove_export(self):
utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
@@ -253,13 +263,10 @@ class FakeVolume(Volume):
def create_lv(self):
pass
- def setup_export(self):
- # TODO(???): This may not be good enough?
- blade_id = ''.join([random.choice('0123456789') for x in xrange(3)])
- self['shelf_id'] = FLAGS.shelf_id
- self['blade_id'] = blade_id
- self['aoe_device'] = "e%s.%s" % (FLAGS.shelf_id, blade_id)
- self.save()
+ def _exec_export(self):
+ fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
+ f = file(fname, "w")
+ f.close()
def _remove_export(self):
pass
@@ -268,9 +275,13 @@ class FakeVolume(Volume):
pass
def get_next_aoe_numbers():
- aoes = glob.glob("/var/lib/vblade-persist/vblades/e*")
- aoes.extend(['e0.0'])
- blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1
- logging.debug("Next blade_id is %s" % (blade_id))
- shelf_id = FLAGS.shelf_id
- return (shelf_id, blade_id)
+ for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
+ aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
+ if not aoes:
+ blade_id = 0
+ else:
+ blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
+ if blade_id < FLAGS.slots_per_shelf:
+ logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
+ return (shelf_id, blade_id)
+ raise NoMoreVolumes()