summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@gmail.com>2010-06-28 22:47:02 -0700
committerVishvananda Ishaya <vishvananda@gmail.com>2010-06-28 22:47:02 -0700
commita013a801246bed9302303c304b90c748e2d7aec0 (patch)
tree36ae3566eb9fa4b5edd2728318fa0f6d4a4acaf2
parentc363f3239168081d5b87f9ef4690a6123784a024 (diff)
parentc7f7e1bc4185be38ff792bfd82a74e35ecbeda12 (diff)
downloadnova-a013a801246bed9302303c304b90c748e2d7aec0.tar.gz
nova-a013a801246bed9302303c304b90c748e2d7aec0.tar.xz
nova-a013a801246bed9302303c304b90c748e2d7aec0.zip
Merge remote branch 'angst/apply_api' into diekeeper
-rwxr-xr-xbin/nova-api6
-rwxr-xr-xbin/nova-compute4
-rw-r--r--nova/adminclient.py37
-rw-r--r--nova/auth/users.py3
-rw-r--r--nova/compute/model.py323
-rw-r--r--nova/compute/network.py203
-rw-r--r--nova/compute/node.py34
-rw-r--r--nova/datastore.py86
-rw-r--r--nova/endpoint/admin.py42
-rw-r--r--nova/tests/model_unittest.py205
-rw-r--r--nova/utils.py6
-rw-r--r--nova/volume/storage.py21
-rw-r--r--run_tests.py1
13 files changed, 736 insertions, 235 deletions
diff --git a/bin/nova-api b/bin/nova-api
index e9772ec81..f0f79a236 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -20,7 +20,7 @@
# under the License.
"""
- Tornado daemon for the main API endpoint.
+Tornado daemon for the main API endpoint.
"""
import logging
@@ -34,6 +34,7 @@ from nova import rpc
from nova import server
from nova import utils
from nova.auth import users
+from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api
from nova.endpoint import cloud
@@ -43,9 +44,10 @@ FLAGS = flags.FLAGS
def main(_argv):
user_manager = users.UserManager()
+ host_manager = model.Host
controllers = {
'Cloud': cloud.CloudController(),
- 'Admin': admin.AdminController(user_manager)
+ 'Admin': admin.AdminController(user_manager, host_manager)
}
_app = api.APIServerApplication(user_manager, controllers)
diff --git a/bin/nova-compute b/bin/nova-compute
index cc738e87f..ed829ecc8 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -75,8 +75,8 @@ def main():
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
- # heartbeat = task.LoopingCall(n.report_state)
- # heartbeat.start(interval=FLAGS.node_report_state_interval, now=False)
+ pulse = task.LoopingCall(n.report_state, FLAGS.node_name, 'nova-compute')
+ pulse.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
injected = consumer_node.attach_to_twisted()
diff --git a/nova/adminclient.py b/nova/adminclient.py
index fe873b8f7..480e907c9 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -21,9 +21,11 @@
Nova User API client library.
"""
+import base64
+
+from nova import vendor
import boto
from boto.ec2.regioninfo import RegionInfo
-import base64
class UserInfo(object):
""" Information about a Nova user
@@ -44,9 +46,11 @@ class UserInfo(object):
def __repr__(self):
return 'UserInfo:%s' % self.username
+ # this is needed by the sax parser, so ignore the ugly name
def startElement(self, name, attrs, connection):
return None
+ # this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
if name == 'username':
self.username = str(value)
@@ -57,6 +61,32 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
+class HostInfo(object):
+ """
+ Information about a Nova Host:
+ Disk stats
+ Running Instances
+ Memory stats
+ CPU stats
+ Network address info
+ Firewall info
+ Bridge and devices
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.hostname = None
+
+ def __repr__(self):
+ return 'Host:%s' % self.hostname
+
+ # this is needed by the sax parser, so ignore the ugly name
+ def startElement(self, name, attrs, connection):
+ return None
+
+ # this is needed by the sax parser, so ignore the ugly name
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
@@ -91,7 +121,7 @@ class NovaAdminClient(object):
def get_users(self):
""" grabs the list of all users """
- return self.apiconn.get_list('DescribeUsers', {}, (['item', UserInfo]))
+ return self.apiconn.get_list('DescribeUsers', {}, [('item', UserInfo)])
def get_user(self, name):
""" grab a single user by name """
@@ -116,3 +146,6 @@ class NovaAdminClient(object):
""" returns the content of a zip file containing novarc and access credentials. """
return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
+ def get_hosts(self):
+ return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
+
diff --git a/nova/auth/users.py b/nova/auth/users.py
index 1fc97345f..7b703aa82 100644
--- a/nova/auth/users.py
+++ b/nova/auth/users.py
@@ -350,7 +350,8 @@ class UserManager(object):
user = self.get_user_from_access_key(access_key)
if user == None:
- raise exception.NotFound('No user found for access key')
+ raise exception.NotFound('No user found for access key %s' %
+ access_key)
if project_name is '':
project_name = user.name
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 86f912f01..ad1f97a0a 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -43,92 +43,166 @@ True
"""
import logging
+import time
from nova import vendor
+import redis
from nova import datastore
+from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
-# TODO(ja): singleton instance of the directory
+
+class ConnectionError(exception.Error):
+ pass
+
+
+def absorb_connection_error(fn):
+ def _wrapper(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except redis.exceptions.ConnectionError, ce:
+ raise ConnectionError(str(ce))
+ return _wrapper
+
+
+# TODO(todd): Implement this at the class level for Instance
class InstanceDirectory(object):
- """an api for interacting with the global state of instances """
+ """an api for interacting with the global state of instances"""
def get(self, instance_id):
- """ returns an instance object for a given id """
+ """returns an instance object for a given id"""
return Instance(instance_id)
def __getitem__(self, item):
return self.get(item)
+ @absorb_connection_error
def by_project(self, project):
- """ returns a list of instance objects for a project """
+ """returns a list of instance objects for a project"""
for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
yield Instance(instance_id)
def by_node(self, node_id):
- """ returns a list of instances for a node """
+ """returns a list of instances for a node"""
for instance in self.all:
if instance['node_name'] == node_id:
yield instance
def by_ip(self, ip_address):
- """ returns an instance object that is using the IP """
+ """returns an instance object that is using the IP"""
for instance in self.all:
if instance['private_dns_name'] == ip_address:
return instance
return None
def by_volume(self, volume_id):
- """ returns the instance a volume is attached to """
+ """returns the instance a volume is attached to"""
pass
+ @absorb_connection_error
def exists(self, instance_id):
return datastore.Redis.instance().sismember('instances', instance_id)
@property
+ @absorb_connection_error
def all(self):
- """ returns a list of all instances """
+ """returns a list of all instances"""
for instance_id in datastore.Redis.instance().smembers('instances'):
yield Instance(instance_id)
def new(self):
- """ returns an empty Instance object, with ID """
+ """returns an empty Instance object, with ID"""
instance_id = utils.generate_uid('i')
return self.get(instance_id)
+class BasicModel(object):
+ """
+ All Redis-backed data derives from this class.
+ You MUST specify an identifier() property that returns a unique string
+ per instance.
-class Instance(object):
- """ Wrapper around stored properties of an instance """
+ You MUST have an initializer that takes a single argument that is a value
+ returned by identifier() to load a new class with.
- def __init__(self, instance_id):
- """ loads an instance from the datastore if exists """
- self.instance_id = instance_id
+ You may want to specify a dictionary for default_state().
+
+ You may also specify override_type at the class left to use a key other
+ than __class__.__name__.
+
+ You override save and destroy calls to automatically build and destroy
+ associations.
+ """
+
+ override_type = None
+
+ @absorb_connection_error
+ def __init__(self):
self.initial_state = {}
self.state = datastore.Redis.instance().hgetall(self.__redis_key)
if self.state:
self.initial_state = self.state
else:
- self.state = {'state': 0,
- 'state_description': 'pending',
- 'instance_id': instance_id,
- 'node_name': 'unassigned',
- 'project_id': 'unassigned',
- 'user_id': 'unassigned'
- }
+ self.state = self.default_state()
+
+ def default_state(self):
+ """You probably want to define this in your subclass"""
+ return {}
+
+ @classmethod
+ def _redis_name(cls):
+ return self.override_type or cls.__name__
+
+ @classmethod
+ def lookup(cls, identifier):
+ rv = cls(identifier)
+ if rv.is_new_record():
+ return None
+ else:
+ return rv
+
+ @classmethod
+ @absorb_connection_error
+ def all(cls):
+ """yields all objects in the store"""
+ redis_set = cls._redis_set_name(cls.__name__)
+ for identifier in datastore.Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ @absorb_connection_error
+ def associated_to(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ for identifier in datastore.Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ def _redis_set_name(cls, kls_name):
+        # stupidly pluralize (for compatibility with previous codebase)
+ return kls_name.lower() + "s"
+
+ @classmethod
+ def _redis_association_name(cls, foreign_type, foreign_id):
+ return cls._redis_set_name("%s:%s:%s" %
+ (foreign_type, foreign_id, cls.__name__))
+
+ @property
+ def identifier(self):
+ """You DEFINITELY want to define this in your subclass"""
+ raise NotImplementedError("Your sublcass should define identifier")
@property
def __redis_key(self):
- """ Magic string for instance keys """
- return 'instance:%s' % self.instance_id
+ return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
def __repr__(self):
- return "<Instance:%s>" % self.instance_id
+ return "<%s:%s>" % (self.__class__.__name__, self.identifier)
def keys(self):
return self.state.keys()
@@ -156,53 +230,212 @@ class Instance(object):
return self.state[item]
def __delitem__(self, item):
- """ We don't support this """
- raise Exception("Silly monkey, Instances NEED all their properties.")
+ """We don't support this"""
+ raise Exception("Silly monkey, models NEED all their properties.")
+
+ def is_new_record(self):
+ return self.initial_state == {}
+
+ @absorb_connection_error
+ def add_to_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().sadd(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def associate_with(self, foreign_type, foreign_id):
+ # note the extra 's' on the end is for plurality
+ # to match the old data without requiring a migration of any sort
+ self.add_associated_model_to_its_set(foreign_type, foreign_id)
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ datastore.Redis.instance().sadd(redis_set, self.identifier)
+
+ @absorb_connection_error
+ def unassociate_with(self, foreign_type, foreign_id):
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ datastore.Redis.instance().srem(redis_set, self.identifier)
+
+ def add_associated_model_to_its_set(self, my_type, my_id):
+ table = globals()
+ klsname = my_type.capitalize()
+ if table.has_key(klsname):
+ my_class = table[klsname]
+ my_inst = my_class(my_id)
+ my_inst.save()
+ else:
+ logging.warning("no model class for %s when building"
+ " association from %s",
+ klsname, self)
+ @absorb_connection_error
def save(self):
- """ update the directory with the state from this instance
- make sure you've set the project_id and user_id before you call save
- for the first time.
+ """
+ update the directory with the state from this model
+ also add it to the index of items of the same type
+ then set the initial_state = state so new changes are tracked
"""
# TODO(ja): implement hmset in redis-py and use it
# instead of multiple calls to hset
+ if self.is_new_record():
+ self["create_time"] = utils.isotime()
for key, val in self.state.iteritems():
# if (not self.initial_state.has_key(key)
# or self.initial_state[key] != val):
datastore.Redis.instance().hset(self.__redis_key, key, val)
- if self.initial_state == {}:
- datastore.Redis.instance().sadd('project:%s:instances' % self.project,
- self.instance_id)
- datastore.Redis.instance().sadd('instances', self.instance_id)
+ self.add_to_index()
self.initial_state = self.state
return True
+ @absorb_connection_error
+ def destroy(self):
+ """
+ deletes all related records from datastore.
+ does NOT do anything to running libvirt state.
+ """
+ logging.info("Destroying datamodel for %s %s",
+ self.__class__.__name__, self.identifier)
+ datastore.Redis.instance().delete(self.__redis_key)
+ self.remove_from_index()
+ return True
+
+
+class Instance(BasicModel):
+ """Wrapper around stored properties of an instance"""
+
+ def __init__(self, instance_id):
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
+ self.instance_id = instance_id
+ super(Instance, self).__init__()
+
+ def default_state(self):
+ return {'state': 0,
+ 'state_description': 'pending',
+ 'instance_id': self.instance_id,
+ 'node_name': 'unassigned',
+ 'project_id': 'unassigned',
+ 'user_id': 'unassigned'}
+
+ @property
+ def identifier(self):
+ return self.instance_id
+
@property
def project(self):
if self.state.get('project_id', None):
return self.state['project_id']
return self.state.get('owner_id', 'unassigned')
- def destroy(self):
- """ deletes all related records from datastore.
- does NOT do anything to running libvirt state.
- """
- logging.info("Destroying datamodel for instance %s", self.instance_id)
- datastore.Redis.instance().srem('project:%s:instances' % self.project,
- self.instance_id)
- datastore.Redis.instance().srem('instances', self.instance_id)
- return True
-
@property
def volumes(self):
- """ returns a list of attached volumes """
+ """returns a list of attached volumes"""
pass
@property
def reservation(self):
- """ Returns a reservation object """
+ """Returns a reservation object"""
pass
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+ # NOTE(todd): doesn't track migration between projects/nodes,
+ # it just adds the first one
+ should_update_project = self.is_new_record()
+ should_update_node = self.is_new_record()
+ success = super(Instance, self).save()
+ if success and should_update_project:
+ self.associate_with("project", self.project)
+ if success and should_update_node:
+ self.associate_with("node", self['node_name'])
+ return True
+
+ def destroy(self):
+ """Destroy associations, then destroy the object"""
+ self.unassociate_with("project", self.project)
+ return super(Instance, self).destroy()
+
+class Host(BasicModel):
+ """
+ A Host is the machine where a Daemon is running.
+ """
+
+ def __init__(self, hostname):
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
+ self.hostname = hostname
+ super(Host, self).__init__()
+
+ def default_state(self):
+ return {"hostname": self.hostname}
+
+ @property
+ def identifier(self):
+ return self.hostname
+
+
+class Daemon(BasicModel):
+ """
+ A Daemon is a job (compute, api, network, ...) that runs on a host.
+ """
+
+ def __init__(self, host_or_combined, binpath=None):
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
+ # since loading from datastore expects a combined key that
+        # is equivalent to identifier, we need to expect that, while
+ # maintaining meaningful semantics (2 arguments) when creating
+ # from within other code like the bin/nova-* scripts
+ if binpath:
+ self.hostname = host_or_combined
+ self.binary = binpath
+ else:
+ self.hostname, self.binary = host_or_combined.split(":")
+ super(Daemon, self).__init__()
+
+ def default_state(self):
+ return {"hostname": self.hostname,
+ "binary": self.binary,
+ "updated_at": utils.isotime()
+ }
+
+ @property
+ def identifier(self):
+ return "%s:%s" % (self.hostname, self.binary)
+
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+        # NOTE(todd): this makes no attempt to destroy itself,
+        #             so after termination a record w/ old timestamp remains
+ success = super(Daemon, self).save()
+ if success:
+ self.associate_with("host", self.hostname)
+ return True
+
+ def destroy(self):
+ """Destroy associations, then destroy the object"""
+ self.unassociate_with("host", self.hostname)
+ return super(Daemon, self).destroy()
+
+ def heartbeat(self):
+ self['updated_at'] = utils.isotime()
+ self.save()
+ return True
+
+ @classmethod
+ def by_host(cls, hostname):
+ for x in cls.associated_to("host", hostname):
+ yield x
if __name__ == "__main__":
import doctest
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 911d0344a..7b37cde6d 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -34,6 +34,7 @@ from nova import datastore
import nova.exception
from nova.compute import exception
from nova import flags
+from nova.compute import model
from nova import utils
from nova.auth import users
@@ -60,23 +61,98 @@ flags.DEFINE_integer('cloudpipe_start_port', 12000,
logging.getLogger().setLevel(logging.DEBUG)
+
+class Vlan(model.BasicModel):
+ def __init__(self, project, vlan):
+ """
+ Since we don't want to try and find a vlan by its identifier,
+ but by a project id, we don't call super-init.
+ """
+ self.project_id = project
+ self.vlan_id = vlan
+
+ @property
+ def identifier(self):
+ return "%s:%s" % (self.project_id, self.vlan_id)
+
+ @classmethod
+ def create(cls, project, vlan):
+ instance = cls(project, vlan)
+ instance.save()
+ return instance
+
+ @classmethod
+ @model.absorb_connection_error
+ def lookup(cls, project):
+ set_name = cls._redis_set_name(cls.__name__)
+ vlan = datastore.Redis.instance().hget(set_name, project)
+ if vlan:
+ return cls(project, vlan)
+ else:
+ return None
+
+ @classmethod
+ @model.absorb_connection_error
+ def dict_by_project(cls):
+ """a hash of project:vlan"""
+ set_name = cls._redis_set_name(cls.__name__)
+ return datastore.Redis.instance().hgetall(set_name)
+
+ @classmethod
+ @model.absorb_connection_error
+ def dict_by_vlan(cls):
+ """a hash of vlan:project"""
+ set_name = cls._redis_set_name(cls.__name__)
+ rv = {}
+ h = datastore.Redis.instance().hgetall(set_name)
+ for v in h.keys():
+ rv[h[v]] = v
+ return rv
+
+ @classmethod
+ @model.absorb_connection_error
+ def all(cls):
+ set_name = cls._redis_set_name(cls.__name__)
+ for project,vlan in datastore.Redis.instance().hgetall(set_name):
+ yield cls(project, vlan)
+
+ @model.absorb_connection_error
+ def save(self):
+ """
+ Vlan saves state into a giant hash named "vlans", with keys of
+        project_id and value of vlan number. Therefore, we skip the
+ default way of saving into "vlan:ID" and adding to a set of "vlans".
+ """
+ set_name = self._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
+
+ @model.absorb_connection_error
+ def destroy(self):
+ set_name = self._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().hdel(set_name, self.project)
+
+ def subnet(self):
+ vlan = int(self.vlan_id)
+ network = IPy.IP(FLAGS.private_range)
+ start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
+ return "%s-%s" % (network[start],
+ network[start + FLAGS.network_size - 1])
+
# CLEANUP:
# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al
# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
-class BaseNetwork(datastore.RedisModel):
- bridge_gets_ip = False
- object_type = 'network'
+class BaseNetwork(model.BasicModel):
+ override_type = 'network'
- @classmethod
- def get_all_hosts(cls):
- for vlan in get_assigned_vlans().values():
- network_str = get_subnet_from_vlan(vlan)
- for addr in datastore.Redis.instance().hgetall(
- "network:%s:hosts" % (network_str)):
- yield addr
+ @property
+ def identifier(self):
+ return self.network_id
+
+ def default_state(self):
+ return {'network_id': self.network_id, 'network_str': self.network_str}
@classmethod
def create(cls, user_id, project_id, security_group, vlan, network_str):
@@ -90,26 +166,31 @@ class BaseNetwork(datastore.RedisModel):
return net
def __init__(self, network_id, network_str=None):
- super(BaseNetwork, self).__init__(object_id=network_id)
- self['network_id'] = network_id
- self['network_str'] = network_str
+ self.network_id = network_id
+ self.network_str = network_str
+ super(BaseNetwork, self).__init__()
self.save()
@property
def network(self):
return IPy.IP(self['network_str'])
+
@property
def netmask(self):
return self.network.netmask()
+
@property
def gateway(self):
return self.network[1]
+
@property
def broadcast(self):
return self.network.broadcast()
+
@property
def gateway(self):
return self.network[1]
+
@property
def bridge_name(self):
return "br%s" % (self["vlan"])
@@ -193,12 +274,16 @@ class BridgedNetwork(BaseNetwork):
netmask
"""
+ override_type = 'network'
+
@classmethod
def get_network_for_project(cls, user_id, project_id, security_group):
vlan = get_vlan_for_project(project_id)
- network_str = get_subnet_from_vlan(vlan)
- logging.debug("creating network on vlan %s with network string %s" % (vlan, network_str))
- return cls.create(user_id, project_id, security_group, vlan, network_str)
+ network_str = vlan.subnet()
+ logging.debug("creating network on vlan %s with network string %s",
+ vlan.vlan_id, network_str)
+ return cls.create(user_id, project_id, security_group, vlan.vlan_id,
+ network_str)
def __init__(self, *args, **kwargs):
super(BridgedNetwork, self).__init__(*args, **kwargs)
@@ -218,6 +303,7 @@ class DHCPNetwork(BridgedNetwork):
dhcp_range_end: the last ip to give out
"""
bridge_gets_ip = True
+ override_type = 'network'
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
@@ -227,6 +313,10 @@ class DHCPNetwork(BridgedNetwork):
self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
try:
os.makedirs(FLAGS.networks_path)
+ # NOTE(todd): I guess this is a lazy way to not have to check if the
+ # directory exists, but shouldn't we be smarter about
+ # telling the difference between existing directory and
+ # permission denied? (Errno 17 vs 13, OSError)
except Exception, err:
pass
@@ -261,26 +351,34 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
-class PublicAddress(datastore.RedisModel):
- object_type="address"
+class PublicAddress(model.BasicModel):
+ override_type = "address"
def __init__(self, address):
- super(PublicAddress, self).__init__(address)
+ self.address = address
+ super(PublicAddress, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.address
+
+ def default_state(self):
+ return {'address': self.address}
@classmethod
def create(cls, user_id, project_id, address):
- addr = cls(address=address)
- addr['address'] = address
+ addr = cls(address)
addr['user_id'] = user_id
addr['project_id'] = project_id
addr['instance_id'] = 'available'
addr['private_ip'] = 'available'
- addr["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
addr.save()
return addr
DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
class PublicNetworkController(BaseNetwork):
+ override_type = 'network'
+
def __init__(self, *args, **kwargs):
network_id = "public:default"
super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
@@ -383,38 +481,37 @@ class PublicNetworkController(BaseNetwork):
% (private_ip, protocol, port))
-VLANS_KEY = "vlans"
-def _add_vlan(project_id, vlan):
- datastore.Redis.instance().hset(VLANS_KEY, project_id, vlan)
-
-def _rem_vlan(project_id):
- datastore.Redis.instance().hdel(VLANS_KEY, project_id)
-
-def get_assigned_vlans():
- """ Returns a dictionary, with keys of project_id and values of vlan_id """
- return datastore.Redis.instance().hgetall(VLANS_KEY)
-
+# FIXME(todd): does this present a race condition, or is there some piece of
+# architecture that mitigates it (only one queue listener per net)?
def get_vlan_for_project(project_id):
"""
Allocate vlan IDs to individual users.
"""
- vlan = datastore.Redis.instance().hget(VLANS_KEY, project_id)
+ vlan = Vlan.lookup(project_id)
if vlan:
return vlan
- assigned_vlans = get_assigned_vlans()
- # TODO(joshua) I can do this in one loop, I think
- for old_project_id, vlan in assigned_vlans.iteritems():
+ known_vlans = Vlan.dict_by_vlan()
+ for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end):
+ vstr = str(vnum)
+ if not known_vlans.has_key(vstr):
+ return Vlan.create(project_id, vnum)
+ old_project_id = known_vlans[vstr]
if not users.UserManager.instance().get_project(old_project_id):
- _rem_vlan(old_project_id)
- _add_vlan(project_id, vlan)
- return vlan
- for vlan in range(FLAGS.vlan_start, FLAGS.vlan_end):
- if not str(vlan) in assigned_vlans.values():
- _add_vlan(project_id, vlan)
- return vlan
+ vlan = Vlan.lookup(old_project_id)
+ if vlan:
+ # NOTE(todd): This doesn't check for vlan id match, because
+ # it seems to be assumed that vlan<=>project is
+ # always a 1:1 mapping. It could be made way
+            #             sexier if it didn't fight against the way
+ # BasicModel worked and used associate_with
+ # to build connections to projects.
+ vlan.project_id = project_id
+ vlan.save()
+ return vlan
+ else:
+ return Vlan.create(project_id, vnum)
raise exception.AddressNotAllocated("Out of VLANs")
-
def get_network_by_address(address):
for project in users.UserManager.instance().get_projects():
net = get_project_network(project.id)
@@ -433,17 +530,15 @@ def deallocate_ip(address):
def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
+ # TODO(todd): It looks goofy to get a project from a UserManager.
+ # Refactor to still use the LDAP backend, but not User specific.
project = users.UserManager.instance().get_project(project_id)
if not project:
- raise nova.exception.Error("Project %s doesn't exist, uhoh." % project_id)
- return DHCPNetwork.get_network_for_project(project.project_manager_id, project.id, security_group)
-
-def get_subnet_from_vlan(vlan):
- """Assign one subnet to each VLAN, for now."""
- vlan = int(vlan)
- network = IPy.IP(FLAGS.private_range)
- start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
- return "%s-%s" % (network[start], network[start + FLAGS.network_size - 1])
+ raise nova.exception.Error("Project %s doesn't exist, uhoh." %
+ project_id)
+ return DHCPNetwork.get_network_for_project(project.project_manager_id,
+ project.id, security_group)
+
def restart_nets():
""" Ensure the network for each user is enabled"""
diff --git a/nova/compute/node.py b/nova/compute/node.py
index c217056f5..7859d71c0 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -31,6 +31,7 @@ import json
import logging
import os
import random
+import shutil
import sys
from nova import vendor
@@ -142,9 +143,21 @@ class Node(object, service.Service):
return retval
@defer.inlineCallbacks
- def report_state(self):
- logging.debug("Reporting State")
- return
+ def report_state(self, nodename, daemon):
+ # TODO(termie): Termie has an idea for wrapping this connection failure
+ # pattern to be more elegant. -todd
+ try:
+ record = model.Daemon(nodename, daemon)
+ record.heartbeat()
+ if getattr(self, "model_disconnected", False):
+ self.model_disconnected = False
+ logging.error("Recovered model server connection!")
+
+ except model.ConnectionError, ex:
+ if not getattr(self, "model_disconnected", False):
+ self.model_disconnected = True
+ logging.exception("model server went away")
+ yield
# @exception.wrap_exception
def run_instance(self, instance_id, **_kwargs):
@@ -153,7 +166,8 @@ class Node(object, service.Service):
inst = self.instdir.get(instance_id)
# TODO: Get the real security group of launch in here
security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'], inst['project_id'],
+ net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
+ inst['project_id'],
security_group).express()
inst['node_name'] = FLAGS.node_name
inst.save()
@@ -333,7 +347,7 @@ class Instance(object):
@property
def name(self):
- return self._s['name']
+ return self.datamodel['name']
def is_pending(self):
return (self.state == Instance.NOSTATE or self.state == 'pending')
@@ -346,7 +360,7 @@ class Instance(object):
return (self.state == Instance.RUNNING or self.state == 'running')
def describe(self):
- return self._s
+ return self.datamodel
def info(self):
logging.debug("Getting info for dom %s" % self.name)
@@ -360,7 +374,7 @@ class Instance(object):
'node_name': FLAGS.node_name}
def basepath(self, path=''):
- return os.path.abspath(os.path.join(self._s['basepath'], path))
+ return os.path.abspath(os.path.join(self.datamodel['basepath'], path))
def update_state(self):
self.datamodel.update(self.info())
@@ -450,7 +464,7 @@ class Instance(object):
@defer.inlineCallbacks
def _create_image(self, libvirt_xml):
# syntactic nicety
- data = self._s
+ data = self.datamodel
basepath = self.basepath
# ensure directories exist and are writable
@@ -528,7 +542,9 @@ class Instance(object):
local_d.callback(None)
timer.f = _wait_for_boot
timer.start(interval=0.5, now=True)
- except Exception:
+ except Exception, ex:
+ # FIXME(todd): this is just for debugging during testing
+ print "FUUUUUUUUUUUUUUUUUUUUUU: %s" % ex
logging.debug(ex)
self.set_state(Instance.SHUTDOWN)
diff --git a/nova/datastore.py b/nova/datastore.py
index 6b8a01ca5..5a9b80c62 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -66,92 +66,6 @@ class Redis(object):
return cls._instance
-class RedisModel(object):
- """ Wrapper around redis-backed properties """
- object_type = 'generic'
- def __init__(self, object_id):
- """ loads an object from the datastore if exists """
- self.object_id = object_id
- self.initial_state = {}
- self.state = Redis.instance().hgetall(self.__redis_key)
- if self.state:
- self.initial_state = self.state
- else:
- self.set_default_state()
-
- def set_default_state(self):
- self.state = {'state': 0,
- 'state_description': 'pending',
- 'node_name': 'unassigned',
- 'project_id': 'unassigned',
- 'user_id': 'unassigned'}
- self.state[self.object_type+"_id"] = self.object_id
- self.state["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
-
- @property
- def project(self):
- if self.state.get('project_id', None):
- return self.state['project_id']
- return self.state.get('owner_id', 'unassigned')
-
- @property
- def __redis_key(self):
- """ Magic string for keys """
- return '%s:%s' % (self.object_type, self.object_id)
-
- def __repr__(self):
- return "<%s:%s>" % (self.object_type, self.object_id)
-
- def __str__(self):
- return str(self.state)
-
- def keys(self):
- return self.state.keys()
-
- def copy(self):
- copyDict = {}
- for item in self.keys():
- copyDict[item] = self[item]
- return copyDict
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
-
- def __delitem__(self, item):
- """ We don't support this """
- raise Exception("Silly monkey, we NEED all our properties.")
-
- def save(self):
- """ update the directory with the state from this instance """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- for key, val in self.state.iteritems():
- # if (not self.initial_state.has_key(key)
- # or self.initial_state[key] != val):
- Redis.instance().hset(self.__redis_key, key, val)
- if self.initial_state == {}:
- self.first_save()
- self.initial_state = self.state
- return True
-
- def first_save(self):
- pass
-
- def destroy(self):
- """ deletes all related records from datastore.
- does NOT do anything to running state.
- """
- Redis.instance().delete(self.__redis_key)
- return True
-
-
def slugify(key, prefix=None):
"""
Key has to be a valid filename. Slugify solves that.
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index ccc0472af..4471a4ef4 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -36,16 +36,10 @@ def user_dict(user, base64_file=None):
else:
return {}
-def node_dict(node):
- """Convert a node object to a result dict"""
- if node:
- return {
- 'node_id': node.id,
- 'workers': ", ".join(node.workers),
- 'disks': ", ".join(node.disks),
- 'ram': node.memory,
- 'load_average' : node.load_average,
- }
+def host_dict(host):
+ """Convert a host model object to a result dict"""
+ if host:
+ return host.state
else:
return {}
@@ -63,34 +57,32 @@ def admin_only(target):
class AdminController(object):
"""
- API Controller for users, node status, and worker mgmt.
+ API Controller for users, hosts, nodes, and workers.
Trivial admin_only wrapper will be replaced with RBAC,
allowing project managers to administer project users.
"""
- def __init__(self, user_manager, node_manager=None):
+
+ def __init__(self, user_manager, host_manager):
self.user_manager = user_manager
- self.node_manager = node_manager
+ self.host_manager = host_manager
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
- """Returns user data, including access and secret keys.
- """
+ """Returns user data, including access and secret keys."""
return user_dict(self.user_manager.get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
- """Returns all users - should be changed to deal with a list.
- """
+ """Returns all users - should be changed to deal with a list."""
return {'userSet':
[user_dict(u) for u in self.user_manager.get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
- """ Creates a new user, and returns generated credentials.
- """
+ """Creates a new user, and returns generated credentials."""
return user_dict(self.user_manager.create_user(name))
@admin_only
@@ -116,7 +108,7 @@ class AdminController(object):
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
- def describe_nodes(self, _context, **_kwargs):
+ def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space
* Instance List
@@ -125,11 +117,9 @@ class AdminController(object):
* DHCP servers running
* Iptables / bridges
"""
- return {'nodeSet':
- [node_dict(n) for n in self.node_manager.get_nodes()] }
+ return {'hostSet': [host_dict(h) for h in self.host_manager.all()]}
@admin_only
- def describe_node(self, _context, name, **_kwargs):
- """Returns status info for single node.
- """
- return node_dict(self.node_manager.get_node(name))
+ def describe_host(self, _context, name, **_kwargs):
+ """Returns status info for single node."""
+ return host_dict(self.host_manager.lookup(name))
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
new file mode 100644
index 000000000..23e2f9e73
--- /dev/null
+++ b/nova/tests/model_unittest.py
@@ -0,0 +1,205 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright [2010] [Anso Labs, LLC]
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from nova import vendor
+from twisted.internet import defer
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova import utils
+from nova.compute import model
+from nova.compute import node
+
+
+FLAGS = flags.FLAGS
+
+
+class ModelTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(ModelTestCase, self).setUp()
+ self.flags(fake_libvirt=True,
+ fake_storage=True,
+ fake_users=True)
+
+ def tearDown(self):
+ model.Instance('i-test').destroy()
+ model.Host('testhost').destroy()
+ model.Daemon('testhost', 'nova-testdaemon').destroy()
+
+ def create_instance(self):
+ inst = model.Instance('i-test')
+ inst['reservation_id'] = 'r-test'
+ inst['launch_time'] = '10'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type'] = 'm1.tiny'
+ inst['node_name'] = FLAGS.node_name
+ inst['mac_address'] = utils.generate_mac()
+ inst['ami_launch_index'] = 0
+ inst.save()
+ return inst
+
+ def create_host(self):
+ host = model.Host('testhost')
+ host.save()
+ return host
+
+ def create_daemon(self):
+ daemon = model.Daemon('testhost', 'nova-testdaemon')
+ daemon.save()
+ return daemon
+
+ @defer.inlineCallbacks
+ def test_create_instance(self):
+ """store with create_instace, then test that a load finds it"""
+ instance = yield self.create_instance()
+ old = yield model.Instance(instance.identifier)
+ self.assertFalse(old.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_instance(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_instance()
+ yield instance.destroy()
+ newinst = yield model.Instance('i-test')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_instance_added_to_set(self):
+ """create, then check that it is listed for the project"""
+ instance = yield self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().all:
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_instance_associates_project(self):
+ """create, then check that it is listed for the project"""
+ instance = yield self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().by_project(instance.project):
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_host_class_finds_hosts(self):
+ host = yield self.create_host()
+ self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
+
+ @defer.inlineCallbacks
+ def test_host_class_doesnt_find_missing_hosts(self):
+ rv = yield model.Host.lookup('woahnelly')
+ self.assertEqual(None, rv)
+
+ @defer.inlineCallbacks
+ def test_create_host(self):
+ """store with create_host, then test that a load finds it"""
+ host = yield self.create_host()
+ old = yield model.Host(host.identifier)
+ self.assertFalse(old.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_host(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_host()
+ yield instance.destroy()
+ newinst = yield model.Host('testhost')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_host_added_to_set(self):
+ """create, then check that it is included in list"""
+ instance = yield self.create_host()
+ found = False
+ for x in model.Host.all():
+ if x.identifier == 'testhost':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_create_daemon_two_args(self):
+ """create a daemon with two arguments"""
+ d = yield self.create_daemon()
+ d = model.Daemon('testhost', 'nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_create_daemon_single_arg(self):
+ """Create a daemon using the combined host:bin format"""
+ d = yield model.Daemon("testhost:nova-testdaemon")
+ d.save()
+ d = model.Daemon('testhost:nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_equality_of_daemon_single_and_double_args(self):
+ """Create a daemon using the combined host:bin arg, find with 2"""
+ d = yield model.Daemon("testhost:nova-testdaemon")
+ d.save()
+ d = model.Daemon('testhost', 'nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_equality_daemon_of_double_and_single_args(self):
+ """Create a daemon using the combined host:bin arg, find with 2"""
+ d = yield self.create_daemon()
+ d = model.Daemon('testhost:nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_daemon(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_daemon()
+ yield instance.destroy()
+ newinst = yield model.Daemon('testhost', 'nova-testdaemon')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_daemon_heartbeat(self):
+ """Create a daemon, sleep, heartbeat, check for update"""
+ d = yield self.create_daemon()
+ ts = d['updated_at']
+ time.sleep(2)
+ d.heartbeat()
+ d2 = model.Daemon('testhost', 'nova-testdaemon')
+ ts2 = d2['updated_at']
+ self.assert_(ts2 > ts)
+
+ @defer.inlineCallbacks
+ def test_daemon_added_to_set(self):
+ """create, then check that it is included in list"""
+ instance = yield self.create_daemon()
+ found = False
+ for x in model.Daemon.all():
+ if x.identifier == 'testhost:nova-testdaemon':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_daemon_associates_host(self):
+ """create, then check that it is listed for the host"""
+ instance = yield self.create_daemon()
+ found = False
+ for x in model.Daemon.by_host('testhost'):
+ if x.identifier == 'testhost:nova-testdaemon':
+ found = True
+ self.assertTrue(found)
diff --git a/nova/utils.py b/nova/utils.py
index 325b062ee..094de5d74 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -29,6 +29,7 @@ import os.path
import inspect
import subprocess
import random
+from datetime import datetime
from nova import flags
@@ -114,3 +115,8 @@ def get_my_ip():
(addr, port) = csock.getsockname()
csock.close()
return addr
+
+def isotime(at=None):
+ if not at:
+ at = datetime.utcnow()
+ return at.strftime("%Y-%m-%dT%H:%M:%SZ")
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index 9c58358bd..273a6afd1 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -41,6 +41,7 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import validate
+from nova.compute import model
FLAGS = flags.FLAGS
@@ -151,18 +152,23 @@ class FakeBlockStore(BlockStore):
pass
-class Volume(datastore.RedisModel):
-
- object_type = 'volume'
+class Volume(model.BasicModel):
def __init__(self, volume_id=None):
- super(Volume, self).__init__(object_id=volume_id)
+ self.volume_id = volume_id
+ super(Volume, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.volume_id
+
+ def default_state(self):
+ return {"volume_id": self.volume_id}
@classmethod
def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
- vol = cls(volume_id=volume_id)
- vol['volume_id'] = volume_id
+ vol = cls(volume_id)
vol['node_name'] = FLAGS.storage_name
vol['size'] = size
vol['user_id'] = user_id
@@ -171,7 +177,6 @@ class Volume(datastore.RedisModel):
vol["instance_id"] = 'none'
vol["mountpoint"] = 'none'
vol['attach_time'] = 'none'
- vol["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
vol['status'] = "creating" # creating | available | in-use
vol['attach_status'] = "detached" # attaching | attached | detaching | detached
vol['delete_on_termination'] = 'False'
@@ -190,7 +195,7 @@ class Volume(datastore.RedisModel):
self['mountpoint'] = mountpoint
self['status'] = "in-use"
self['attach_status'] = "attaching"
- self['attach_time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ self['attach_time'] = utils.utctime()
self['delete_on_termination'] = 'False'
self.save()
diff --git a/run_tests.py b/run_tests.py
index aea766c92..91e886c76 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -53,6 +53,7 @@ from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.keeper_unittest import *
+from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *