summaryrefslogtreecommitdiffstats
path: root/nova/compute
diff options
context:
space:
mode:
authorEwan Mellor <ewan.mellor@citrix.com>2010-07-29 00:11:02 +0100
committerEwan Mellor <ewan.mellor@citrix.com>2010-07-29 00:11:02 +0100
commit9f4996e8738991a95a23cba2caa660f7002f94cd (patch)
treee40bc2e56ea96131200d5e12918bf5a429bb5cc1 /nova/compute
parentc5edaa2186add12947185cb1fd47e0a48eccafa9 (diff)
parentf61b62983f62aa10d7bed3bc1c406717663be923 (diff)
Merge with trunk, including fixing up conflicts with the removal of fake_users
and the reworking of node.py -> service.py.
Diffstat (limited to 'nova/compute')
-rw-r--r--nova/compute/disk.py7
-rw-r--r--nova/compute/linux_net.py2
-rw-r--r--nova/compute/model.py74
-rw-r--r--nova/compute/network.py17
-rw-r--r--nova/compute/service.py (renamed from nova/compute/node.py)39
5 files changed, 111 insertions, 28 deletions
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 08a22556e..1ffcca685 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -40,7 +40,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
formatted as ext2.
In the diagram below, dashes represent drive sectors.
- 0 a b c d e
+ +-----+------. . .-------+------. . .------+
+ | 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partiton | local partition |
+-----+------. . .-------+------. . .------+
@@ -64,8 +65,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
last_sector = local_last # e
# create an empty file
- execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, last_sector, sector_size))
+ yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
# make mbr partition
yield execute('parted --script %s mklabel msdos' % outfile)
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index 48e07da66..861ce779b 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -29,7 +29,7 @@ from nova import flags
FLAGS=flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
- '/etc/nova-dhcpbridge.conf',
+ '/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
def execute(cmd, addl_env=None):
diff --git a/nova/compute/model.py b/nova/compute/model.py
index cda188183..212830d3c 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -40,9 +40,11 @@ True
True
"""
+import datetime
import logging
import time
import redis
+import uuid
from nova import datastore
from nova import exception
@@ -228,6 +230,78 @@ class Daemon(datastore.BasicModel):
for x in cls.associated_to("host", hostname):
yield x
+class SessionToken(datastore.BasicModel):
+ """This is a short-lived auth token that is passed through web requests"""
+
+ def __init__(self, session_token):
+ self.token = session_token
+ self.default_ttl = FLAGS.auth_token_ttl
+ super(SessionToken, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.token
+
+ def default_state(self):
+ now = datetime.datetime.utcnow()
+ diff = datetime.timedelta(seconds=self.default_ttl)
+ expires = now + diff
+ return {'user': None, 'session_type': None, 'token': self.token,
+ 'expiry': expires.strftime(utils.TIME_FORMAT)}
+
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+ if not self['user']:
+ raise exception.Invalid("SessionToken requires a User association")
+ success = super(SessionToken, self).save()
+ if success:
+ self.associate_with("user", self['user'])
+ return True
+
+ @classmethod
+ def lookup(cls, key):
+ token = super(SessionToken, cls).lookup(key)
+ if token:
+ expires_at = utils.parse_isotime(token['expiry'])
+ if datetime.datetime.utcnow() >= expires_at:
+ token.destroy()
+ return None
+ return token
+
+ @classmethod
+ def generate(cls, userid, session_type=None):
+ """make a new token for the given user"""
+ token = str(uuid.uuid4())
+ while cls.lookup(token):
+ token = str(uuid.uuid4())
+ instance = cls(token)
+ instance['user'] = userid
+ instance['session_type'] = session_type
+ instance.save()
+ return instance
+
+ def update_expiry(self, **kwargs):
+        """updates the expiry attribute, but doesn't save"""
+ if not kwargs:
+ kwargs['seconds'] = self.default_ttl
+ time = datetime.datetime.utcnow()
+ diff = datetime.timedelta(**kwargs)
+ expires = time + diff
+ self['expiry'] = expires.strftime(utils.TIME_FORMAT)
+
+ def is_expired(self):
+ now = datetime.datetime.utcnow()
+ expires = utils.parse_isotime(self['expiry'])
+ return expires <= now
+
+ def ttl(self):
+ """number of seconds remaining before expiration"""
+ now = datetime.datetime.utcnow()
+ expires = utils.parse_isotime(self['expiry'])
+ delta = expires - now
+ return (delta.seconds + (delta.days * 24 * 3600))
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 43011f696..62d892e58 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -29,7 +29,7 @@ from nova import datastore
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import exception as compute_exception
from nova.compute import linux_net
@@ -144,7 +144,7 @@ class Vlan(datastore.BasicModel):
@datastore.absorb_connection_error
def destroy(self):
set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hdel(set_name, self.project)
+ datastore.Redis.instance().hdel(set_name, self.project_id)
def subnet(self):
vlan = int(self.vlan_id)
@@ -210,11 +210,11 @@ class BaseNetwork(datastore.BasicModel):
@property
def user(self):
- return users.UserManager.instance().get_user(self['user_id'])
+ return manager.AuthManager().get_user(self['user_id'])
@property
def project(self):
- return users.UserManager.instance().get_project(self['project_id'])
+ return manager.AuthManager().get_project(self['project_id'])
@property
def _hosts_key(self):
@@ -516,7 +516,7 @@ def get_vlan_for_project(project_id):
if not known_vlans.has_key(vstr):
return Vlan.create(project_id, vnum)
old_project_id = known_vlans[vstr]
- if not users.UserManager.instance().get_project(old_project_id):
+ if not manager.AuthManager().get_project(old_project_id):
vlan = Vlan.lookup(old_project_id)
if vlan:
# NOTE(todd): This doesn't check for vlan id match, because
@@ -529,6 +529,7 @@ def get_vlan_for_project(project_id):
# don't orphan any VLANs. It is basically
# garbage collection for after projects abandoned
# their reference.
+ vlan.destroy()
vlan.project_id = project_id
vlan.save()
return vlan
@@ -542,7 +543,7 @@ def get_network_by_interface(iface, security_group='default'):
def get_network_by_address(address):
logging.debug("Get Network By Address: %s" % address)
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
@@ -582,7 +583,7 @@ def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
# TODO(todd): It looks goofy to get a project from a UserManager.
# Refactor to still use the LDAP backend, but not User specific.
- project = users.UserManager.instance().get_project(project_id)
+ project = manager.AuthManager().get_project(project_id)
if not project:
raise exception.Error("Project %s doesn't exist, uhoh." %
project_id)
@@ -592,5 +593,5 @@ def get_project_network(project_id, security_group='default'):
def restart_nets():
""" Ensure the network for each user is enabled"""
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
get_project_network(project.id).express()
diff --git a/nova/compute/node.py b/nova/compute/service.py
index 533670b12..9b162edc7 100644
--- a/nova/compute/node.py
+++ b/nova/compute/service.py
@@ -17,31 +17,34 @@
# under the License.
"""
-Compute Node:
+Compute Service:
- Runs on each compute node, managing the
+ Runs on each compute host, managing the
hypervisor using the virt module.
"""
import base64
+import json
import logging
import os
import sys
from twisted.internet import defer
from twisted.internet import task
-from twisted.application import service
from nova import exception
from nova import flags
from nova import process
+from nova import service
from nova import utils
+from nova.compute import disk
from nova.compute import model
from nova.compute import network
from nova.compute import power_state
from nova.compute.instance_types import INSTANCE_TYPES
+from nova.objectstore import image # for image_path flag
from nova.virt import connection as virt_connection
-from nova.volume import storage
+from nova.volume import service as volume_service
FLAGS = flags.FLAGS
@@ -49,13 +52,13 @@ flags.DEFINE_string('instances_path', utils.abspath('../instances'),
'where instances are stored on disk')
-class Node(object, service.Service):
+class ComputeService(service.Service):
"""
Manages the running instances.
"""
def __init__(self):
""" load configuration options for this node and connect to the hypervisor"""
- super(Node, self).__init__()
+ super(ComputeService, self).__init__()
self._instances = {}
self._conn = virt_connection.get_connection()
self.instdir = model.InstanceDirectory()
@@ -174,29 +177,33 @@ class Node(object, service.Service):
@exception.wrap_exception
def attach_volume(self, instance_id = None,
volume_id = None, mountpoint = None):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
yield self._init_aoe()
- yield utils.runthis("Attached Volume: %s",
- "sudo virsh attach-disk %s /dev/etherd/%s %s"
- % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
+ yield process.simple_execute(
+ "sudo virsh attach-disk %s /dev/etherd/%s %s" %
+ (instance_id,
+ volume['aoe_device'],
+ mountpoint.rpartition('/dev/')[2]))
volume.finish_attach()
defer.returnValue(True)
+ @defer.inlineCallbacks
def _init_aoe(self):
- utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
- utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
+ yield process.simple_execute("sudo aoe-discover")
+ yield process.simple_execute("sudo aoe-stat")
+ @defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
- utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
- % (instance_id, target))
+ yield process.simple_execute(
+ "sudo virsh detach-disk %s %s " % (instance_id, target))
volume.finish_detach()
- return defer.succeed(True)
+ defer.returnValue(True)
class Group(object):