From 35dc27bbcb3a29ae662a0f8ccfed7a0f76a49c13 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 19 Jul 2010 14:17:43 -0700 Subject: Ask curl to set exit code if resource was not found --- nova/compute/node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index d681ec661..8f829995b 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -450,7 +450,7 @@ class Instance(object): def _fetch_s3_image(self, image, path): url = _image_url('%s/image' % image) - d = self._pool.simpleExecute('curl --silent %s -o %s' % (url, path)) + d = self._pool.simpleExecute('curl --silent --fail %s -o %s' % (url, path)) return d def _fetch_local_image(self, image, path): -- cgit From 63513736b0340efd197a7b905208fa90d63ab260 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 20 Jul 2010 00:32:42 -0700 Subject: Able to boot without kernel or ramdisk. libvirt.xml.template is now a Cheetah template --- nova/compute/libvirt.xml.template | 26 +++++++++++++++----------- nova/compute/node.py | 26 ++++++++++++++++++++++---- nova/endpoint/cloud.py | 4 ++-- 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template index a763e8a4d..6fb63feae 100644 --- a/nova/compute/libvirt.xml.template +++ b/nova/compute/libvirt.xml.template @@ -1,31 +1,35 @@ - %(name)s + ${name} - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk + hvm +#if $getVar('kernel', None) + ${kernel} + #if $getVar('ramdisk', None) + ${ramdisk} + #end if root=/dev/vda1 console=ttyS0 +#end if - %(memory_kb)s - %(vcpus)s + ${memory_kb} + ${vcpus} /usr/bin/kvm - + - - + + - + - %(nova)s + ${nova} diff --git a/nova/compute/node.py b/nova/compute/node.py index 8f829995b..7c3b9a677 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -34,6 +34,7 @@ from twisted.internet import defer from twisted.internet import task from twisted.application import service +from Cheetah.Template import Template try: import libvirt @@ -312,13 +313,30 @@ class Instance(object): def toXml(self): # TODO(termie): cache? 
logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() + template_contents = open(FLAGS.libvirt_xml_template).read() xml_info = self.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying xml_info['nova'] = json.dumps(self.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info + + if xml_info['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if xml_info['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + if xml_info['ramdisk_id'] or xml_info['kernel_id']: + xml_info['disk'] = xml_info['basepath'] + "/disk" + else: + xml_info['disk'] = xml_info['basepath'] + "/disk-raw" + + try: + libvirt_xml = str(Template(template_contents, searchList=[ xml_info ] )) + except Exception as e: + logging.warning("Error running template: %s" % e) + raise + logging.debug("Finished the toXML method") return libvirt_xml @@ -487,9 +505,9 @@ class Instance(object): if not os.path.exists(basepath('disk')): yield _fetch_file(data['image_id'], basepath('disk-raw')) - if not os.path.exists(basepath('kernel')): + if data['kernel_id'] and not os.path.exists(basepath('kernel')): yield _fetch_file(data['kernel_id'], basepath('kernel')) - if not os.path.exists(basepath('ramdisk')): + if data['ramdisk_id'] and not os.path.exists(basepath('ramdisk')): yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804b..b7a4fe20e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -544,8 +544,8 @@ class CloudController(object): for num in range(int(kwargs['max_count'])): inst = self.instdir.new() inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id + inst['kernel_id'] = kernel_id or '' + inst['ramdisk_id'] = ramdisk_id or '' inst['user_data'] = kwargs.get('user_data', '') inst['instance_type'] = kwargs.get('instance_type', 'm1.small') inst['reservation_id'] = reservation_id -- cgit From 9cf32d6d65035299ecfcb0563cef8ddab3c0ee4c Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 20 Jul 2010 14:25:43 -0700 Subject: Able to set up DNS, and remove udev network rules --- nova/compute/disk.py | 29 +++++++++++++++----- nova/compute/libvirt.xml.template | 1 + nova/compute/network.py | 3 +++ nova/compute/node.py | 57 ++++++++++++++++++++++++++------------- nova/compute/resolv.conf.template | 1 + 5 files changed, 66 insertions(+), 25 deletions(-) create mode 100644 nova/compute/resolv.conf.template diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 08a22556e..abbbc01c2 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -84,7 +84,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): % (infile, outfile, sector_size, primary_first)) @defer.inlineCallbacks -def inject_data(image, key=None, net=None, partition=None, execute=None): +def inject_data(image, key=None, net=None, dns=None, remove_network_udev=False, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject @@ -93,7 +93,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): If partition is not specified it mounts the image as a single partition. 
""" - out, err = yield execute('sudo losetup -f --show %s' % image) + out, err = yield execute('sudo losetup --find --show %s' % image) if err: raise exception.Error('Could not attach image to loopback: %s' % err) device = out.strip() @@ -107,6 +107,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): partition) else: mapped_device = device + + # Configure ext2fs so that it doesn't auto-check every N boots out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) tmpdir = tempfile.mkdtemp() @@ -123,6 +125,11 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): yield _inject_key_into_fs(key, tmpdir, execute=execute) if net: yield _inject_net_into_fs(net, tmpdir, execute=execute) + if dns: + yield _inject_dns_into_fs(dns, tmpdir, execute=execute) + if remove_network_udev: + yield _remove_network_udev(tmpdir, execute=execute) + finally: # unmount device yield execute('sudo umount %s' % mapped_device) @@ -134,11 +141,11 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): yield execute('sudo kpartx -d %s' % device) finally: # remove loopback - yield execute('sudo losetup -d %s' % device) + yield execute('sudo losetup --detach %s' % device) @defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): - sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') + sshdir = os.path.join(fs, 'root', '.ssh') yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter yield execute('sudo chown root %s' % sshdir) yield execute('sudo chmod 700 %s' % sshdir) @@ -147,7 +154,17 @@ def _inject_key_into_fs(key, fs, execute=None): @defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): - netfile = os.path.join(os.path.join(os.path.join( - fs, 'etc'), 'network'), 'interfaces') + netfile = os.path.join(fs, 'etc', 'network', 'interfaces') yield execute('sudo tee %s' % netfile, net) +@defer.inlineCallbacks +def _inject_dns_into_fs(dns, fs, execute=None): + dnsfile = os.path.join(fs, 'etc', 'resolv.conf') + yield execute('sudo tee %s' % dnsfile, dns) + +@defer.inlineCallbacks +def _remove_network_udev(fs, execute=None): + # This is correct for Ubuntu, but might not be right for other distros + rulesfile = os.path.join(fs, 'etc', 'udev', 'rules.d', '70-persistent-net.rules') + yield execute('rm -f %s' % rulesfile) + diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template index 6fb63feae..3bfec9b3a 100644 --- a/nova/compute/libvirt.xml.template +++ b/nova/compute/libvirt.xml.template @@ -16,6 +16,7 @@ ${memory_kb} ${vcpus} + /usr/bin/kvm diff --git a/nova/compute/network.py b/nova/compute/network.py index bdb8a22f9..0b24ca7d1 100644 --- a/nova/compute/network.py +++ b/nova/compute/network.py @@ -62,6 +62,9 @@ flags.DEFINE_list('simple_network_ips', ['192.168.0.2'], flags.DEFINE_string('simple_network_template', utils.abspath('compute/interfaces.template'), 'Template file for simple network') +flags.DEFINE_string('simple_network_dns_template', + utils.abspath('compute/resolv.conf.template'), + 'Template file for DNS settings for simple network') flags.DEFINE_string('simple_network_netmask', '255.255.255.0', 'Netmask for simple network') flags.DEFINE_string('simple_network_network', '192.168.0.0', diff --git a/nova/compute/node.py b/nova/compute/node.py index 7c3b9a677..56429b0f0 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -505,37 +505,56 @@ class Instance(object): if not os.path.exists(basepath('disk')): yield _fetch_file(data['image_id'], 
basepath('disk-raw')) - if data['kernel_id'] and not os.path.exists(basepath('kernel')): - yield _fetch_file(data['kernel_id'], basepath('kernel')) - if data['ramdisk_id'] and not os.path.exists(basepath('ramdisk')): - yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) + + using_kernel = data['kernel_id'] and True + + if using_kernel: + if os.path.exists(basepath('kernel')): + yield _fetch_file(data['kernel_id'], basepath('kernel')) + if data['ramdisk_id'] and not os.path.exists(basepath('ramdisk')): + yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, input=input, error_ok=1) + # For now, we assume that if we're not using a kernel, we're using a partitioned disk image + # where the target partition is the first partition + target_partition = None + if not using_kernel: + target_partition = "1" + key = data['key_data'] net = None + dns = None if FLAGS.simple_network: + network_info = { + 'address': data['private_dns_name'], + 'network': FLAGS.simple_network_network, + 'netmask': FLAGS.simple_network_netmask, + 'gateway': FLAGS.simple_network_gateway, + 'broadcast': FLAGS.simple_network_broadcast, + 'dns': FLAGS.simple_network_dns} + with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': FLAGS.simple_network_network, - 'netmask': FLAGS.simple_network_netmask, - 'gateway': FLAGS.simple_network_gateway, - 'broadcast': FLAGS.simple_network_broadcast, - 'dns': FLAGS.simple_network_dns} - if key or net: + net = f.read() % network_info + + with open(FLAGS.simple_network_dns_template) as f: + dns =str(Template(f.read(), searchList=[ network_info ] )) + + if key or net or dns: logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) - - if os.path.exists(basepath('disk')): - yield self._pool.simpleExecute('rm -f %s' % basepath('disk')) + yield disk.inject_data(basepath('disk-raw'), key=key, net=net, dns=dns, remove_network_udev=True, partition=target_partition, execute=execute) - bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] - * 1024 * 1024 * 1024) - yield disk.partition( - basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + if using_kernel: + if os.path.exists(basepath('disk')): + yield self._pool.simpleExecute('rm -f %s' % basepath('disk')) + bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] + * 1024 * 1024 * 1024) + yield disk.partition( + basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + @defer.inlineCallbacks @exception.wrap_exception def spawn(self): diff --git a/nova/compute/resolv.conf.template b/nova/compute/resolv.conf.template new file mode 100644 index 000000000..7ddb55190 --- /dev/null +++ b/nova/compute/resolv.conf.template @@ -0,0 +1 @@ +nameserver ${dns} -- cgit From 5bb1d8d5ab8581696a98a159b293b992e72eec49 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 20 Jul 2010 14:31:55 -0700 Subject: Cleanups --- nova/compute/node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 56429b0f0..ba140a219 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -509,7 +509,7 @@ class Instance(object): using_kernel = data['kernel_id'] and True if using_kernel: - if os.path.exists(basepath('kernel')): + if not os.path.exists(basepath('kernel')): yield _fetch_file(data['kernel_id'], basepath('kernel')) if data['ramdisk_id'] and not 
os.path.exists(basepath('ramdisk')): yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) @@ -540,7 +540,7 @@ class Instance(object): net = f.read() % network_info with open(FLAGS.simple_network_dns_template) as f: - dns =str(Template(f.read(), searchList=[ network_info ] )) + dns = str(Template(f.read(), searchList=[ network_info ] )) if key or net or dns: logging.info('Injecting data into image %s', data['image_id']) -- cgit From c599914304b262067c19b2968ab50826b4d9bcd3 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 12:54:03 -0700 Subject: Removed duplicate toXml method --- nova/virt/libvirt_conn.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 43b5eded2..6bc5ebf89 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -228,20 +228,6 @@ class LibvirtConnection(object): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self, instance): - # TODO(termie): cache? - logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = instance.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(instance.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - def toXml(self): # TODO(termie): cache? logging.debug("Starting the toXML method") -- cgit From 6526f21d9bf5a53546240ec29099d68933165500 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 13:13:00 -0700 Subject: Added Cheetah to pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..a235d6246 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,4 +1,5 @@ IPy==0.70 +Cheetah==2.4.2.1 M2Crypto==0.20.2 amqplib==0.6.1 anyjson==0.2.4 -- cgit From 7985f5fb32432630b3fef775f601900f89346ed2 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 13:35:21 -0700 Subject: Accept a configurable libvirt_uri --- nova/virt/libvirt_conn.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6bc5ebf89..8d473296a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -49,6 +49,9 @@ FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') +flags.DEFINE_string('libvirt_uri', + 'qemu:///system', + 'Libvirt URI to connect to') def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -67,10 +70,11 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + libvirt_uri = str(FLAGS.libvirt_uri) if read_only: - self._conn = libvirt.openReadOnly('qemu:///system') + self._conn = libvirt.openReadOnly(libvirt_uri) else: - self._conn = libvirt.openAuth('qemu:///system', auth, 0) + self._conn = libvirt.openAuth(libvirt_uri, auth, 0) def list_instances(self): -- cgit From 6227cb13c311624998b9c2c6e55fc0d261cfcb6c Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 31 Jul 2010 20:49:21 -0700 Subject: Recognize 'magic' kernel value that means "don't use a kernel" - currently aki-00000000 --- nova/endpoint/cloud.py | 10 ++++++++-- nova/flags.py | 3 +++ 2 
files changed, 11 insertions(+), 2 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index b4157e48c..344352a93 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -523,9 +523,15 @@ class CloudController(object): kernel_id = kwargs.get('kernel_id', kernel_id) ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) + if kernel_id: + self._get_image(context, kernel_id) + if ramdisk_id: + self._get_image(context, ramdisk_id) logging.debug("Going to run instances...") reservation_id = utils.generate_uid('r') diff --git a/nova/flags.py b/nova/flags.py index f35f5fa10..caae33e14 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -57,6 +57,9 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') +DEFINE_string('null_kernel', + 'aki-00000000', + 'Kernel image that indicates not to use a kernel (use a raw disk image instead)') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -- cgit From 6c32e87c1be80230cf058586cee5a94cd25670b8 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 31 Jul 2010 21:35:58 -0700 Subject: Fixed up some of the raw disk stuff that broke in the abstraction out of libvirt --- nova/compute/disk.py | 3 +++ nova/virt/libvirt_conn.py | 48 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 848ce8b54..cace849fa 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -109,6 +109,9 @@ def inject_data(image, key=None, net=None, dns=None, remove_network_udev=False, else: mapped_device = device + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found: %s' % mapped_device) + # Configure ext2fs so that it doesn't auto-check every N boots out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8d473296a..d18158678 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -195,32 +195,54 @@ class LibvirtConnection(object): user = manager.AuthManager().get_user(data['user_id']) if not os.path.exists(basepath('disk')): yield images.fetch(data['image_id'], basepath('disk-raw'), user) - if not os.path.exists(basepath('kernel')): - yield images.fetch(data['kernel_id'], basepath('kernel'), user) - if not os.path.exists(basepath('ramdisk')): - yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) + + using_kernel = data['kernel_id'] and True + + if using_kernel: + if not os.path.exists(basepath('kernel')): + yield images.fetch(data['kernel_id'], basepath('kernel'), user) + if not os.path.exists(basepath('ramdisk')): + yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) execute = lambda cmd, input=None: \ process.simple_execute(cmd=cmd, input=input, error_ok=1) + # For now, we assume that if we're not using a kernel, we're using a partitioned disk image + # where the target partition is the first partition + target_partition = None + if not using_kernel: + target_partition = "1" + key = data['key_data'] net = None if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], + network_info = {'address': data['private_dns_name'], 
'network': FLAGS.simple_network_network, 'netmask': FLAGS.simple_network_netmask, 'gateway': FLAGS.simple_network_gateway, 'broadcast': FLAGS.simple_network_broadcast, 'dns': FLAGS.simple_network_dns} - if key or net: + + with open(FLAGS.simple_network_template) as f: + net = f.read() % network_info + + with open(FLAGS.simple_network_dns_template) as f: + dns = str(Template(f.read(), searchList=[ network_info ] )) + + + if key or net or dns: logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + try: + yield disk.inject_data(basepath('disk-raw'), key=key, net=net, dns=dns, remove_network_udev=True, partition=target_partition, execute=execute) + except Exception as e: + # This could be a windows image, or a vmdk format disk + logging.warn('Could not inject data; ignoring. (%s)' % e) - if os.path.exists(basepath('disk')): - yield process.simple_execute('rm -f %s' % basepath('disk')) + if using_kernel: + if os.path.exists(basepath('disk')): + yield process.simple_execute('rm -f %s' % basepath('disk')) bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb'] * 1024 * 1024 * 1024) @@ -232,15 +254,15 @@ class LibvirtConnection(object): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self): + def toXml(self, instance): # TODO(termie): cache? logging.debug("Starting the toXML method") template_contents = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() + xml_info = instance.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) + xml_info['nova'] = json.dumps(instance.datamodel.copy()) if xml_info['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" -- cgit From 759bab6059ef2e4c463a73e12fe85fe4b147eba7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 21 Aug 2010 13:07:03 +0100 Subject: Clarified what the 'Mapped device not found' exception really means. Fixed TODO. Some formatting to be closer to 80 chars --- nova/compute/disk.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 6099eb4ba..4ede9a7aa 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -87,7 +87,9 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): @defer.inlineCallbacks -def inject_data(image, key=None, net=None, dns=None, remove_network_udev=False, partition=None, execute=None): +def inject_data( image, key=None, net=None, dns=None, + remove_network_udev=False, + partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject @@ -111,8 +113,12 @@ def inject_data(image, key=None, net=None, dns=None, remove_network_udev=False, else: mapped_device = device + # We can only loopback mount raw images. 
If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found: %s' % mapped_device) + raise exception.Error( + 'Mapped device was not found (we can only inject raw disk images): %s' + % mapped_device) # Configure ext2fs so that it doesn't auto-check every N boots out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) @@ -172,7 +178,9 @@ def _inject_dns_into_fs(dns, fs, execute=None): @defer.inlineCallbacks def _remove_network_udev(fs, execute=None): - # This is correct for Ubuntu, but might not be right for other distros + # TODO(justinsb): This is correct for Ubuntu, but might not be right for + # other distros. There is a much bigger discussion to be had about what + # we inject and how we inject it. rulesfile = os.path.join(fs, 'etc', 'udev', 'rules.d', '70-persistent-net.rules') yield execute('rm -f %s' % rulesfile) -- cgit From 9732c0af89f21490cc8d6bc80799bbc8b36fb441 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 14 Oct 2010 13:38:35 -0700 Subject: Minimized diff, fixed formatting --- nova/compute/disk.py | 22 +--------------------- nova/compute/resolv.conf.template | 1 - nova/flags.py | 3 ++- nova/virt/libvirt.uml.xml.template | 2 +- nova/virt/libvirt.xen.xml.template | 2 +- nova/virt/libvirt_conn.py | 2 ++ 6 files changed, 7 insertions(+), 25 deletions(-) delete mode 100644 nova/compute/resolv.conf.template diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 4ede9a7aa..b1ddb0a1f 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -87,9 +87,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): @defer.inlineCallbacks -def inject_data( image, key=None, net=None, dns=None, - remove_network_udev=False, - partition=None, execute=None): +def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject @@ -137,11 +135,6 @@ def inject_data( image, key=None, net=None, dns=None, yield _inject_key_into_fs(key, tmpdir, execute=execute) if net: yield _inject_net_into_fs(net, tmpdir, execute=execute) - if dns: - yield _inject_dns_into_fs(dns, tmpdir, execute=execute) - if remove_network_udev: - yield _remove_network_udev(tmpdir, execute=execute) - finally: # unmount device yield execute('sudo umount %s' % mapped_device) @@ -171,16 +164,3 @@ def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(fs, 'etc', 'network', 'interfaces') yield execute('sudo tee %s' % netfile, net) -@defer.inlineCallbacks -def _inject_dns_into_fs(dns, fs, execute=None): - dnsfile = os.path.join(fs, 'etc', 'resolv.conf') - yield execute('sudo tee %s' % dnsfile, dns) - -@defer.inlineCallbacks -def _remove_network_udev(fs, execute=None): - # TODO(justinsb): This is correct for Ubuntu, but might not be right for - # other distros. There is a much bigger discussion to be had about what - # we inject and how we inject it. 
- rulesfile = os.path.join(fs, 'etc', 'udev', 'rules.d', '70-persistent-net.rules') - yield execute('rm -f %s' % rulesfile) - diff --git a/nova/compute/resolv.conf.template b/nova/compute/resolv.conf.template deleted file mode 100644 index 7ddb55190..000000000 --- a/nova/compute/resolv.conf.template +++ /dev/null @@ -1 +0,0 @@ -nameserver ${dns} diff --git a/nova/flags.py b/nova/flags.py index 2b96a15f7..d2c22e46b 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -202,7 +202,8 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'aki-00000000', - 'kernel image that indicates not to use a kernel, to use a raw disk image instead') + 'kernel image that indicates not to use a kernel, ' + ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template index f6e5fad69..da9588049 100644 --- a/nova/virt/libvirt.uml.xml.template +++ b/nova/virt/libvirt.uml.xml.template @@ -12,7 +12,7 @@ - + diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template index 9508ad3b7..8f650e512 100644 --- a/nova/virt/libvirt.xen.xml.template +++ b/nova/virt/libvirt.xen.xml.template @@ -23,7 +23,7 @@ - + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ba5d6dbac..ece98087b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -72,6 +72,7 @@ flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') + def get_connection(read_only): # These are loaded late so that there's no need to install these # libraries when not using libvirt. 
@@ -126,6 +127,7 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + if read_only: return libvirt.openReadOnly(uri) else: -- cgit From f8028c0a4cd1c3cfb8a9c6b4c397fd67ce912cce Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 14 Oct 2010 13:48:34 -0700 Subject: Removed stray spaces that were causing an unnecessary diff line --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ece98087b..68791c28a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -127,7 +127,7 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] - + if read_only: return libvirt.openReadOnly(uri) else: -- cgit From 98623a160078dfed7347dcd1539b0cd27e51644a Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 15 Oct 2010 11:06:16 -0700 Subject: Removed 'and True' oddity --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 68791c28a..d78a8f0db 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -324,7 +324,7 @@ class LibvirtConnection(object): if not os.path.exists(basepath('disk')): yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) - using_kernel = inst.kernel_id and True + using_kernel = inst.kernel_id if using_kernel: if not os.path.exists(basepath('kernel')): yield images.fetch(inst.kernel_id, basepath('kernel'), user, project) -- cgit From 2337fab0979b72bbc7e7730e94518a0e835a2751 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Mon, 25 Oct 2010 03:45:19 +0900 Subject: part way through porting the codebase off of twisted this provides a very basic eventlet-based service replacement for the twistd-based services, a replacement for task.LoopingCall also adds nova-combined with the goal of running a single service when doing local testing and dev --- bin/nova-combined | 61 ++++++++++ bin/nova-compute | 12 +- bin/nova-network | 14 +-- nova/compute/disk.py | 66 +++++------ nova/compute/manager.py | 28 ++--- nova/flags.py | 7 ++ nova/manager.py | 1 - nova/network/manager.py | 4 +- nova/server.py | 6 +- nova/service_eventlet.py | 288 ++++++++++++++++++++++++++++++++++++++++++++++ nova/utils.py | 36 ++++++ nova/virt/fake.py | 6 +- nova/virt/libvirt_conn.py | 172 +++++++++++++-------------- nova/virt/xenapi.py | 103 +++++++---------- 14 files changed, 574 insertions(+), 230 deletions(-) create mode 100755 bin/nova-combined create mode 100644 nova/service_eventlet.py diff --git a/bin/nova-combined b/bin/nova-combined new file mode 100755 index 000000000..65865acd9 --- /dev/null +++ b/bin/nova-combined @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" + Twistd daemon for the nova compute nodes. +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +from eventlet import greenthread + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from nova import api +from nova import flags +from nova import service_eventlet +from nova import wsgi + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('api_port', 8773, 'API port') + + +if __name__ == '__main__': + FLAGS(sys.argv) + + compute = service_eventlet.Service.create(binary='nova-compute') + network = service_eventlet.Service.create(binary='nova-network') + volume = service_eventlet.Service.create(binary='nova-volume') + scheduler = service_eventlet.Service.create(binary='nova-scheduler') + #objectstore = service_eventlet.Service.create(binary='nova-objectstore') + + service_eventlet.serve(compute, network, volume, scheduler) + wsgi.run_server(api.API(), FLAGS.api_port) + diff --git a/bin/nova-compute b/bin/nova-compute index 1724e9659..600fbb897 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -21,6 +21,9 @@ Twistd daemon for the nova compute nodes. """ +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,12 +35,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service -from nova import twistd - +from nova import service_eventlet if __name__ == '__main__': - twistd.serve(__file__) - -if __name__ == '__builtin__': - application = service.Service.create() # pylint: disable=C0103 + service_eventlet.serve() diff --git a/bin/nova-network b/bin/nova-network index fa88aeb47..600fbb897 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -18,9 +18,12 @@ # under the License. """ - Twistd daemon for the nova network nodes. + Twistd daemon for the nova compute nodes. 
""" +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,12 +35,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service -from nova import twistd - +from nova import service_eventlet if __name__ == '__main__': - twistd.serve(__file__) - -if __name__ == '__builtin__': - application = service.Service.create() # pylint: disable-msg=C0103 + service_eventlet.serve() diff --git a/nova/compute/disk.py b/nova/compute/disk.py index e362b4507..ad4c2c092 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -25,8 +25,6 @@ import logging import os import tempfile -from twisted.internet import defer - from nova import exception from nova import flags @@ -38,7 +36,6 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256, 'block_size to use for dd') -@defer.inlineCallbacks def partition(infile, outfile, local_bytes=0, resize=True, local_type='ext2', execute=None): """Takes a single partition represented by infile and writes a bootable @@ -60,10 +57,10 @@ def partition(infile, outfile, local_bytes=0, resize=True, file_size = os.path.getsize(infile) if resize and file_size < FLAGS.minimum_root_size: last_sector = FLAGS.minimum_root_size / sector_size - 1 - yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (infile, last_sector, sector_size)) - yield execute('e2fsck -fp %s' % infile, check_exit_code=False) - yield execute('resize2fs %s' % infile) + execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' + % (infile, last_sector, sector_size)) + execute('e2fsck -fp %s' % infile, check_exit_code=False) + execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: logging.warn("Input partition size not evenly divisible by" @@ -82,30 +79,29 @@ def partition(infile, outfile, local_bytes=0, resize=True, last_sector = local_last # e # create an empty file - yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, mbr_last, sector_size)) + execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' + % (outfile, mbr_last, sector_size)) # make mbr partition - yield execute('parted --script %s mklabel msdos' % outfile) + execute('parted --script %s mklabel msdos' % outfile) # append primary file - yield execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append' - % (infile, outfile, FLAGS.block_size)) + execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append' + % (infile, outfile, FLAGS.block_size)) # make primary partition - yield execute('parted --script %s mkpart primary %ds %ds' - % (outfile, primary_first, primary_last)) + execute('parted --script %s mkpart primary %ds %ds' + % (outfile, primary_first, primary_last)) if local_bytes > 0: # make the file bigger - yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, last_sector, sector_size)) + execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' + % (outfile, last_sector, sector_size)) # make and format local partition - yield execute('parted --script %s mkpartfs primary %s %ds %ds' - % (outfile, local_type, local_first, local_last)) + execute('parted --script %s mkpartfs primary %s %ds %ds' + % (outfile, local_type, local_first, local_last)) -@defer.inlineCallbacks def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. 
@@ -115,26 +111,26 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): If partition is not specified it mounts the image as a single partition. """ - out, err = yield execute('sudo losetup -f --show %s' % image) + out, err = execute('sudo losetup -f --show %s' % image) if err: raise exception.Error('Could not attach image to loopback: %s' % err) device = out.strip() try: if not partition is None: # create partition - out, err = yield execute('sudo kpartx -a %s' % device) + out, err = execute('sudo kpartx -a %s' % device) if err: raise exception.Error('Failed to load partition: %s' % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], partition) else: mapped_device = device - out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) tmpdir = tempfile.mkdtemp() try: # mount loopback to dir - out, err = yield execute( + out, err = execute( 'sudo mount %s %s' % (mapped_device, tmpdir)) if err: raise exception.Error('Failed to mount filesystem: %s' % err) @@ -142,35 +138,33 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): try: if key: # inject key file - yield _inject_key_into_fs(key, tmpdir, execute=execute) + _inject_key_into_fs(key, tmpdir, execute=execute) if net: - yield _inject_net_into_fs(net, tmpdir, execute=execute) + _inject_net_into_fs(net, tmpdir, execute=execute) finally: # unmount device - yield execute('sudo umount %s' % mapped_device) + execute('sudo umount %s' % mapped_device) finally: # remove temporary directory - yield execute('rmdir %s' % tmpdir) + execute('rmdir %s' % tmpdir) if not partition is None: # remove partitions - yield execute('sudo kpartx -d %s' % device) + execute('sudo kpartx -d %s' % device) finally: # remove loopback - yield execute('sudo losetup -d %s' % device) + execute('sudo losetup -d %s' % device) -@defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') - yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - yield execute('sudo chown root %s' % sshdir) - yield execute('sudo chmod 700 %s' % sshdir) + execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter + execute('sudo chown root %s' % sshdir) + execute('sudo chmod 700 %s' % sshdir) keyfile = os.path.join(sshdir, 'authorized_keys') - yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') -@defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(os.path.join(os.path.join( fs, 'etc'), 'network'), 'interfaces') - yield execute('sudo tee %s' % netfile, net) + execute('sudo tee %s' % netfile, net) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 523bb8893..a105a1dd0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -25,8 +25,6 @@ import datetime import logging import os -from twisted.internet import defer - from nova import exception from nova import flags from nova import manager @@ -54,7 +52,7 @@ class ComputeManager(manager.Manager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(*args, **kwargs) - + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info""" # FIXME(ja): include other fields from state? 
@@ -62,12 +60,10 @@ class ComputeManager(manager.Manager): state = self.driver.get_info(instance_ref.name)['state'] self.db.instance_set_state(context, instance_id, state) - @defer.inlineCallbacks @exception.wrap_exception def refresh_security_group(self, context, security_group_id, **_kwargs): - yield self.driver.refresh_security_group(security_group_id) + self.driver.refresh_security_group(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): """Launch a new instance with specified options.""" @@ -89,7 +85,7 @@ class ComputeManager(manager.Manager): 'spawning') try: - yield self.driver.spawn(instance_ref) + self.driver.spawn(instance_ref) now = datetime.datetime.utcnow() self.db.instance_update(context, instance_id, @@ -103,7 +99,6 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) - @defer.inlineCallbacks @exception.wrap_exception def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" @@ -116,12 +111,11 @@ class ComputeManager(manager.Manager): raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - yield self.driver.destroy(instance_ref) + self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) - @defer.inlineCallbacks @exception.wrap_exception def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" @@ -142,7 +136,7 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'rebooting') - yield self.driver.reboot(instance_ref) + self.driver.reboot(instance_ref) self._update_state(context, instance_id) @exception.wrap_exception @@ -154,7 +148,6 @@ class ComputeManager(manager.Manager): return self.driver.get_console_output(instance_ref) - @defer.inlineCallbacks @exception.wrap_exception def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" @@ -164,13 +157,12 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) dev_path = yield self.volume_manager.setup_compute_volume(context, volume_id) - yield self.driver.attach_volume(instance_ref['ec2_id'], - dev_path, - mountpoint) + self.driver.attach_volume(instance_ref['ec2_id'], + dev_path, + mountpoint) self.db.volume_attached(context, volume_id, instance_id, mountpoint) defer.returnValue(True) - @defer.inlineCallbacks @exception.wrap_exception def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" @@ -180,7 +172,7 @@ class ComputeManager(manager.Manager): volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) - yield self.driver.detach_volume(instance_ref['ec2_id'], - volume_ref['mountpoint']) + self.driver.detach_volume(instance_ref['ec2_id'], + volume_ref['mountpoint']) self.db.volume_detached(context, volume_id) defer.returnValue(True) diff --git a/nova/flags.py b/nova/flags.py index f3b0384ad..da9987700 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -139,6 +139,8 @@ class FlagValues(gflags.FlagValues): FLAGS = FlagValues() +gflags.FLAGS = FLAGS + def _wrapper(func): def _wrapped(*args, **kw): @@ -159,6 +161,11 @@ DEFINE_list = _wrapper(gflags.DEFINE_list) DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = 
_wrapper(gflags.DEFINE_multi_int) +DEFINE_flag = _wrapper(gflags.DEFINE_flag) + +HelpFlag = gflags.HelpFlag +HelpshortFlag = gflags.HelpshortFlag +HelpXMLFlag = gflags.HelpXMLFlag def DECLARE(name, module_string, flag_values=FLAGS): diff --git a/nova/manager.py b/nova/manager.py index 4244b2db4..994d6e7af 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -39,7 +39,6 @@ class Manager(object): db_driver = FLAGS.db_driver self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 - @defer.inlineCallbacks def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval""" yield diff --git a/nova/network/manager.py b/nova/network/manager.py index fddb77663..37a15ae05 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -25,7 +25,6 @@ import logging import math import IPy -from twisted.internet import defer from nova import context from nova import db @@ -315,10 +314,9 @@ class FlatDHCPManager(NetworkManager): class VlanManager(NetworkManager): """Vlan network with dhcp""" - @defer.inlineCallbacks def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval""" - yield super(VlanManager, self).periodic_tasks(context) + super(VlanManager, self).periodic_tasks(context) now = datetime.datetime.utcnow() timeout = FLAGS.fixed_ip_disassociate_timeout time = now - datetime.timedelta(seconds=timeout) diff --git a/nova/server.py b/nova/server.py index cb424caa1..fba340a54 100644 --- a/nova/server.py +++ b/nova/server.py @@ -135,9 +135,9 @@ def daemonize(args, name, main): with daemon.DaemonContext( detach_process=FLAGS.daemonize, working_directory=FLAGS.working_directory, - pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile, - acquire_timeout=1, - threaded=False), + #pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile, + # acquire_timeout=1, + # threaded=False), stdin=stdin, stdout=stdout, stderr=stderr, diff --git a/nova/service_eventlet.py b/nova/service_eventlet.py new file mode 100644 index 000000000..eac45a981 --- /dev/null +++ b/nova/service_eventlet.py @@ -0,0 +1,288 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Generic Node baseclass for all workers that run on hosts +""" + +import inspect +import logging +import os +import sys + +from eventlet import event +from eventlet import greenthread +from eventlet import greenpool + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import rpc +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('report_interval', 10, + 'seconds between nodes reporting state to datastore', + lower_bound=1) + +flags.DEFINE_integer('periodic_interval', 60, + 'seconds between running periodic tasks', + lower_bound=1) + +flags.DEFINE_string('pidfile', None, + 'pidfile to use for this service') + + +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) + + +class Service(object): + """Base class for workers that run on hosts.""" + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, *args, **kwargs): + self.host = host + self.binary = binary + self.topic = topic + self.manager_class_name = manager + self.report_interval = report_interval + self.periodic_interval = periodic_interval + super(Service, self).__init__(*args, **kwargs) + self.saved_args, self.saved_kwargs = args, kwargs + + def start(self): + manager_class = utils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, *self.saved_args, + **self.saved_kwargs) + self.manager.init_host() + self.model_disconnected = False + ctxt = context.get_admin_context() + try: + service_ref = db.service_get_by_args(ctxt, + self.host, + self.binary) + self.service_id = service_ref['id'] + except exception.NotFound: + self._create_service_ref(ctxt) + + conn1 = rpc.Connection.instance(new=True) + conn2 = rpc.Connection.instance(new=True) + if self.report_interval: + consumer_all = rpc.AdapterConsumer( + connection=conn1, + topic=self.topic, + proxy=self) + consumer_node = rpc.AdapterConsumer( + connection=conn2, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + + consumer_all.attach_to_eventlet() + consumer_node.attach_to_eventlet() + + pulse = utils.LoopingCall(self.report_state) + pulse.start(interval=self.report_interval, now=False) + + if self.periodic_interval: + pulse = utils.LoopingCall(self.periodic_tasks) + pulse.start(interval=self.periodic_interval, now=False) + + def _create_service_ref(self, context): + service_ref = db.service_create(context, + {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0}) + self.service_id = service_ref['id'] + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, + host=None, + binary=None, + topic=None, + manager=None, + report_interval=None, + periodic_interval=None): + """Instantiates class and passes back application object. 
+ + Args: + host, defaults to FLAGS.host + binary, defaults to basename of executable + topic, defaults to bin_name - "nova-" part + manager, defaults to FLAGS._manager + report_interval, defaults to FLAGS.report_interval + periodic_interval, defaults to FLAGS.periodic_interval + """ + if not host: + host = FLAGS.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary.rpartition("nova-")[2] + if not manager: + manager = FLAGS.get('%s_manager' % topic, None) + if not report_interval: + report_interval = FLAGS.report_interval + if not periodic_interval: + periodic_interval = FLAGS.periodic_interval + logging.warn("Starting %s node", topic) + service_obj = cls(host, binary, topic, manager, + report_interval, periodic_interval) + + return service_obj + + def kill(self): + """Destroy the service object in the datastore""" + try: + db.service_destroy(context.get_admin_context(), self.service_id) + except exception.NotFound: + logging.warn("Service killed that has no database entry") + + def periodic_tasks(self): + """Tasks to be run at a periodic interval""" + self.manager.periodic_tasks(context.get_admin_context()) + + def report_state(self): + """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() + try: + try: + service_ref = db.service_get(ctxt, self.service_id) + except exception.NotFound: + logging.debug("The service database object disappeared, " + "Recreating it.") + self._create_service_ref(ctxt) + service_ref = db.service_get(ctxt, self.service_id) + + db.service_update(ctxt, + self.service_id, + {'report_count': service_ref['report_count'] + 1}) + + # TODO(termie): make this pattern be more elegant. + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + # TODO(vish): this should probably only catch connection errors + except Exception: # pylint: disable-msg=W0702 + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + + +def stop(pidfile): + """ + Stop the daemon + """ + # Get the pid from the pidfile + try: + pf = file(pidfile, 'r') + pid = int(pf.read().strip()) + pf.close() + except IOError: + pid = None + + if not pid: + message = "pidfile %s does not exist. 
Daemon not running?\n" + sys.stderr.write(message % pidfile) + # Not an error in a restart + return + + # Try killing the daemon process + try: + while 1: + os.kill(pid, signal.SIGKILL) + time.sleep(0.1) + except OSError, err: + err = str(err) + if err.find("No such process") > 0: + if os.path.exists(pidfile): + os.remove(pidfile) + else: + print str(err) + sys.exit(1) + + +def serve(*services): + argv = FLAGS(sys.argv) + + if not services: + services = [Service.create()] + + name = '_'.join(x.binary for x in services) + logging.debug("Serving %s" % name) + + logging.getLogger('amqplib').setLevel(logging.DEBUG) + + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + # NOTE(vish): if we're running nodaemon, redirect the log to stdout + #if FLAGS.nodaemon and not FLAGS.logfile: + # FLAGS.logfile = "-" + #if not FLAGS.logfile: + # FLAGS.logfile = '%s.log' % name + #if not FLAGS.prefix: + # FLAGS.prefix = name + #elif FLAGS.prefix.endswith('twisted'): + # FLAGS.prefix = FLAGS.prefix.replace('twisted', name) + + action = 'start' + if len(argv) > 1: + action = argv.pop() + + if action == 'stop': + stop(FLAGS.pidfile) + sys.exit() + elif action == 'restart': + stop(FLAGS.pidfile) + elif action == 'start': + pass + else: + print 'usage: %s [options] [start|stop|restart]' % argv[0] + sys.exit(1) + + #formatter = logging.Formatter( + # '(%(name)s): %(levelname)s %(message)s') + #handler = logging.StreamHandler() + #handler.setFormatter(formatter) + #logging.getLogger().addHandler(handler) + + if FLAGS.verbose: + logging.getLogger().setLevel(logging.DEBUG) + else: + logging.getLogger().setLevel(logging.WARNING) + + logging.debug("Full set of FLAGS:") + for flag in FLAGS: + logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + + for x in services: + x.start() + + #while True: + # greenthread.sleep(5) + + diff --git a/nova/utils.py b/nova/utils.py index 7683fc9f4..a219e47bf 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -29,6 +29,9 @@ import subprocess import socket import sys +from eventlet import event +from eventlet import greenthread + from twisted.internet.threads import deferToThread from nova import exception @@ -212,3 +215,36 @@ def deferredToThread(f): def g(*args, **kwargs): return deferToThread(f, *args, **kwargs) return g + + +class LoopingCall(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + + def start(self, interval, now=True): + self._running = True + done = event.Event() + def _inner(): + if not now: + greenthread.sleep(interval) + try: + while self._running: + self.f(*self.args, **self.kw) + greenthread.sleep(interval) + except Exception: + logging.exception('hhmm') + done.send_exception(*sys.exc_info()) + return + + done.send(True) + + greenthread.spawn(_inner) + return done + + def stop(self): + self._running = False + + diff --git a/nova/virt/fake.py b/nova/virt/fake.py index eaa2261f5..0684a0877 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,8 +24,6 @@ This module also documents the semantics of real hypervisor connections. import logging -from twisted.internet import defer - from nova.compute import power_state @@ -105,7 +103,6 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING - return defer.succeed(None) def reboot(self, instance): """ @@ -117,7 +114,7 @@ class FakeConnection(object): The work will be done asynchronously. 
This function returns a Deferred that allows the caller to detect when it is complete. """ - return defer.succeed(None) + pass def destroy(self, instance): """ @@ -130,7 +127,6 @@ class FakeConnection(object): Deferred that allows the caller to detect when it is complete. """ del self.instances[instance.name] - return defer.succeed(None) def attach_volume(self, instance_name, device_path, mountpoint): """Attach the disk at device_path to the instance at mountpoint""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 509ed97a0..9ca97bd1b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -25,10 +25,10 @@ import logging import os import shutil +from eventlet import event +from eventlet import tpool + import IPy -from twisted.internet import defer -from twisted.internet import task -from twisted.internet import threads from nova import context from nova import db @@ -145,13 +145,12 @@ class LibvirtConnection(object): except Exception as _err: pass # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda _: self._cleanup(instance)) - # FIXME: What does this comment mean? - # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, + + done = event.Event() + + # We'll save this for when we do shutdown, # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_shutdown(): try: @@ -160,17 +159,26 @@ class LibvirtConnection(object): instance['id'], state) if state == power_state.SHUTDOWN: timer.stop() - d.callback(None) except Exception: db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d + timer_done = timer.start(interval=0.5, now=True) + + # NOTE(termie): this is strictly superfluous (we could put the + # cleanup code in the timer), but this emulates the + # previous model so I am keeping it around until + # everything has been vetted a bit + def _wait_for_timer(): + timer_done.wait() + self._cleanup(instance) + done.send() + + greenthread.spawn(_wait_for_time) + return done def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) @@ -179,32 +187,28 @@ class LibvirtConnection(object): if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - yield process.simple_execute("sudo virsh attach-disk %s %s %s" % - (instance_name, - device_path, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh attach-disk %s %s %s" % + (instance_name, + device_path, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): # NOTE(vish): despite the documentation, virsh detach-disk just # wants the device name without the leading /dev/ - yield process.simple_execute("sudo virsh detach-disk %s %s" % - (instance_name, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh detach-disk %s %s" % + (instance_name, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): xml = self.to_xml(instance) - yield self._conn.lookupByName(instance['name']).destroy() - yield self._conn.createXML(xml, 0) + self._conn.lookupByName(instance['name']).destroy() + 
self._conn.createXML(xml, 0) - d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_reboot(): try: @@ -214,20 +218,16 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: rebooted', instance['name']) timer.stop() - d.callback(None) except Exception, exn: logging.error('_wait_for_reboot failed: %s', exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d + return timer.start(interval=0.5, now=True) - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): xml = self.to_xml(instance) @@ -235,16 +235,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - yield NWFilterFirewall(self._conn).\ - setup_nwfilters_for_instance(instance) - yield self._create_image(instance, xml) - yield self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot + NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + self._create_image(instance, xml) + self._conn.createXML(xml, 0) logging.debug("instance %s: is running", instance['name']) - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_boot(): try: @@ -254,7 +250,6 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: booted', instance['name']) timer.stop() - local_d.callback(None) except: logging.exception('instance %s: failed to boot', instance['name']) @@ -262,10 +257,9 @@ class LibvirtConnection(object): instance['id'], power_state.SHUTDOWN) timer.stop() - local_d.callback(None) + timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - yield local_d + return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): logging.info('virsh said: %r' % (virsh_output,)) @@ -273,10 +267,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - d = process.simple_execute("sudo dd if=%s iflag=nonblock" % + r = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) - d.addCallback(lambda r: r[0]) - return d + return r[0] else: return '' @@ -296,21 +289,21 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - d = process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + + process.simple_execute('sudo chown %d %s' % (os.getuid(), + console_log)) + if FLAGS.libvirt_type == 'xen': - # Xen is spethial - d.addCallback(lambda _: - process.simple_execute("virsh ttyconsole %s" % - instance['name'])) - d.addCallback(self._flush_xen_console) - d.addCallback(self._append_to_file, console_log) + # Xen is special + virsh_output = process.simple_execute("virsh ttyconsole %s" % + instance['name']) + data = self._flush_xen_console(virsh_output) + fpath = self._append_to_file(data, console_log) else: - d.addCallback(lambda _: defer.succeed(console_log)) - d.addCallback(self._dump_file) - return d + fpath = console_log + + return self._dump_file(fpath) - @defer.inlineCallbacks def _create_image(self, inst, libvirt_xml): # syntactic nicety basepath = lambda fname='': os.path.join(FLAGS.instances_path, @@ -318,8 +311,8 @@ class LibvirtConnection(object): fname) # ensure 
directories exist and are writable - yield process.simple_execute('mkdir -p %s' % basepath()) - yield process.simple_execute('chmod 0777 %s' % basepath()) + process.simple_execute('mkdir -p %s' % basepath()) + process.simple_execute('chmod 0777 %s' % basepath()) # TODO(termie): these are blocking calls, it would be great # if they weren't. @@ -335,19 +328,19 @@ class LibvirtConnection(object): project = manager.AuthManager().get_project(inst['project_id']) if not os.path.exists(basepath('disk')): - yield images.fetch(inst.image_id, basepath('disk-raw'), user, - project) + images.fetch(inst.image_id, basepath('disk-raw'), user, + project) if not os.path.exists(basepath('kernel')): - yield images.fetch(inst.kernel_id, basepath('kernel'), user, - project) + images.fetch(inst.kernel_id, basepath('kernel'), user, + project) if not os.path.exists(basepath('ramdisk')): - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) - - execute = lambda cmd, process_input=None, check_exit_code=True: \ - process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, + project) + + def execute(cmd, process_input=None, check_exit_code=True): + return process.simple_execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -369,23 +362,23 @@ class LibvirtConnection(object): if net: logging.info('instance %s: injecting net into image %s', inst['name'], inst.image_id) - yield disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) + disk.inject_data(basepath('disk-raw'), key, net, + execute=execute) if os.path.exists(basepath('disk')): - yield process.simple_execute('rm -f %s' % basepath('disk')) + process.simple_execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] * 1024 * 1024 * 1024) resize = inst['instance_type'] != 'm1.tiny' - yield disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - yield process.simple_execute('sudo chown root %s' % - basepath('disk')) + process.simple_execute('sudo chown root %s' % + basepath('disk')) def to_xml(self, instance): # TODO(termie): cache? @@ -637,15 +630,15 @@ class NWFilterFirewall(object): def _define_filter(self, xml): if callable(xml): xml = xml() - d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) - return d + + # execute in a native thread and block until done + tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod def _get_net_and_mask(cidr): net = IPy.IP(cidr) return str(net.net()), str(net.netmask()) - @defer.inlineCallbacks def setup_nwfilters_for_instance(self, instance): """ Creates an NWFilter for the given instance. In the process, @@ -653,10 +646,10 @@ class NWFilterFirewall(object): the base filter are all in place. 
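
_define_filter above uses eventlet's tpool in place of Twisted's deferToThread: the blocking libvirt call runs in a native worker thread while only the calling greenthread waits, so the rest of the process keeps serving requests. The same pattern in isolation (a self-contained sketch; the slow function is a made-up stand-in for a C-library call such as nwfilterDefineXML):

    import time

    from eventlet import tpool

    def define_filter_slowly(xml):
        # Pretend C-library call that would otherwise block every
        # greenthread in the process for its full duration.
        time.sleep(1)
        return len(xml)

    # Runs in a native thread from eventlet's pool and blocks only the
    # greenthread that asked for it.
    result = tpool.execute(define_filter_slowly, '<filter/>')
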
""" - yield self._define_filter(self.nova_base_ipv4_filter) - yield self._define_filter(self.nova_base_ipv6_filter) - yield self._define_filter(self.nova_dhcp_filter) - yield self._define_filter(self.nova_base_filter) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_base_filter) nwfilter_xml = "\n" \ " \n" % \ @@ -668,20 +661,19 @@ class NWFilterFirewall(object): net, mask = self._get_net_and_mask(network_ref['cidr']) project_filter = self.nova_project_filter(instance['project_id'], net, mask) - yield self._define_filter(project_filter) + self._define_filter(project_filter) nwfilter_xml += " \n" % \ instance['project_id'] for security_group in instance.security_groups: - yield self.ensure_security_group_filter(security_group['id']) + self.ensure_security_group_filter(security_group['id']) nwfilter_xml += " \n" % \ security_group['id'] nwfilter_xml += "" - yield self._define_filter(nwfilter_xml) - return + self._define_filter(nwfilter_xml) def ensure_security_group_filter(self, security_group_id): return self._define_filter( diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index a17e405ab..f997d01d7 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -36,11 +36,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. """ import logging +import sys import xmlrpclib -from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task +from eventlet import tpool from nova import db from nova import flags @@ -110,36 +109,33 @@ class XenAPIConnection(object): return [self._conn.xenapi.VM.get_name_label(vm) \ for vm in self._conn.xenapi.VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) + network_ref = self._find_network_with_bridge(network.bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( + vdi_uuid = self._fetch_image( instance.image_id, user, project, True) - kernel = yield self._fetch_image( + kernel = self._fetch_image( instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( + ramdisk = self._fetch_image( instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) + vdi_ref = self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + vm_ref = self._create_vm(instance, kernel, ramdisk) + self._create_vbd(vm_ref, vdi_ref, 0, True) if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) + self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) + self._call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def _create_vm(self, instance, kernel, ramdisk): """Create a VM record. 
Returns a Deferred that gives the new VM reference.""" @@ -177,11 +173,10 @@ class XenAPIConnection(object): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) + vm_ref = self._call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref - @defer.inlineCallbacks def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -200,12 +195,11 @@ class XenAPIConnection(object): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) + vbd_ref = self._call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref - @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -221,24 +215,22 @@ class XenAPIConnection(object): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) + vif_ref = self._call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref - @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) + networks = self._call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: raise Exception('Found no network for bridge %s' % bridge) - @defer.inlineCallbacks def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -255,33 +247,31 @@ class XenAPIConnection(object): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) + task = self._async_call_plugin('objectstore', fn, args) + uuid = self._wait_for_task(task) + return uuid - @defer.inlineCallbacks def reboot(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.clean_reboot', vm) + self._wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. 
- defer.returnValue(None) + return try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.hard_shutdown', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.destroy', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) @@ -299,7 +289,6 @@ class XenAPIConnection(object): def get_console_output(self, instance): return 'FAKE CONSOLE OUTPUT' - @utils.deferredToThread def _lookup(self, i): return self._lookup_blocking(i) @@ -316,35 +305,32 @@ class XenAPIConnection(object): def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" - d = defer.Deferred() - reactor.callLater(0, self._poll_task, task, d) - return d - @utils.deferredToThread - def _poll_task(self, task, deferred): + done = event.Event() + loop = utis.LoopingTask(self._poll_task, task, done) + loop.start(FLAGS.xenapi_task_poll_interval, now=True) + return done.wait() + + def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) status = self._conn.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, - self._poll_task, task, deferred) + return elif status == 'success': result = self._conn.xenapi.task.get_result(task) logging.info('Task %s status: success. %s', task, result) - deferred.callback(_parse_xmlrpc_value(result)) + done.send(_parse_xmlrpc_value(result)) else: error_info = self._conn.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. %s', task, status, error_info) - deferred.errback(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + done.send_exception(XenAPI.Failure(error_info)) except Exception, exc: logging.warn(exc) - deferred.errback(exc) + done.send_exception(*sys.exc_info()) - @utils.deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns a Deferred for the result.""" @@ -353,11 +339,10 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @utils.deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. 
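
_wait_for_task and _poll_task above swap the reactor.callLater polling for the same Event-plus-LoopingCall shape used in libvirt_conn.py: a looping poller checks the task status and, once it leaves 'pending', delivers the result (or exception) on an event the caller blocks on. A self-contained sketch of that shape, with a made-up task in place of the XenAPI task object (it assumes the LoopingCall helper from nova.utils introduced earlier in this change):

    from eventlet import event

    from nova import utils

    polls = [0]
    done = event.Event()
    timer = utils.LoopingCall(f=None)

    def _poll_fake_task():
        # Made-up stand-in for self._conn.xenapi.task.get_status(task).
        polls[0] += 1
        if polls[0] < 3:
            return                     # still 'pending'; poll again next tick
        timer.stop()
        done.send('fake task result')  # done.send_exception(...) on failure

    timer.f = _poll_fake_task
    timer.start(interval=0.5, now=True)
    print done.wait()                  # blocks until the task "completes"
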
Returns a Deferred with the task reference.""" - return _unwrap_plugin_exceptions( + return tpool.execute(_unwrap_plugin_exceptions, self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) -- cgit From f127d85d7790585d6e735648dfab13416d79fbde Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 6 Nov 2010 00:02:36 +0000 Subject: Per-project vpns, certificates, and revocation --- CA/INTER/.gitignore | 1 - CA/INTER/.placeholder | 0 CA/geninter.sh | 26 +++-- CA/genrootca.sh | 1 + CA/openssl.cnf.tmpl | 3 +- CA/projects/.gitignore | 1 + CA/projects/.placeholder | 0 bin/nova-manage | 75 +++++++++----- nova/api/__init__.py | 2 - nova/api/cloudpipe/__init__.py | 69 ------------- nova/auth/manager.py | 73 +++++++------- nova/cloudpipe/bootscript.sh | 63 ------------ nova/cloudpipe/bootscript.template | 50 ++++++++++ nova/cloudpipe/pipelib.py | 123 +++++++++++++++-------- nova/crypto.py | 195 +++++++++++++++++++++++++++++-------- nova/db/api.py | 39 ++++++++ nova/db/sqlalchemy/api.py | 78 +++++++++++++++ nova/db/sqlalchemy/models.py | 12 ++- nova/tests/auth_unittest.py | 17 ++-- 19 files changed, 533 insertions(+), 295 deletions(-) delete mode 100644 CA/INTER/.gitignore delete mode 100644 CA/INTER/.placeholder create mode 100644 CA/projects/.gitignore create mode 100644 CA/projects/.placeholder delete mode 100644 nova/api/cloudpipe/__init__.py delete mode 100755 nova/cloudpipe/bootscript.sh create mode 100755 nova/cloudpipe/bootscript.template diff --git a/CA/INTER/.gitignore b/CA/INTER/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/CA/INTER/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/CA/INTER/.placeholder b/CA/INTER/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/CA/geninter.sh b/CA/geninter.sh index 7d6c280d5..1fbcc9e73 100755 --- a/CA/geninter.sh +++ b/CA/geninter.sh @@ -16,16 +16,24 @@ # License for the specific language governing permissions and limitations # under the License. -# ARG is the id of the user -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1" -mkdir INTER/$1 -cd INTER/$1 +# $1 is the id of the project and $2 is the subject of the cert +NAME=$1 +SUBJ=$2 +mkdir -p projects/$NAME +cd projects/$NAME cp ../../openssl.cnf.tmpl openssl.cnf -sed -i -e s/%USERNAME%/$1/g openssl.cnf +sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir certs crl newcerts private +openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes echo "10" > serial touch index.txt -openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes -openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ" -cd ../../ -openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch +# NOTE(vish): Disabling intermediate ca's because we don't actually need them. +# It makes more sense to have each project have its own root ca. +# openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes +# openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ" +openssl ca -gencrl -config ./openssl.cnf -out crl.pem +if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then + sudo chown -R nova:nogroup . 
+fi +# cd ../../ +# openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch diff --git a/CA/genrootca.sh b/CA/genrootca.sh index 31976092e..8f2c3ee3f 100755 --- a/CA/genrootca.sh +++ b/CA/genrootca.sh @@ -25,4 +25,5 @@ else openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes touch index.txt echo "10" > serial + openssl ca -gencrl -config ./openssl.cnf -out crl.pem fi diff --git a/CA/openssl.cnf.tmpl b/CA/openssl.cnf.tmpl index 639b8e80a..dd81f1c2b 100644 --- a/CA/openssl.cnf.tmpl +++ b/CA/openssl.cnf.tmpl @@ -24,7 +24,6 @@ dir = . [ ca ] default_ca = CA_default -unique_subject = no [ CA_default ] serial = $dir/serial @@ -32,6 +31,8 @@ database = $dir/index.txt new_certs_dir = $dir/newcerts certificate = $dir/cacert.pem private_key = $dir/private/cakey.pem +unique_subject = no +default_crl_days = 365 default_days = 365 default_md = md5 preserve = no diff --git a/CA/projects/.gitignore b/CA/projects/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/CA/projects/.gitignore @@ -0,0 +1 @@ +* diff --git a/CA/projects/.placeholder b/CA/projects/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/bin/nova-manage b/bin/nova-manage index 08b3da123..b788ee62d 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -69,6 +69,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import context +from nova import crypto from nova import db from nova import exception from nova import flags @@ -93,32 +94,36 @@ class VpnCommands(object): self.manager = manager.AuthManager() self.pipe = pipelib.CloudPipe() - def list(self): - """Print a listing of the VPNs for all projects.""" + def list(self, project=None): + """Print a listing of the VPN data for one or all projects. + + args: [project=all]""" print "%-12s\t" % 'project', print "%-20s\t" % 'ip:port', + print "%-20s\t" % 'private_ip', print "%s" % 'state' - for project in self.manager.get_projects(): + if project: + projects = [self.manager.get_project(project)] + else: + projects = self.manager.get_projects() + for project in projects: print "%-12s\t" % project.name, - - try: - s = "%s:%s" % (project.vpn_ip, project.vpn_port) - except exception.NotFound: - s = "None" - print "%-20s\t" % s, - + ipport = "%s:%s" % (project.vpn_ip, project.vpn_port) + print "%-20s\t" % ipport, vpn = self._vpn_for(project.id) if vpn: - command = "ping -c1 -w1 %s > /dev/null; echo $?" - out, _err = utils.execute(command % vpn['private_dns_name'], - check_exit_code=False) - if out.strip() == '0': - net = 'up' - else: - net = 'down' - print vpn['private_dns_name'], - print vpn['node_name'], - print vpn['instance_id'], + net = 'down' + address = None + if vpn.get('fixed_ip', None): + address = vpn['fixed_ip']['address'] + command = "ping -c1 -w1 %s > /dev/null; echo $?" 
+ out, _err = utils.execute(command % address, + check_exit_code=False) + if out.strip() == '0': + net = 'up' + print address, + print vpn['host'], + print vpn['ec2_id'], print vpn['state_description'], print net @@ -127,11 +132,11 @@ class VpnCommands(object): def _vpn_for(self, project_id): """Get the VPN instance for a project ID.""" - for instance in db.instance_get_all(context.get_admin_context()): + ctxt = context.get_admin_context() + for instance in db.instance_get_all_by_project(ctxt, project_id): if (instance['image_id'] == FLAGS.vpn_image_id and not instance['state_description'] in - ['shutting_down', 'shutdown'] - and instance['project_id'] == project_id): + ['shutting_down', 'shutdown']): return instance def spawn(self): @@ -146,6 +151,22 @@ class VpnCommands(object): """Start the VPN for a given project.""" self.pipe.launch_vpn_instance(project_id) + def change(self, project_id, ip, port): + """Change the ip and port for a vpn. + + args: project, ip, port""" + project = self.manager.get_project(project_id) + if not project: + print 'No project %s' % (project_id) + return + admin = context.get_admin_context() + network_ref = db.project_get_network(admin, project_id) + db.network_update(admin, + network_ref['id'], + {'vpn_public_address': ip, + 'vpn_public_port': int(port)}) + + class ShellCommands(object): def bpython(self): @@ -292,6 +313,14 @@ class UserCommands(object): is_admin = False self.manager.modify_user(name, access_key, secret_key, is_admin) + def revoke(self, user_id, project_id=None): + """revoke certs for a user + arguments: user_id [project_id]""" + if project_id: + crypto.revoke_certs_by_user_and_project(user_id, project_id) + else: + crypto.revoke_certs_by_user(user_id) + class ProjectCommands(object): """Class for managing projects.""" diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 707c1623e..176d571a8 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -25,7 +25,6 @@ import webob.dec from nova import flags from nova import wsgi -from nova.api import cloudpipe from nova.api import ec2 from nova.api import openstack from nova.api.ec2 import metadatarequesthandler @@ -74,7 +73,6 @@ class API(wsgi.Router): mapper.connect('%s/{path_info:.*}' % s, controller=mrh, conditions=ec2api_subdomain) - mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API()) super(API, self).__init__(mapper) @webob.dec.wsgify diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py deleted file mode 100644 index 6d40990a8..000000000 --- a/nova/api/cloudpipe/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
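
The rewritten `nova-manage vpn list` above decides whether a project's VPN is up by pinging its fixed IP once with a one second deadline and reading the exit status that `echo $?` appends to stdout. The same check as a small helper (a sketch; it assumes utils.execute runs the command through a shell and returns an (stdout, stderr) pair, as the listing code above relies on):

    from nova import utils

    def vpn_is_up(address):
        # check_exit_code=False because a failed ping is an answer, not an
        # error; the trailing `echo $?` puts the status on stdout instead.
        out, _err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?"
                                  % address, check_exit_code=False)
        return out.strip() == '0'

For example, vpn_is_up('10.0.0.2') returns True only when the address answers the single ping.
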
- -""" -REST API Request Handlers for CloudPipe -""" - -import logging -import urllib -import webob -import webob.dec -import webob.exc - -from nova import crypto -from nova import wsgi -from nova.auth import manager -from nova.api.ec2 import cloud - - -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) - - -class API(wsgi.Application): - - def __init__(self): - self.controller = cloud.CloudController() - - @webob.dec.wsgify - def __call__(self, req): - if req.method == 'POST': - return self.sign_csr(req) - _log.debug("Cloudpipe path is %s" % req.path_info) - if req.path_info.endswith("/getca/"): - return self.send_root_ca(req) - return webob.exc.HTTPNotFound() - - def get_project_id_from_ip(self, ip): - # TODO(eday): This was removed with the ORM branch, fix! - instance = self.controller.get_instance_by_ip(ip) - return instance['project_id'] - - def send_root_ca(self, req): - _log.debug("Getting root ca") - project_id = self.get_project_id_from_ip(req.remote_addr) - res = webob.Response() - res.headers["Content-Type"] = "text/plain" - res.body = crypto.fetch_ca(project_id) - return res - - def sign_csr(self, req): - project_id = self.get_project_id_from_ip(req.remote_addr) - cert = self.str_params['cert'] - return crypto.sign_csr(urllib.unquote(cert), project_id) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 001a96875..c6d4b6e53 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -64,12 +64,8 @@ flags.DEFINE_string('credential_key_file', 'pk.pem', 'Filename of private key in credentials zip') flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_rc_file', 'novarc', +flags.DEFINE_string('credential_rc_file', '%src', 'Filename of rc in credentials zip') -flags.DEFINE_string('credential_cert_subject', - '/C=US/ST=California/L=MountainView/O=AnsoLabs/' - 'OU=NovaDev/CN=%s-%s', - 'Subject for certificate for users') flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') @@ -625,27 +621,37 @@ class AuthManager(object): with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) - def get_credentials(self, user, project=None): + def get_credentials(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - rc = self.__generate_rc(user.access, user.secret, pid) - private_key, signed_cert = self._generate_x509_cert(user.id, pid) + private_key, signed_cert = crypto.generate_x509_cert(user.id, pid) tmpdir = tempfile.mkdtemp() zf = os.path.join(tmpdir, "temp.zip") zippy = zipfile.ZipFile(zf, 'w') - zippy.writestr(FLAGS.credential_rc_file, rc) + if use_dmz and FLAGS.region_list: + regions = {} + for item in FLAGS.region_list: + region, _sep, region_host = item.partition("=") + regions[region] = region_host + else: + regions = {'nova': FLAGS.cc_host} + for region, host in regions.iteritems(): + rc = self.__generate_rc(user.access, + user.secret, + pid, + use_dmz, + host) + zippy.writestr(FLAGS.credential_rc_file % region, rc) + zippy.writestr(FLAGS.credential_key_file, private_key) zippy.writestr(FLAGS.credential_cert_file, signed_cert) - try: - (vpn_ip, vpn_port) = self.get_project_vpn_data(project) - except exception.NotFound: - vpn_ip = None + (vpn_ip, vpn_port) = self.get_project_vpn_data(project) if vpn_ip: configfile = open(FLAGS.vpn_client_template, "r") s = 
string.Template(configfile.read()) @@ -656,10 +662,9 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + LOG.warn("No vpn data for project %s", pid) - zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) + zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() with open(zf, 'rb') as f: read_buffer = f.read() @@ -667,38 +672,38 @@ class AuthManager(object): shutil.rmtree(tmpdir) return read_buffer - def get_environment_rc(self, user, project=None): + def get_environment_rc(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid) + return self.__generate_rc(user.access, user.secret, pid, use_dmz) @staticmethod - def __generate_rc(access, secret, pid): + def __generate_rc(access, secret, pid, use_dmz=True, host=None): """Generate rc file for user""" + if use_dmz: + cc_host = FLAGS.cc_dmz + else: + cc_host = FLAGS.cc_host + # NOTE(vish): Always use the dmz since it is used from inside the + # instance + s3_host = FLAGS.s3_dmz + if host: + s3_host = host + cc_host = host rc = open(FLAGS.credentials_template).read() rc = rc % {'access': access, 'project': pid, 'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + cc_host, + FLAGS.cc_port, + FLAGS.ec2_suffix), + 's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port), 'nova': FLAGS.ca_file, 'cert': FLAGS.credential_cert_file, 'key': FLAGS.credential_key_file} return rc - - def _generate_x509_cert(self, uid, pid): - """Generate x509 cert for user""" - (private_key, csr) = crypto.generate_x509_cert( - self.__cert_subject(uid)) - # TODO(joshua): This should be async call back to the cloud controller - signed_cert = crypto.sign_csr(csr, pid) - return (private_key, signed_cert) - - @staticmethod - def __cert_subject(uid): - """Helper to generate cert subject""" - return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh deleted file mode 100755 index 30d9ad102..000000000 --- a/nova/cloudpipe/bootscript.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
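
get_credentials now writes one rc file per configured region, and the credential_rc_file default changed from the fixed name 'novarc' to the pattern '%src', so the filename is derived from the region name while the single-region case keeps the old name. A quick illustration (the region names here are made up):

    credential_rc_file = '%src'      # new FLAGS.credential_rc_file default

    for region in ('nova', 'useast', 'uswest'):
        print credential_rc_file % region
    # -> novarc, useastrc, uswestrc
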
- -# This gets zipped and run on the cloudpipe-managed OpenVPN server - -export SUPERVISOR="http://10.255.255.1:8773/cloudpipe" -export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $1}'` -export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'` -export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'` -export GATEWAY=`netstat -r | grep default | cut -d' ' -f10` -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP" - -DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'` -DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'` - -# generate a server DH -openssl dhparam -out /etc/openvpn/dh1024.pem 1024 - -# generate a server priv key -openssl genrsa -out /etc/openvpn/server.key 2048 - -# generate a server CSR -openssl req -new -key /etc/openvpn/server.key -out /etc/openvpn/server.csr -batch -subj "$SUBJ" - -# URLEncode the CSR -CSRTEXT=`cat /etc/openvpn/server.csr` -CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')") - -# SIGN the csr and save as server.crt -# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file -curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt -curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt - -# Customize the server.conf.template -cd /etc/openvpn - -sed -e s/VPN_IP/$VPN_IP/g server.conf.template > server.conf -sed -i -e s/DHCP_SUBNET/$DHCP_MASK/g server.conf -sed -i -e s/DHCP_LOWER/$DHCP_LOWER/g server.conf -sed -i -e s/DHCP_UPPER/$DHCP_UPPER/g server.conf -sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf - -echo "\npush \"route 10.255.255.1 255.255.255.255 $GATEWAY\"\n" >> server.conf -echo "\npush \"route 10.255.255.253 255.255.255.255 $GATEWAY\"\n" >> server.conf -echo "\nduplicate-cn\n" >> server.conf - -/etc/init.d/openvpn start diff --git a/nova/cloudpipe/bootscript.template b/nova/cloudpipe/bootscript.template new file mode 100755 index 000000000..11578c134 --- /dev/null +++ b/nova/cloudpipe/bootscript.template @@ -0,0 +1,50 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This gets zipped and run on the cloudpipe-managed OpenVPN server + +export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $$1}'` +export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $$1}'` +export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $$1}'` +export GATEWAY=`netstat -r | grep default | cut -d' ' -f10` + +DHCP_LOWER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." $$4 - ${num_vpn} }'` +DHCP_UPPER=`echo $$BROADCAST | awk -F. '{print $$1"."$$2"."$$3"." 
$$4 - 1 }'` + +# generate a server DH +openssl dhparam -out /etc/openvpn/dh1024.pem 1024 + +cp crl.pem /etc/openvpn/ +cp server.key /etc/openvpn/ +cp ca.crt /etc/openvpn/ +cp server.crt /etc/openvpn/ +# Customize the server.conf.template +cd /etc/openvpn + +sed -e s/VPN_IP/$$VPN_IP/g server.conf.template > server.conf +sed -i -e s/DHCP_SUBNET/$$DHCP_MASK/g server.conf +sed -i -e s/DHCP_LOWER/$$DHCP_LOWER/g server.conf +sed -i -e s/DHCP_UPPER/$$DHCP_UPPER/g server.conf +sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf + +echo "push \"route ${dmz_net} ${dmz_mask} $$GATEWAY\"" >> server.conf +echo "duplicate-cn" >> server.conf +echo "crl-verify /etc/openvpn/crl.pem" >> server.conf + +/etc/init.d/openvpn start diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 3472201cd..147b9dd7b 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -22,13 +22,15 @@ an instance with it. """ -import base64 import logging import os +import string import tempfile import zipfile from nova import context +from nova import crypto +from nova import db from nova import exception from nova import flags from nova import utils @@ -39,8 +41,17 @@ from nova.api.ec2 import cloud FLAGS = flags.FLAGS flags.DEFINE_string('boot_script_template', - utils.abspath('cloudpipe/bootscript.sh'), + utils.abspath('cloudpipe/bootscript.template'), 'Template for script to run on cloudpipe instance boot') +flags.DEFINE_string('dmz_net', + '10.0.0.0', + 'Network to push into openvpn config') +flags.DEFINE_string('dmz_mask', + '255.255.255.0', + 'Netmask to push into openvpn config') + + +LOG = logging.getLogger('nova-cloudpipe') class CloudPipe(object): @@ -48,64 +59,96 @@ class CloudPipe(object): self.controller = cloud.CloudController() self.manager = manager.AuthManager() - def launch_vpn_instance(self, project_id): - logging.debug("Launching VPN for %s" % (project_id)) - project = self.manager.get_project(project_id) + def get_encoded_zip(self, project_id): # Make a payload.zip tmpfolder = tempfile.mkdtemp() filename = "payload.zip" zippath = os.path.join(tmpfolder, filename) z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED) - - z.write(FLAGS.boot_script_template, 'autorun.sh') + shellfile = open(FLAGS.boot_script_template, "r") + s = string.Template(shellfile.read()) + shellfile.close() + boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz, + cc_port=FLAGS.cc_port, + dmz_net=FLAGS.dmz_net, + dmz_mask=FLAGS.dmz_mask, + num_vpn=FLAGS.cnt_vpn_clients) + # genvpn, sign csr + crypto.generate_vpn_files(project_id) + z.writestr('autorun.sh', boot_script) + crl = os.path.join(crypto.ca_folder(project_id), 'crl.pem') + z.write(crl, 'crl.pem') + server_key = os.path.join(crypto.ca_folder(project_id), 'server.key') + z.write(server_key, 'server.key') + ca_crt = os.path.join(crypto.ca_path(project_id)) + z.write(ca_crt, 'ca.crt') + server_crt = os.path.join(crypto.ca_folder(project_id), 'server.crt') + z.write(server_crt, 'server.crt') z.close() - - key_name = self.setup_key_pair(project.project_manager_id, project_id) zippy = open(zippath, "r") - context = context.RequestContext(user=project.project_manager, - project=project) - - reservation = self.controller.run_instances(context, - # Run instances expects encoded userdata, it is decoded in the - # get_metadata_call. autorun.sh also decodes the zip file, hence - # the double encoding. - user_data=zippy.read().encode("base64").encode("base64"), + # NOTE(vish): run instances expects encoded userdata, it is decoded + # in the get_metadata_call. 
autorun.sh also decodes the zip file, + # hence the double encoding. + encoded = zippy.read().encode("base64").encode("base64") + zippy.close() + return encoded + + def launch_vpn_instance(self, project_id): + LOG.debug("Launching VPN for %s" % (project_id)) + project = self.manager.get_project(project_id) + ctxt = context.RequestContext(user=project.project_manager, + project=project) + key_name = self.setup_key_pair(ctxt) + group_name = self.setup_security_group(ctxt) + + reservation = self.controller.run_instances(ctxt, + user_data=self.get_encoded_zip(project_id), max_count=1, min_count=1, instance_type='m1.tiny', image_id=FLAGS.vpn_image_id, key_name=key_name, - security_groups=["vpn-secgroup"]) - zippy.close() + security_group=[group_name]) + + def setup_security_group(self, context): + group_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) + if db.security_group_exists(context, context.project.id, group_name): + return group_name + group = {'user_id': context.user.id, + 'project_id': context.project.id, + 'name': group_name, + 'description': 'Group for vpn'} + group_ref = db.security_group_create(context, group) + rule = {'parent_group_id': group_ref['id'], + 'cidr': '0.0.0.0/0', + 'protocol': 'udp', + 'from_port': 1194, + 'to_port': 1194} + db.security_group_rule_create(context, rule) + rule = {'parent_group_id': group_ref['id'], + 'cidr': '0.0.0.0/0', + 'protocol': 'icmp', + 'from_port': -1, + 'to_port': -1} + db.security_group_rule_create(context, rule) + # NOTE(vish): No need to trigger the group since the instance + # has not been run yet. + return group_name - def setup_key_pair(self, user_id, project_id): - key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix) + def setup_key_pair(self, context): + key_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) try: - private_key, fingerprint = self.manager.generate_key_pair(user_id, - key_name) + result = cloud._gen_key(context, context.user.id, key_name) + private_key = result['private_key'] try: - key_dir = os.path.join(FLAGS.keys_path, user_id) + key_dir = os.path.join(FLAGS.keys_path, context.user.id) if not os.path.exists(key_dir): os.makedirs(key_dir) - file_name = os.path.join(key_dir, '%s.pem' % key_name) - with open(file_name, 'w') as f: + key_path = os.path.join(key_dir, '%s.pem' % key_name) + with open(key_path, 'w') as f: f.write(private_key) except: pass except exception.Duplicate: pass return key_name - - # def setup_secgroups(self, username): - # conn = self.euca.connection_for(username) - # try: - # secgroup = conn.create_security_group("vpn-secgroup", - # "vpn-secgroup") - # secgroup.authorize(ip_protocol = "udp", from_port = "1194", - # to_port = "1194", cidr_ip = "0.0.0.0/0") - # secgroup.authorize(ip_protocol = "tcp", from_port = "80", - # to_port = "80", cidr_ip = "0.0.0.0/0") - # secgroup.authorize(ip_protocol = "tcp", from_port = "22", - # to_port = "22", cidr_ip = "0.0.0.0/0") - # except: - # pass diff --git a/nova/crypto.py b/nova/crypto.py index 16b4f5e1f..9e29f0b8d 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -17,7 +17,7 @@ # under the License. """ -Wrappers around standard crypto, including root and intermediate CAs, +Wrappers around standard crypto, including root and project CAs, SSH key_pairs and x509 certificates. 
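
The boot script is now rendered with string.Template rather than shipped verbatim: ${name} placeholders (num_vpn, dmz_net, dmz_mask, and so on) are filled in by get_encoded_zip, while the script's own shell variables are written as $$VAR in the template so substitution leaves a literal $VAR behind for the shell. A small illustration of that escaping (the template lines here are simplified stand-ins, not the real bootscript.template):

    import string

    tmpl = string.Template(
        'push "route ${dmz_net} ${dmz_mask} $$GATEWAY"\n'
        'num_vpn_clients=${num_vpn}\n')

    print tmpl.substitute(dmz_net='10.0.0.0', dmz_mask='255.255.255.0',
                          num_vpn=5)
    # -> push "route 10.0.0.0 255.255.255.0 $GATEWAY"
    #    num_vpn_clients=5
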
""" @@ -33,28 +33,57 @@ import utils import M2Crypto -from nova import exception +from nova import context +from nova import db from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') +flags.DEFINE_string('key_file', + os.path.join('private', 'cakey.pem'), + 'Filename of private key') +flags.DEFINE_string('crl_file', 'crl.pem', + 'Filename of root Certificate Revokation List') flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys') flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA') -flags.DEFINE_boolean('use_intermediate_ca', False, - 'Should we use intermediate CAs for each project?') +flags.DEFINE_boolean('use_project_ca', False, + 'Should we use a CA for each project?') +flags.DEFINE_string('user_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=%s-%s-%s', + 'Subject for certificate for users, ' + '%s for project, user, timestamp') +flags.DEFINE_string('project_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=project-ca-%s-%s', + 'Subject for certificate for projects, ' + '%s for project, timestamp') +flags.DEFINE_string('vpn_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=project-vpn-%s-%s', + 'Subject for certificate for vpns, ' + '%s for project, timestamp') -def ca_path(project_id): - if project_id: - return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id) - return "%s/cacert.pem" % (FLAGS.ca_path) +def ca_folder(project_id=None): + if FLAGS.use_project_ca and project_id: + return os.path.join(FLAGS.ca_path, 'projects', project_id) + return FLAGS.ca_path + + +def ca_path(project_id=None): + return os.path.join(ca_folder(project_id), FLAGS.ca_file) + + +def key_path(project_id=None): + return os.path.join(ca_folder(project_id), FLAGS.key_file) def fetch_ca(project_id=None, chain=True): - if not FLAGS.use_intermediate_ca: + if not FLAGS.use_project_ca: project_id = None buffer = "" if project_id: @@ -91,8 +120,8 @@ def generate_key_pair(bits=1024): def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): - pub_key_buffer = M2Crypto.BIO.MemoryBuffer(ssl_public_key) - rsa_key = M2Crypto.RSA.load_pub_key_bio(pub_key_buffer) + buf = M2Crypto.BIO.MemoryBuffer(ssl_public_key) + rsa_key = M2Crypto.RSA.load_pub_key_bio(buf) e, n = rsa_key.pub() key_type = 'ssh-rsa' @@ -105,53 +134,137 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): return '%s %s %s@%s\n' % (key_type, b64_blob, name, suffix) -def generate_x509_cert(subject, bits=1024): +def revoke_cert(project_id, file_name): + """Revoke a cert by file name""" + start = os.getcwd() + os.chdir(ca_folder(project_id)) + # NOTE(vish): potential race condition here + utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name) + utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" % + FLAGS.crl_file) + os.chdir(start) + + +def revoke_certs_by_user(user_id): + """Revoke all user certs""" + admin = context.get_admin_context() + for cert in db.certificate_get_all_by_user(admin, user_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def revoke_certs_by_project(project_id): + """Revoke all project certs""" + # NOTE(vish): This is somewhat useless because we can just shut down + # the vpn. 
+ admin = context.get_admin_context() + for cert in db.certificate_get_all_by_project(admin, project_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def revoke_certs_by_user_and_project(user_id, project_id): + """Revoke certs for user in project""" + admin = context.get_admin_context() + for cert in db.certificate_get_all_by_user(admin, user_id, project_id): + revoke_cert(cert['project_id'], cert['file_name']) + + +def _project_cert_subject(project_id): + """Helper to generate user cert subject""" + return FLAGS.project_cert_subject % (project_id, utils.isotime()) + + +def _vpn_cert_subject(project_id): + """Helper to generate user cert subject""" + return FLAGS.vpn_cert_subject % (project_id, utils.isotime()) + + +def _user_cert_subject(user_id, project_id): + """Helper to generate user cert subject""" + return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime()) + + +def generate_x509_cert(user_id, project_id, bits=1024): + """Generate and sign a cert for user in project""" + subject = _user_cert_subject(user_id, project_id) tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - logging.debug("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating private key: %s", - "openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating CSR: %s", - "openssl req -new -key %s -out %s -batch -subj %s" % + utils.execute("openssl genrsa -out %s %s" % (keyfile, bits)) + utils.execute("openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject)) private_key = open(keyfile).read() csr = open(csrfile).read() shutil.rmtree(tmpdir) - return (private_key, csr) - - -def sign_csr(csr_text, intermediate=None): - if not FLAGS.use_intermediate_ca: - intermediate = None - if not intermediate: - return _sign_csr(csr_text, FLAGS.ca_path) - user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate) - if not os.path.exists(user_ca): + (serial, signed_csr) = sign_csr(csr, project_id) + strserial = "%X" % serial + if(len(strserial) % 2): + strserial = "0%s" % strserial + fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % strserial) + cert = {'user_id': user_id, + 'project_id': project_id, + 'file_name': fname} + db.certificate_create(context.get_admin_context(), cert) + return (private_key, signed_csr) + + +def _ensure_project_folder(project_id): + if not os.path.exists(ca_path(project_id)): start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating intermediate CA: %s", - "sh geninter.sh %s" % (intermediate)) + os.chdir(ca_folder()) + utils.execute("sh geninter.sh %s %s" % + (project_id, _project_cert_subject(project_id))) os.chdir(start) - return _sign_csr(csr_text, user_ca) + + +def generate_vpn_files(project_id): + project_folder = ca_folder(project_id) + csr_fn = os.path.join(project_folder, "server.csr") + crt_fn = os.path.join(project_folder, "server.crt") + + if os.path.exists(crt_fn): + return + _ensure_project_folder(project_id) + start = os.getcwd() + os.chdir(ca_folder()) + # TODO(vish): the shell scripts could all be done in python + utils.execute("sh genvpn.sh %s %s" % + (project_id, _vpn_cert_subject(project_id))) + with open(csr_fn, "r") as csrfile: + csr_text = csrfile.read() + (serial, signed_csr) = sign_csr(csr_text, project_id) + with open(crt_fn, "w") as crtfile: + crtfile.write(signed_csr) + os.chdir(start) + + +def sign_csr(csr_text, project_id=None): + if not FLAGS.use_project_ca: + project_id = None + if not 
project_id: + return _sign_csr(csr_text, ca_folder()) + _ensure_project_folder(project_id) + project_folder = ca_folder(project_id) + return _sign_csr(csr_text, ca_folder(project_id)) def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() - csrfile = open("%s/inbound.csr" % (tmpfolder), "w") + inbound = os.path.join(tmpfolder, "inbound.csr") + outbound = os.path.join(tmpfolder, "outbound.csr") + csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug("Flags path: %s" % ca_folder) + logging.debug("Flags path: %s", ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.runthis("Signing cert: %s", - "openssl ca -batch -out %s/outbound.crt " - "-config ./openssl.cnf -infiles %s/inbound.csr" % - (tmpfolder, tmpfolder)) + utils.execute("openssl ca -batch -out %s -config " + "./openssl.cnf -infiles %s" % (outbound, inbound)) + out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound) + serial = int(out.rpartition("=")[2]) os.chdir(start) - with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile: - return crtfile.read() + with open(outbound, "r") as crtfile: + return (serial, crtfile.read()) def mkreq(bits, subject="foo", ca=0): @@ -159,8 +272,7 @@ def mkreq(bits, subject="foo", ca=0): req = M2Crypto.X509.Request() rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) pk.assign_rsa(rsa) - # Should not be freed here - rsa = None + rsa = None # should not be freed here req.set_pubkey(pk) req.set_subject(subject) req.sign(pk, 'sha512') @@ -224,7 +336,6 @@ def mkcacert(subject='nova', years=1): # IN THE SOFTWARE. # http://code.google.com/p/boto - def compute_md5(fp): """ @type fp: file diff --git a/nova/db/api.py b/nova/db/api.py index 80563c452..aa77dd2ea 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -117,6 +117,45 @@ def service_update(context, service_id, values): ################### +def certificate_create(context, values): + """Create a certificate from the values dictionary.""" + return IMPL.certificate_create(context, values) + + +def certificate_destroy(context, certificate_id): + """Destroy the certificate or raise if it does not exist.""" + return IMPL.certificate_destroy(context, certificate_id) + + +def certificate_get_all_by_project(context, project_id): + """Get all certificates for a project.""" + return IMPL.certificate_get_all_by_project(context, project_id) + + +def certificate_get_all_by_user(context, user_id): + """Get all certificates for a user.""" + return IMPL.certificate_get_all_by_user(context, user_id) + + +def certificate_get_all_by_user_and_project(context, user_id, project_id): + """Get all certificates for a user and project.""" + return IMPL.certificate_get_all_by_user_and_project(context, + user_id, + project_id) + + +def certificate_update(context, certificate_id, values): + """Set the given properties on an certificate and update it. + + Raises NotFound if service does not exist. + + """ + return IMPL.service_update(context, certificate_id, values) + + +################### + + def floating_ip_allocate_address(context, host, project_id): """Allocate free floating ip and return the address. 
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index db4d9f68f..9ae12f178 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -253,6 +253,84 @@ def service_update(context, service_id, values): ################### +@require_admin_context +def certificate_get(context, certificate_id, session=None): + if not session: + session = get_session() + + result = session.query(models.Certificate).\ + filter_by(id=certificate_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + + if not result: + raise exception.NotFound('No certificate for id %s' % certificate_id) + + return result + + +@require_admin_context +def certificate_create(context, values): + certificate_ref = models.Certificate() + for (key, value) in values.iteritems(): + certificate_ref[key] = value + certificate_ref.save() + return certificate_ref + + +@require_admin_context +def certificate_destroy(context, certificate_id): + session = get_session() + with session.begin(): + certificate_ref = certificate_get(context, + certificate_id, + session=session) + certificate_ref.delete(session=session) + + +@require_admin_context +def certificate_get_all_by_project(context, project_id): + session = get_session() + return session.query(models.Certificate).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_get_all_by_user(context, user_id): + session = get_session() + return session.query(models.Certificate).\ + filter_by(user_id=user_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_get_all_by_user_and_project(_context, user_id, project_id): + session = get_session() + return session.query(models.Certificate).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def certificate_update(context, certificate_id, values): + session = get_session() + with session.begin(): + certificate_ref = certificate_get(context, + certificate_id, + session=session) + for (key, value) in values.iteritems(): + certificate_ref[key] = value + certificate_ref.save(session=session) + + +################### + + @require_context def floating_ip_allocate_address(context, host, project_id): authorize_project_context(context, project_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1111b5cbd..434f88e5e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -151,6 +151,16 @@ class Service(BASE, NovaBase): disabled = Column(Boolean, default=False) +class Certificate(BASE, NovaBase): + """Represents a an x509 certificate""" + __tablename__ = 'certificates' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) + project_id = Column(String(255)) + file_name = Column(String(255)) + + class Instance(BASE, NovaBase): """Represents a guest vm""" __tablename__ = 'instances' @@ -521,7 +531,7 @@ def register_models(): """Register Models and create metadata""" from sqlalchemy import create_engine models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, - FloatingIp, Network, SecurityGroup, + FloatingIp, Network, SecurityGroup, Certificate, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index fe891beee..0d2082bdb 100644 --- a/nova/tests/auth_unittest.py +++ 
b/nova/tests/auth_unittest.py @@ -208,17 +208,13 @@ class AuthManagerTestCase(object): # so it probably belongs in crypto_unittest # but I'm leaving it where I found it. with user_and_project_generator(self.manager) as (user, project): - # NOTE(todd): Should mention why we must setup controller first - # (somebody please clue me in) - cloud_controller = cloud.CloudController() - cloud_controller.setup() - _key, cert_str = self.manager._generate_x509_cert('test1', - 'testproj') + # NOTE(vish): Setup runs genroot.sh if it hasn't been run + cloud.CloudController().setup() + _key, cert_str = crypto.generate_x509_cert(user.id, project.id) logging.debug(cert_str) - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + full_chain = crypto.fetch_ca(project_id=project.id, chain=True) + int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) signed_cert = X509.load_cert_string(cert_str) @@ -227,7 +223,8 @@ class AuthManagerTestCase(object): cloud_cert = X509.load_cert_string(cloud_cert) self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - if not FLAGS.use_intermediate_ca: + + if not FLAGS.use_project_ca: self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) else: self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) -- cgit From 79fa23620a1846eae28eaed26cd79973571d6b99 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 6 Nov 2010 00:58:05 +0000 Subject: add dmz to flags and change a couple defaults --- nova/flags.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 4ae86d9b2..acdfc6f6d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -180,7 +180,8 @@ DEFINE_list('region_list', 'list of region=url pairs separated by commas') DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', '127.0.0.1', 's3 host') +DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -197,7 +198,8 @@ DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') +DEFINE_string('cc_host', '127.0.0.1', 'ip of api server (for infrastructure') +DEFINE_string('cc_dmz', '127.0.0.1', 'ip of api server (for instances)') DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') @@ -211,10 +213,10 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') -DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') +DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', - '-key', - 'Suffix to add to project name for 
vpn key') + '-vpn', + 'Suffix to add to project name for vpn key and secgroups') DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') -- cgit From e59e7e9c3ad3e25545555986dcce0c384f9a7b6e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Nov 2010 02:33:09 +0000 Subject: remove extra line and ref. to LOG that doesn't exist --- bin/nova-manage | 1 - nova/auth/manager.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b788ee62d..4ab2e9832 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -167,7 +167,6 @@ class VpnCommands(object): 'vpn_public_port': int(port)}) - class ShellCommands(object): def bpython(self): """Runs a bpython shell. diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c6d4b6e53..252c5e65b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -662,7 +662,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - LOG.warn("No vpn data for project %s", pid) + logging.warn("No vpn data for project %s", pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() -- cgit From 9ca0b3435d93a87407ca42a853562cd06aaa896e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 22 Nov 2010 12:57:03 +0000 Subject: added placeholders --- nova/virt/xenapi.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 0f563aa41..4ed5d047f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -296,6 +296,14 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + + @defer.inlineCallbacks + def attach_volume(self, instance_name, device_path, mountpoint): + return True + + @defer.inlineCallbacks + def detach_volume(self, instance_name, mountpoint): + return True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) -- cgit From 1638e71c4442187a315f44840453ad14cafb36ac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 21:16:25 +0000 Subject: the serial returned by x509 is already formatted in hex --- nova/crypto.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/crypto.py b/nova/crypto.py index 9e29f0b8d..a438d232b 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -196,10 +196,7 @@ def generate_x509_cert(user_id, project_id, bits=1024): csr = open(csrfile).read() shutil.rmtree(tmpdir) (serial, signed_csr) = sign_csr(csr, project_id) - strserial = "%X" % serial - if(len(strserial) % 2): - strserial = "0%s" % strserial - fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % strserial) + fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % serial) cert = {'user_id': user_id, 'project_id': project_id, 'file_name': fname} @@ -261,7 +258,7 @@ def _sign_csr(csr_text, ca_folder): utils.execute("openssl ca -batch -out %s -config " "./openssl.cnf -infiles %s" % (outbound, inbound)) out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound) - serial = int(out.rpartition("=")[2]) + serial = out.rpartition("=")[2] os.chdir(start) with open(outbound, "r") as crtfile: return (serial, crtfile.read()) -- cgit From a7a24e2ac54b88ba09afe5966806f42181784e9d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 21:48:32 +0000 Subject: add vpn ping and optimize vpn list --- bin/nova-manage | 27 +++++++++------------------ nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 12 ++++++++++++ nova/utils.py 
| 46 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 18 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 4ab2e9832..3d0122637 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -106,39 +106,30 @@ class VpnCommands(object): projects = [self.manager.get_project(project)] else: projects = self.manager.get_projects() + # NOTE(vish): This hits the database a lot. We could optimize + # by getting all networks in one query and all vpns + # in aother query, then doing lookups by project for project in projects: print "%-12s\t" % project.name, ipport = "%s:%s" % (project.vpn_ip, project.vpn_port) print "%-20s\t" % ipport, - vpn = self._vpn_for(project.id) + ctxt = context.get_admin_context() + vpn = db.instance_get_project_vpn(ctxt, project.id) if vpn: - net = 'down' address = None + state = 'down' if vpn.get('fixed_ip', None): address = vpn['fixed_ip']['address'] - command = "ping -c1 -w1 %s > /dev/null; echo $?" - out, _err = utils.execute(command % address, - check_exit_code=False) - if out.strip() == '0': - net = 'up' + if utils.vpn_ping(project.vpn_ip, project.vpn_port): + state = 'up' print address, print vpn['host'], print vpn['ec2_id'], print vpn['state_description'], - print net - + print state else: print None - def _vpn_for(self, project_id): - """Get the VPN instance for a project ID.""" - ctxt = context.get_admin_context() - for instance in db.instance_get_all_by_project(ctxt, project_id): - if (instance['image_id'] == FLAGS.vpn_image_id - and not instance['state_description'] in - ['shutting_down', 'shutdown']): - return instance - def spawn(self): """Run all VPNs.""" for p in reversed(self.manager.get_projects()): diff --git a/nova/db/api.py b/nova/db/api.py index aa77dd2ea..5e0852c8f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -326,6 +326,11 @@ def instance_get_floating_address(context, instance_id): return IMPL.instance_get_floating_address(context, instance_id) +def instance_get_project_vpn(context, project_id): + """Get a vpn instance by project or return None.""" + return IMPL.instance_get_project_vpn(context, project_id) + + def instance_get_by_internal_id(context, internal_id): """Get an instance by ec2 id.""" return IMPL.instance_get_by_internal_id(context, internal_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 9ae12f178..01e0b1fd6 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -724,6 +724,18 @@ def instance_get_all_by_reservation(context, reservation_id): all() +@require_admin_context +def instance_get_project_vpn(context, project_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload('security_groups')).\ + filter_by(project_id=project_id).\ + filter_by(image_id=FLAGS.vpn_image_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + + @require_context def instance_get_by_internal_id(context, internal_id): session = get_session() diff --git a/nova/utils.py b/nova/utils.py index e7892a212..b3297acd9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -27,6 +27,7 @@ import os import random import subprocess import socket +import struct import sys from xml.sax import saxutils @@ -61,6 +62,51 @@ def import_object(import_str): return cls() +def vpn_ping(address, port, timeout=0.05, session_id=None): + """Sends a vpn negotiation packet and returns the server session. + + Returns False on a failure. Basic packet structure is below. 
+ + Client packet (14 bytes):: + 0 1 8 9 13 + +-+--------+-----+ + |x| cli_id |?????| + +-+--------+-----+ + x = packet identifier 0x38 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + + Server packet (26 bytes):: + 0 1 8 9 13 14 21 2225 + +-+--------+-----+--------+----+ + |x| srv_id |?????| cli_id |????| + +-+--------+-----+--------+----+ + x = packet identifier 0x40 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + bit 9 was 1 and the rest were 0 in testing + """ + if session_id is None: + session_id = random.randint(0, 0xffffffffffffffff) + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + data = struct.pack("!BQxxxxxx", 0x38, session_id) + sock.sendto(data, (address, port)) + sock.settimeout(timeout) + try: + received = sock.recv(2048) + except socket.timeout: + return False + finally: + sock.close() + fmt = "!BQxxxxxQxxxx" + if len(received) != struct.calcsize(fmt): + print struct.calcsize(fmt) + return False + (identifier, server_sess, client_sess) = struct.unpack(fmt, received) + if identifier == 0x40 and client_sess == session_id: + return server_sess + + def fetchfile(url, target): logging.debug("Fetching %s" % url) # c = pycurl.Curl() -- cgit From f74094cd303b21c12e8a83790d405bcae8103be8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 24 Nov 2010 21:40:41 +0000 Subject: don't error on edge case where vpn has been launched but fails to get a network --- bin/nova-manage | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3d0122637..7c07ce3f0 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -120,7 +120,8 @@ class VpnCommands(object): state = 'down' if vpn.get('fixed_ip', None): address = vpn['fixed_ip']['address'] - if utils.vpn_ping(project.vpn_ip, project.vpn_port): + if project.vpn_ip and utils.vpn_ping(project.vpn_ip, + project.vpn_port): state = 'up' print address, print vpn['host'], -- cgit From 9f722a0bcdb987c228f4ebf1e42c904a26d0ef73 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 10:42:06 +0000 Subject: first cut of changes for the attach_volume call --- nova/virt/xenapi.py | 94 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 4ed5d047f..ec5e7456a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -63,6 +63,9 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from xml.dom.minidom import parseString + + XenAPI = None @@ -90,7 +93,7 @@ XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME + 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} @@ -126,7 +129,7 @@ class XenAPIConnection(object): def spawn(self, instance): vm = yield self._lookup(instance.name) if vm is not None: - raise Exception('Attempted to create non-unique name %s' % + raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) @@ -296,14 +299,34 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - + @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): - return True + # NOTE: No Resource Pool concept so far + logging.debug("Attach_volume: %s, %s, %s", + instance_name, device_path, 
mountpoint) + volume_info = _parse_volume_info(device_path, mountpoint) + # Create the iSCSI SR, and the PDB through which hosts access SRs. + # But first, retrieve target info, like Host, IQN, LUN and SCSIID + target = yield self._get_target(volume_info) + label = 'SR-%s' % volume_info['volumeId'] + sr_ref = yield self._create_sr(target, label) + # Create VDI and attach VBD to VM + vm = None + try: + task = yield self._call_xenapi('', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - return True + logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Detach VBD from VM + # Forget SR/PDB info associated with host + # TODO: can we avoid destroying the SR every time we detach? + yield True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) @@ -333,6 +356,52 @@ class XenAPIConnection(object): else: return vms[0] + @utils.deferredToThread + def _get_target(self, volume_info): + return self._get_target_blocking(volume_info) + + def _get_target_blocking(self, volume_info): + target = {} + target['target'] = volume_info['targetHost'] + target['port'] = volume_info['targetPort'] + target['targetIQN'] = volume_info['iqn'] + # We expect SR_BACKEND_FAILURE_107 to retrieve params to create the SR + try: + self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, '-1', '', '', + 'lvmoiscsi', '', False, {}) + except XenAPI.Failure, exc: + if exc.details[0] == 'SR_BACKEND_FAILURE_107': + xml_response = parseString(exc.details[3]) + isciTargets = xml_response.getElementsByTagName('iscsi-target') + # Make sure that only the correct Lun is visible + if len(isciTargets) > 1: + raise Exception('More than one ISCSI Target available') + isciLuns = isciTargets.item(0).getElementsByTagName('LUN') + if len(isciLuns) > 1: + raise Exception('More than one ISCSI Lun available') + # Parse params from the xml response into the dictionary + for n in isciLuns.item(0).childNodes: + if n.nodeType == 1: + target[n.nodeName] = str(n.firstChild.data).strip() + return target + + @utils.deferredToThread + def _create_sr(self, target, label): + return self._create_sr_blocking(target, label) + + def _create_sr_blocking(self, target, label): + # TODO: we might want to put all these string literals into constants + sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, + target['size'], + label, + '', + 'lvmoiscsi', + '', + True, {}) + return sr + def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -412,7 +481,18 @@ def _parse_xmlrpc_value(val): if not val: return val x = xmlrpclib.loads( - '' + - val + + '' + + val + '') return x[0][0] + + +def _parse_volume_info(device_path, mountpoint): + volume_info = {} + volume_info['volumeId'] = 'vol-qurmrzn9' + # XCP and XenServer add an x to the device name + volume_info['xenMountpoint'] = '/dev/xvdb' + volume_info['targetHost'] = '10.70.177.40' + volume_info['targetPort'] = '3260' # default 3260 + volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' + return volume_info -- cgit From 688d564668aefa4b644236421a3a45fc90486634 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:31:32 +0000 Subject: work on attach_volume, with a few things to iron out --- nova/virt/xenapi.py | 99 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 16 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 5bf98468e..d3167ebf3 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -147,7 +147,7 @@ class XenAPIConnection(object): vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + yield self._create_vbd(vm_ref, vdi_ref, 0, True, True, False) if network_ref: yield self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) @@ -197,7 +197,21 @@ class XenAPIConnection(object): defer.returnValue(vm_ref) @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + vdi_rec = {} + vdi_rec['read_only'] = read_only + vdi_rec['SR'] = sr_ref + vdi_rec['virtual_size'] = str(size) + vdi_rec['name_label'] = label + vdi_rec['name_description'] = description + vdi_rec['sharable'] = sharable + vdi_rec['type'] = type + vdi_rec['other_config'] = {} + vdi_ref = yield self._call_xenapi('VDI.create', vdi_rec) + defer.returnValue(vdi_ref) + + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable, unpluggable, empty): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -208,8 +222,8 @@ class XenAPIConnection(object): vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False + vbd_rec['unpluggable'] = unpluggable + vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} @@ -320,14 +334,33 @@ class XenAPIConnection(object): # But first, retrieve target info, like Host, IQN, LUN and SCSIID target = yield self._get_target(volume_info) label = 'SR-%s' % volume_info['volumeId'] - sr_ref = yield self._create_sr(target, label) + description = 'Attached-to:%s' % instance_name + # Create SR and check the physical space available for the VDI allocation + sr_ref = yield self._create_sr(target, label, description) + disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM - vm = None + vm_ref = yield self._lookup(instance_name) + logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) try: - task = yield self._call_xenapi('', vm) - yield self._wait_for_task(task) + vdi_ref = yield self._create_vdi(sr_ref, disk_size, + 'user', volume_info['volumeId'], '', + False, False) except Exception, exc: logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s' % sr_ref) + else: + try: + userdevice = 2 # FIXME: this depends on the numbers of attached disks + vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s' % sr_ref) yield True @defer.inlineCallbacks @@ -395,23 +428,57 @@ class XenAPIConnection(object): if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() return target + + @utils.deferredToThread + def _get_sr_available_space(self, sr_ref): + return self._get_sr_available_space_blocking(sr_ref) + + def _get_sr_available_space_blocking(self, sr_ref): + pu = self._conn.xenapi.SR.get_physical_utilisation(sr_ref) + ps = self._conn.xenapi.SR.get_physical_size(sr_ref) + return (int(ps) - int(pu)) - (8 * 1024 * 1024) @utils.deferredToThread - def _create_sr(self, target, label): - return self._create_sr_blocking(target, label) + def _create_sr(self, target, label, description): + return self._create_sr_blocking(target, label, description) - def _create_sr_blocking(self, target, label): + def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, - '', + description, 'lvmoiscsi', '', True, {}) - return sr + # TODO: there might be some timing issues here + self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + @defer.inlineCallbacks + def _destroy_sr(self, sr_ref): + # Some clean-up depending on the state of the SR + #yield self._destroy_vdbs(sr_ref) + #yield self._destroy_vdis(sr_ref) + # Destroy PDBs + pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) + for pbd_ref in pbds: + try: + task = yield self._call_xenapi('Async.PBD.unplug', pbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + else: + task = yield 
self._call_xenapi('Async.PBD.destroy', pbd_ref) + yield self._wait_for_task(task) + # Forget SR + try: + task = yield self._call_xenapi('Async.SR.forget', sr_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -524,8 +591,8 @@ def _parse_xmlrpc_value(val): def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' - # XCP and XenServer add an x to the device name - volume_info['xenMountpoint'] = '/dev/xvdb' + # Because XCP/XS want an x beforehand + volume_info['xenMountpoint'] = '/dev/xvdc' volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' -- cgit From a9b900d24020b68284e402a98ee28c107de0bd71 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:42:22 +0000 Subject: added attach_volume implementation --- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index d3167ebf3..236360f11 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -593,7 +593,7 @@ def _parse_volume_info(device_path, mountpoint): volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '10.70.177.40' + volume_info['targetHost'] = '' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 04b1740c991d6d499364c21c2524c46ed5fc2522 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 17:26:44 +0000 Subject: changes --- nova/virt/xenapi.py | 93 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 25 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 236360f11..f2ba71306 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -198,6 +198,9 @@ class XenAPIConnection(object): @defer.inlineCallbacks def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + """Create a VDI record. 
Returns a Deferred that gives the new + VDI reference.""" + vdi_rec = {} vdi_rec['read_only'] = read_only vdi_rec['SR'] = sr_ref @@ -337,7 +340,8 @@ class XenAPIConnection(object): description = 'Attached-to:%s' % instance_name # Create SR and check the physical space available for the VDI allocation sr_ref = yield self._create_sr(target, label, description) - disk_size = yield self._get_sr_available_space(sr_ref) + disk_size = int(target['size']) + #disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM vm_ref = yield self._lookup(instance_name) logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) @@ -347,20 +351,30 @@ class XenAPIConnection(object): False, False) except Exception, exc: logging.warn(exc) - if sr_ref: - yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VDI on SR %s' % sr_ref) + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s for instance %s' + % (sr_ref, + instance_name)) else: - try: + try: userdevice = 2 # FIXME: this depends on the numbers of attached disks vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s for instance %s' + % (sr_ref, + instance_name)) + else: + try: + raise Exception('') + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VBD on SR %s' % sr_ref) + raise Exception('Unable to attach volume to instance %s' % instance_name) + yield True @defer.inlineCallbacks @@ -412,7 +426,7 @@ class XenAPIConnection(object): try: self._conn.xenapi.SR.create(self._get_xenapi_host(), target, '-1', '', '', - 'lvmoiscsi', '', False, {}) + 'iscsi', '', False, {}) except XenAPI.Failure, exc: if exc.details[0] == 'SR_BACKEND_FAILURE_107': xml_response = parseString(exc.details[3]) @@ -427,6 +441,9 @@ class XenAPIConnection(object): for n in isciLuns.item(0).childNodes: if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() + else: + logging.warn(exc) + raise Exception('Unable to access SR') return target @utils.deferredToThread @@ -444,24 +461,45 @@ class XenAPIConnection(object): def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.get_by_name_label(label) + if sr_ref is None: + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, description, - 'lvmoiscsi', + 'iscsi', '', True, {}) - # TODO: there might be some timing issues here - self._conn.xenapi.SR.scan(sr_ref) - return sr_ref + if sr_ref: + #self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + else: + raise Exception('Unable to create SR') @defer.inlineCallbacks def _destroy_sr(self, sr_ref): # Some clean-up depending on the state of the SR - #yield self._destroy_vdbs(sr_ref) - #yield self._destroy_vdis(sr_ref) - # Destroy PDBs + # Remove VBDs + #vbds = yield self._conn.xenapi.SR.get_VBDs(sr_ref) + #for vbd_ref in vbds: + # try: + # task = yield self._call_xenapi('Async.VBD.destroy', vbd_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + # 
Remove VDIs + #======================================================================= + # vdis = yield self._conn.xenapi.SR.get_VDIs(sr_ref) + # for vdi_ref in vdis: + # try: + # task = yield self._call_xenapi('Async.VDI.destroy', vdi_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + #======================================================================= + sr_rec = self._conn.xenapi.SR.get_record(sr_ref) + # Detach from host pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) for pbd_ref in pbds: try: @@ -469,16 +507,21 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - else: - task = yield self._call_xenapi('Async.PBD.destroy', pbd_ref) - yield self._wait_for_task(task) - # Forget SR + # Destroy SR try: - task = yield self._call_xenapi('Async.SR.forget', sr_ref) + task = yield self._call_xenapi('Async.SR.destroy', sr_ref) yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + def _sr_dispose_action(self, action, records): + for rec_ref in records: + try: + task = yield self._call_xenapi(action, rec_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -592,8 +635,8 @@ def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand - volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '' + volume_info['mountpoint'] = '/dev/xvdc' # translate + volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 40de074f44059f89caa15420a7174f63c76eec48 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 30 Nov 2010 19:03:13 +0000 Subject: iscsi volumes attach/detach complete. 
There is only one minor issue on how to discover targets from device_path --- nova/virt/xenapi/vm_utils.py | 38 +++++++ nova/virt/xenapi/volume_utils.py | 210 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volumeops.py | 93 +++++++++++------ 3 files changed, 313 insertions(+), 28 deletions(-) create mode 100644 nova/virt/xenapi/volume_utils.py diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b68df2791..6966e7b7b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -103,6 +103,44 @@ class VMHelper(): vdi_ref) defer.returnValue(vbd_ref) + @classmethod + @utils.deferredToThread + def find_vbd_by_number(self, session, vm_ref, number): + return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) + + @classmethod + def find_vbd_by_number_blocking(self, session, vm_ref, number): + vbds = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbds: + for vbd in vbds: + try: + vbd_rec = session.get_xenapi().VBD.get_record(vbd) + if vbd_rec['userdevice'] == str(number): + return vbd + except Exception, exc: + logging.warn(exc) + raise Exception('VBD not found in instance %s' % vm_ref) + + @classmethod + @defer.inlineCallbacks + def unplug_vbd(self, session, vbd_ref): + try: + vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) + except Exception, exc: + logging.warn(exc) + if exc.details[0] != 'DEVICE_ALREADY_DETACHED': + raise Exception('Unable to unplug VBD %s' % vbd_ref) + + @classmethod + @defer.inlineCallbacks + def destroy_vbd(self, session, vbd_ref): + try: + task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) + yield session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to destroy VBD %s' % vbd_ref) + @classmethod @defer.inlineCallbacks def create_vif(self, session, vm_ref, network_ref, mac_address): diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py new file mode 100644 index 000000000..b982ac124 --- /dev/null +++ b/nova/virt/xenapi/volume_utils.py @@ -0,0 +1,210 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Helper methods for operations related to the management of volumes, +and storage repositories +""" + +import logging +import re +import string + +from twisted.internet import defer + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + + +class VolumeHelper(): + def __init__(self, session): + return + + @classmethod + @utils.deferredToThread + def create_iscsi_storage(self, session, target, port, target_iqn, + username, password, label, description): + + return VolumeHelper.create_iscsi_storage_blocking(session, target, + port, + target_iqn, + username, + password, + label, + description) + + @classmethod + def create_iscsi_storage_blocking(self, session, target, port, target_iqn, + username, password, label, description): + + sr_ref = session.get_xenapi().SR.get_by_name_label(label) + if len(sr_ref) == 0: + logging.debug('Introducing %s...' % label) + try: + sr_ref = session.get_xenapi().SR.create( + session.get_xenapi_host(), + {'target': target, + 'port': port, + 'targetIQN': target_iqn + # TODO: when/if chap authentication is used + #'chapuser': username, + #'chappassword': password + }, + '0', label, description, 'iscsi', '', False, {}) + logging.debug('Introduced %s as %s.' % (label, sr_ref)) + return sr_ref + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to create Storage Repository') + else: + return sr_ref[0] + + @classmethod + @defer.inlineCallbacks + def find_sr_from_vbd(self, session, vbd_ref): + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + + @classmethod + @utils.deferredToThread + def destroy_iscsi_storage(self, session, sr_ref): + VolumeHelper.destroy_iscsi_storage_blocking(session, sr_ref) + + @classmethod + def destroy_iscsi_storage_blocking(self, session, sr_ref): + logging.debug("Forgetting SR %s ... 
", sr_ref) + pbds = [] + try: + pbds = session.get_xenapi().SR.get_PBDs(sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when getting PBDs for %s', + exc, sr_ref) + for pbd in pbds: + try: + session.get_xenapi().PBD.unplug(pbd) + except Exception, exc: + logging.warn('Ignoring exception %s when unplugging PBD %s', + exc, pbd) + try: + session.get_xenapi().SR.forget(sr_ref) + logging.debug("Forgetting SR %s done.", sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when forgetting SR %s', + exc, sr_ref) + + @classmethod + @utils.deferredToThread + def introduce_vdi(self, session, sr_ref): + return VolumeHelper.introduce_vdi_blocking(session, sr_ref) + + @classmethod + def introduce_vdi_blocking(self, session, sr_ref): + try: + vdis = session.get_xenapi().SR.get_VDIs(sr_ref) + except Exception, exc: + raise Exception('Unable to introduce VDI on SR %s' % sr_ref) + try: + vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + except Exception, exc: + raise Exception('Unable to get record of VDI %s on' % vdis[0]) + else: + return session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + volume_id = _get_volume_id(device_path) + target_host = _get_target_host(device_path) + target_port = _get_target_port(device_path) + target_iqn = _get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + +def _get_volume_id(n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + +def _get_target_host(n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + +def _get_target_port(n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + +def _get_iqn(n): + # FIXME: n must contain at least the volume_id + volume_id = _get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5aefa0611..d5c309240 100644 --- 
a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,6 +17,12 @@ """ Management class for Storage-related functions (attach, detach, etc). """ +import logging + +from twisted.internet import defer + +from volume_utils import VolumeHelper +from vm_utils import VMHelper class VolumeOps(object): @@ -25,58 +31,89 @@ class VolumeOps(object): @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): + # Before we start, check that the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - volume_info = _parse_volume_info(device_path, mountpoint) + vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - target = yield self._get_target(volume_info) - label = 'SR-%s' % volume_info['volumeId'] - description = 'Attached-to:%s' % instance_name - # Create SR and check the physical space available for the VDI allocation - sr_ref = yield self._create_sr(target, label, description) - disk_size = int(target['size']) - #disk_size = yield self._get_sr_available_space(sr_ref) - # Create VDI and attach VBD to VM - vm_ref = yield self._lookup(instance_name) - logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) + label = 'SR-%s' % vol_rec['volumeId'] + description = 'Disk-for:%s' % instance_name + # Create SR + sr_ref = yield VolumeHelper.create_iscsi_storage(self._session, + vol_rec['targetHost'], + vol_rec['targetPort'], + vol_rec['targeIQN'], + '', # no CHAP auth + '', + label, + description) + # Introduce VDI and attach VBD to VM try: - vdi_ref = yield self._create_vdi(sr_ref, disk_size, - 'user', volume_info['volumeId'], '', - False, False) + vdi_ref = yield VolumeHelper.introduce_vdi(self._session, sr_ref) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VDI on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - userdevice = 2 # FIXME: this depends on the numbers of attached disks - vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + vbd_ref = yield VMHelper.create_vbd(self._session, + vm_ref, vdi_ref, + vol_rec['deviceNumber'], + False) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VBD on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - raise Exception('') - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) + #raise Exception('') + task = yield self._session.call_xenapi('Async.VBD.plug', + vbd_ref) + yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) - raise Exception('Unable to attach volume to instance %s' % instance_name) - + yield VolumeHelper.destroy_iscsi_storage(self._session, + sr_ref) + raise Exception('Unable to attach volume to instance %s' % + instance_name) yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Before we start, check that 
the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM - # Forget SR/PDB info associated with host - # TODO: can we avoid destroying the SR every time we detach? - yield True \ No newline at end of file + logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + try: + vbd_ref = yield VMHelper.find_vbd_by_number(self._session, + vm_ref, device_number) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to locate volume %s' % mountpoint) + else: + try: + sr_ref = yield VolumeHelper.find_sr_from_vbd(self._session, + vbd_ref) + yield VMHelper.unplug_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to detach volume %s' % mountpoint) + try: + yield VMHelper.destroy_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + # Forget SR + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) + yield True -- cgit From 4b74a1b243d87d53e660029728d12a9c067deeac Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Tue, 30 Nov 2010 13:08:39 -0600 Subject: Cleaned up pep8 errors --- nova/api/ec2/cloud.py | 2 +- nova/compute/disk.py | 6 +++--- nova/virt/libvirt_conn.py | 13 ++++++------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1cf4abcc9..a5acd1f6d 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -801,7 +801,7 @@ class CloudController(object): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - + # make sure we have access to kernel and ramdisk if kernel_id: self.image_service.show(context, kernel_id) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index d5a08c01f..d762b8087 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -137,9 +137,9 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): # We can only loopback mount raw images. 
If the device isn't there, # it's normally because it's a .vmdk or a .vdi etc if not os.path.exists(mapped_device): - raise exception.Error( - 'Mapped device was not found (we can only inject raw disk images): %s' - % mapped_device) + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) # Configure ext2fs so that it doesn't auto-check every N boots out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 09cb29773..81dbbaad5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -467,13 +467,13 @@ class LibvirtConnection(object): process_input=process_input, check_exit_code=check_exit_code) - # For now, we assume that if we're not using a kernel, we're using a - # partitioned disk image where the target partition is the first + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first # partition target_partition = None if not using_kernel: target_partition = "1" - + key = str(inst['key_data']) net = None network_ref = db.network_get_by_instance(context.get_admin_context(), @@ -493,11 +493,10 @@ class LibvirtConnection(object): inst['name'], inst.image_id) if net: logging.info('instance %s: injecting net into image %s', - inst['name'], inst.image_id) - execute=execute) + inst['name'], inst.image_id) try: yield disk.inject_data(basepath('disk-raw'), key, net, - partition=target_partition, + partition=target_partition, execute=execute) except Exception as e: # This could be a windows image, or a vmdk format disk @@ -557,7 +556,7 @@ class LibvirtConnection(object): if xml_info['ramdisk_id'] or xml_info['kernel_id']: xml_info['disk'] = xml_info['basepath'] + "/disk" - xml = str(Template(self.libvirt_xml, searchList=[ xml_info ] )) + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) logging.debug('instance %s: finished toXML method', instance['name']) return xml -- cgit From ffa41022463c23a67dda2a6de74b6d5203cb37ac Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Tue, 30 Nov 2010 16:09:31 -0600 Subject: Fixed termie's tiny bits from the prior merge request --- nova/compute/disk.py | 4 ++-- nova/flags.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index d762b8087..9ba827519 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -134,8 +134,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): else: mapped_device = device - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc + # We can only loopback mount raw images. 
If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc if not os.path.exists(mapped_device): raise exception.Error('Mapped device was not found (we can' ' only inject raw disk images): %s' % diff --git a/nova/flags.py b/nova/flags.py index cf481b55f..a61033201 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -237,7 +237,7 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'aki-00000000', - 'kernel image that indicates not to use a kernel, ' + 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') -- cgit From f26489ef1ad2a7df0e9f72a8c9ad4f2e3a65ae57 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 1 Dec 2010 12:02:02 +0000 Subject: minor refactoring --- nova/virt/xenapi/novadeps.py | 82 ++++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volume_utils.py | 77 ------------------------------------- nova/virt/xenapi/volumeops.py | 6 ++- 3 files changed, 86 insertions(+), 79 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index ba62468fb..db51982a6 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -14,6 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. +import re +import string + from nova import db from nova import flags from nova import process @@ -32,6 +35,15 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + class Instance(object): @@ -101,3 +113,73 @@ class User(object): @classmethod def get_secret(self, user): return user.secret + + +class Volume(object): + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = Volume.mountpoint_to_number(mountpoint) + volume_id = Volume.get_volume_id(device_path) + target_host = Volume.get_target_host(device_path) + target_port = Volume.get_target_port(device_path) + target_iqn = Volume.get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + @classmethod + def get_volume_id(self, n): + # FIXME: n must contain at least the 
volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + @classmethod + def get_target_host(self, n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + @classmethod + def get_target_port(self, n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + @classmethod + def get_iqn(self, n): + # FIXME: n must contain at least the volume_id + volume_id = Volume.get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index b982ac124..3b3e8894c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -20,20 +20,10 @@ and storage repositories """ import logging -import re -import string from twisted.internet import defer from nova import utils -from nova import flags - -FLAGS = flags.FLAGS - -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') class VolumeHelper(): @@ -141,70 +131,3 @@ class VolumeHelper(): vdi_rec['location'], vdi_rec['xenstore_data'], vdi_rec['sm_config']) - - @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint - device_number = VolumeHelper.mountpoint_to_number(mountpoint) - volume_id = _get_volume_id(device_path) - target_host = _get_target_host(device_path) - target_port = _get_target_port(device_path) - target_iqn = _get_iqn(device_path) - - if (device_number < 0) or \ - (volume_id is None) or \ - (target_host is None) or \ - (target_iqn is None): - raise Exception('Unable to obtain target information %s, %s' % - (device_path, mountpoint)) - - volume_info = {} - volume_info['deviceNumber'] = device_number - volume_info['volumeId'] = volume_id - volume_info['targetHost'] = target_host - volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn - return volume_info - - @classmethod - def mountpoint_to_number(self, mountpoint): - if mountpoint.startswith('/dev/'): - mountpoint = mountpoint[5:] - if re.match('^[hs]d[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^vd[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^[0-9]+$', mountpoint): - return string.atoi(mountpoint, 10) - else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) - return -1 - - -def _get_volume_id(n): - # FIXME: n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes - # see compute/manager->setup_compute_volume - volume_id = n[n.find('/vol-') + 1:] - if volume_id == n: - volume_id = n[n.find('-vol-') + 1:].replace('--', '-') - return volume_id - - -def _get_target_host(n): - # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host - - -def _get_target_port(n): - # FIXME: if n is none fall back on flags - return FLAGS.target_port - - -def _get_iqn(n): - # FIXME: n must contain at least the volume_id - volume_id = _get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % 
(FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d5c309240..ec4343329 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -24,6 +24,8 @@ from twisted.internet import defer from volume_utils import VolumeHelper from vm_utils import VMHelper +from novadeps import Volume + class VolumeOps(object): def __init__(self, session): @@ -38,9 +40,9 @@ class VolumeOps(object): # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID + vol_rec = Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR @@ -95,7 +97,7 @@ class VolumeOps(object): raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) - device_number = VolumeHelper.mountpoint_to_number(mountpoint) + device_number = Volume.mountpoint_to_number(mountpoint) try: vbd_ref = yield VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) -- cgit From 364b4204ba9e4d04b0d0293a9c5fd62320ae3a63 Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Wed, 1 Dec 2010 16:23:34 -0600 Subject: Changed null_kernel flag from aki-00000000 to nokernel --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index a61033201..be81fd7ed 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -236,7 +236,7 @@ DEFINE_string('default_ramdisk', 'ari-11111', 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') -DEFINE_string('null_kernel', 'aki-00000000', +DEFINE_string('null_kernel', 'nokernel', 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') -- cgit From e4cfd7f3fe7d3c50d65c61abf21bf998fde85147 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 14:09:23 +0000 Subject: minor refactoring after merge --- nova/virt/xenapi/novadeps.py | 28 ++++++++++++++++++++-------- nova/virt/xenapi_conn.py | 5 ++++- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index b4802764e..aa3535162 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -36,8 +36,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} -FLAGS = flags.FLAGS - flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' 
@@ -81,6 +79,21 @@ class Configuration(object): def xenapi_task_poll_interval(self): return self._flags.xenapi_task_poll_interval + @property + def target_host(self): + return self._flags.target_host + + @property + def target_port(self): + return self._flags.target_port + + @property + def iqn_prefix(self): + return self._flags.iqn_prefix + + +config = Configuration() + class Instance(object): @@ -206,18 +219,17 @@ class Volume(object): @classmethod def get_target_host(self, n): # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host + if n is None or config.target_host: + return config.target_host @classmethod def get_target_port(self, n): # FIXME: if n is none fall back on flags - return FLAGS.target_port + return config.target_port @classmethod def get_iqn(self, n): # FIXME: n must contain at least the volume_id volume_id = Volume.get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) - + if n is None or config.iqn_prefix: + return '%s:%s' % (config.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 948fade7e..d3f66b12c 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -44,7 +44,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :xenapi_task_poll_interval: The interval (seconds) used for polling of remote tasks (Async.VM.start, etc) (default: 0.5). - +:target_host: the iSCSI Target Host IP address, i.e. the IP + address for the nova-volume host +:target_port: iSCSI Target Port, 3260 Default +:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' """ import logging -- cgit From da010f311c07ee31d7d00ceb48d0f8656f1825ca Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 3 Dec 2010 00:01:21 +0000 Subject: * Removes unused schema * Removes MUST uid from novaUser * Changes isAdmin to isNovaAdmin * Adds two new configuration options: ** ldap_user_id_attribute, with a default of uid ** ldap_user_name_attribute, with a default of cn * ldapdriver.py has been modified to use these changes Rationale: Removing uid from novaUser: Requiring uid makes the schema very posix specific. Other schemas don't use uid for identifiers at all. This change makes the schema more interoperable. Changing isAdmin to isNovaAdmin: This attribute is too generic. It doesn't describe what the user is an admin of, and in a pre-existing directory is out of place. This change is to make the attribute more specific to the software. Adding config options for id and name: This is another interoperability change. This change makes the driver more compatible with directories like AD, where sAMAccountName is used instead of uid. Also, some directory admins prefer to use displayName rather than CN for full names of users. 
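For illustration only (this sketch is not part of the patch): the effect of the two new options on DN construction, using hypothetical Active Directory style values. The defaults added by this change remain 'uid' and 'cn', and in a real deployment the values would be supplied as nova flags (for example --ldap_user_id_attribute=sAMAccountName) rather than set in code:

    # Hypothetical deployment values; 'uid' and 'cn' are the patch defaults.
    ldap_user_id_attribute = 'sAMAccountName'
    ldap_user_subtree = 'ou=Users,dc=example,dc=com'

    def uid_to_dn(uid):
        # Mirrors LdapDriver.__uid_to_dn after this change: the RDN attribute
        # comes from the configurable option instead of a hard-coded 'uid'.
        return ldap_user_id_attribute + '=%s,%s' % (uid, ldap_user_subtree)

    print uid_to_dn('jsmith')
    # sAMAccountName=jsmith,ou=Users,dc=example,dc=com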
--- nova/auth/ldapdriver.py | 21 ++++++++++++--------- nova/auth/nova_openldap.schema | 26 +++----------------------- nova/auth/nova_sun.schema | 6 ++---- nova/auth/openssh-lpk_openldap.schema | 19 ------------------- nova/auth/openssh-lpk_sun.schema | 10 ---------- 5 files changed, 17 insertions(+), 65 deletions(-) delete mode 100644 nova/auth/openssh-lpk_openldap.schema delete mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65..e4c36c28d 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -37,6 +37,8 @@ flags.DEFINE_string('ldap_url', 'ldap://localhost', flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') +flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -131,12 +133,12 @@ class LdapDriver(object): 'inetOrgPerson', 'novaUser']), ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), + (FLAGS.ldap_user_id_attribute, [name]), ('sn', [name]), - ('cn', [name]), + (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), + ('isNovaAdmin', [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -274,7 +276,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -450,11 +452,11 @@ class LdapDriver(object): if attr == None: return None return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], + 'id': attr[FLAGS.ldap_user_id_attribute][0], + 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} def __to_project(self, attr): """Convert ldap attributes to Project object""" @@ -474,9 +476,10 @@ class LdapDriver(object): return dn.split(',')[0].split('=')[1] @staticmethod - def __uid_to_dn(dn): + def __uid_to_dn(uid): """Convert uid to dn""" - return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + return FLAGS.ldap_user_id_attribute + '=%s,%s' \ + % (uid, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 4047361de..9e528f58b 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -30,20 +30,10 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - attributetype ( novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' + NAME 'isNovaAdmin' + DESC 'Is user an nova administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE @@ -61,17 +51,7 @@ objectClass ( NAME 'novaUser' DESC 'access and secret keys' AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) + MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClass ( diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index e925e05e4..decf10f06 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -8,9 +8,7 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema deleted file mode 100644 index 93351da6d..000000000 --- a/nova/auth/openssh-lpk_openldap.schema +++ /dev/null @@ -1,19 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema deleted file mode 100644 index 5f52db3b6..000000000 --- a/nova/auth/openssh-lpk_sun.schema +++ /dev/null @@ -1,10 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Schema for Sun Directory Server. -# Based on the original schema, modified by Stefan Fischer. 
-# -dn: cn=schema -attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) -objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) -- cgit From ee71c0accbb540bcb9d08cdcdc8b659f29a0edd6 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:06:32 +0000 Subject: added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. Long term solution is to add a few more fields to the db in the iscsi_target table with the necessary info and modify the iscsi driver to set them --- nova/virt/xenapi/novadeps.py | 89 ++++++++++++++++++++++++++++++++++--------- nova/virt/xenapi/volumeops.py | 2 +- 2 files changed, 71 insertions(+), 20 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 65576019e..66c8233b8 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -27,6 +27,9 @@ import string from nova import db from nova import flags from nova import context +from nova import process + +from twisted.internet import defer from nova.compute import power_state from nova.auth.manager import AuthManager @@ -193,15 +196,28 @@ class User(object): class Volume(object): + """ Wraps up volume specifics """ @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint + @defer.inlineCallbacks + def parse_volume_info(cls, device_path, mountpoint): + """ + Parse device_path and mountpoint as they can be used by XenAPI. + In particular, the mountpoint (e.g. /dev/sdc) must be translated + into a numeric literal. + FIXME: As for device_path, currently cannot be used as it is, + because it does not contain target information. As for interim + solution, target details are passed either via Flags or obtained + by iscsiadm. Long-term solution is to add a few more fields to the + db in the iscsi_target table with the necessary info and modify + the iscsi driver to set them. 
+ """ device_number = Volume.mountpoint_to_number(mountpoint) volume_id = Volume.get_volume_id(device_path) - target_host = Volume.get_target_host(device_path) - target_port = Volume.get_target_port(device_path) - target_iqn = Volume.get_iqn(device_path) + (iscsi_name, iscsi_portal) = yield Volume.get_target(volume_id) + target_host = Volume.get_target_host(iscsi_portal) + target_port = Volume.get_target_port(iscsi_portal) + target_iqn = Volume.get_iqn(iscsi_name, volume_id) if (device_number < 0) or \ (volume_id is None) or \ @@ -216,10 +232,11 @@ class Volume(object): volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port volume_info['targeIQN'] = target_iqn - return volume_info + defer.returnValue(volume_info) @classmethod - def mountpoint_to_number(self, mountpoint): + def mountpoint_to_number(cls, mountpoint): + """ Translate a mountpoint like /dev/sdc into a numberic """ if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] if re.match('^[hs]d[a-p]$', mountpoint): @@ -233,8 +250,9 @@ class Volume(object): return -1 @classmethod - def get_volume_id(self, n): - # FIXME: n must contain at least the volume_id + def get_volume_id(cls, n): + """ Retrieve the volume id from device_path """ + # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume @@ -244,19 +262,52 @@ class Volume(object): return volume_id @classmethod - def get_target_host(self, n): - # FIXME: if n is none fall back on flags - if n is None or config.target_host: + def get_target_host(cls, n): + """ Retrieve target host """ + if n: + return n[0:n.find(':')] + elif n is None or config.target_host: return config.target_host @classmethod - def get_target_port(self, n): - # FIXME: if n is none fall back on flags - return config.target_port + def get_target_port(cls, n): + """ Retrieve target port """ + if n: + return n[n.find(':') + 1:] + elif n is None or config.target_port: + return config.target_port @classmethod - def get_iqn(self, n): - # FIXME: n must contain at least the volume_id - volume_id = Volume.get_volume_id(n) - if n is None or config.iqn_prefix: + def get_iqn(cls, n, id): + """ Retrieve target IQN """ + if n: + return n + elif n is None or config.iqn_prefix: + volume_id = Volume.get_volume_id(id) return '%s:%s' % (config.iqn_prefix, volume_id) + + @classmethod + @defer.inlineCallbacks + def get_target(self, volume_id): + """ + Gets iscsi name and portal from volume name and host. + For this method to work the following are needed: + 1) volume_ref['host'] to resolve the public IP address + 2) ietd to listen only to the public network interface + If any of the two are missing, fall back on Flags + """ + volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_id) + + (r, _e) = yield process.simple_execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % + volume_ref['host']) + if len(_e) == 0: + for target in r.splitlines(): + if volume_id in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + defer.returnValue((iscsi_name, iscsi_portal)) + else: + defer.returnValue((None, None)) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c48f6491..a052aaf95 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -47,7 +47,7 @@ class VolumeOps(object): instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
# But first, retrieve target info, like Host, IQN, LUN and SCSIID - vol_rec = Volume.parse_volume_info(device_path, mountpoint) + vol_rec = yield Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR -- cgit From 06c52051005f5e43a1f543e2d1c5922aa91c7918 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:29:00 +0000 Subject: minor changes to docstrings --- nova/virt/xenapi/novadeps.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 66c8233b8..3680a5dd2 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -60,10 +60,15 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') +flags.DEFINE_string('target_host', + None, + 'iSCSI Target Host') +flags.DEFINE_string('target_port', + '3260', + 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', + 'iqn.2010-10.org.openstack', + 'IQN Prefix') class Configuration(object): @@ -292,9 +297,9 @@ class Volume(object): """ Gets iscsi name and portal from volume name and host. For this method to work the following are needed: - 1) volume_ref['host'] to resolve the public IP address - 2) ietd to listen only to the public network interface - If any of the two are missing, fall back on Flags + 1) volume_ref['host'] must resolve to something rather than loopback + 2) ietd must bind only to the address as resolved above + If any of the two conditions are not met, fall back on Flags. 
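        To make the discovery fallback concrete (the addresses below are
        hypothetical, shown only to illustrate the parsing): a run of
            sudo iscsiadm -m discovery -t sendtargets -p 10.0.0.5
        normally prints one line per target, for example
            10.0.0.5:3260,1 iqn.2010-10.org.openstack:vol-00000001
        and the code above keeps the part before the comma as the portal and
        the part after the space as the iSCSI name (IQN).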
""" volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), volume_id) -- cgit From e9597d1370211de15ca96f1fa52fcbe3c9166a7e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:15:22 +0000 Subject: fixed pylint violations that slipped out from a previous check --- nova/virt/xenapi/vm_utils.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 762cbae83..2ee6737ab 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.virt import images from nova.compute import power_state +from nova.virt.xenapi.volume_utils import StorageError XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -115,11 +116,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def find_vbd_by_number(self, session, vm_ref, number): + def find_vbd_by_number(cls, session, vm_ref, number): + """ Get the VBD reference from the device number """ return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) @classmethod - def find_vbd_by_number_blocking(self, session, vm_ref, number): + def find_vbd_by_number_blocking(cls, session, vm_ref, number): + """ Synchronous find_vbd_by_number """ vbds = session.get_xenapi().VM.get_VBDs(vm_ref) if vbds: for vbd in vbds: @@ -127,29 +130,31 @@ class VMHelper(): vbd_rec = session.get_xenapi().VBD.get_record(vbd) if vbd_rec['userdevice'] == str(number): return vbd - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) raise Exception('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks - def unplug_vbd(self, session, vbd_ref): + def unplug_vbd(cls, session, vbd_ref): + """ Unplug VBD from VM """ try: vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': - raise Exception('Unable to unplug VBD %s' % vbd_ref) + raise StorageError('Unable to unplug VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks - def destroy_vbd(self, session, vbd_ref): + def destroy_vbd(cls, session, vbd_ref): + """ Destroy VBD from host database """ try: task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) yield session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('Unable to destroy VBD %s' % vbd_ref) + raise StorageError('Unable to destroy VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks @@ -244,6 +249,7 @@ class VMHelper(): @classmethod def compile_info(cls, record): + """ Fill record with VM status information """ return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, -- cgit From 88777c09ad909c68da8d433800cae862e9bbff4a Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:26:38 +0000 Subject: and yet another pylint fix --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2ee6737ab..039e72981 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD 
not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks -- cgit From c0fc8a5e9e72ecb780258d9cf41b32973620eb4c Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 15:35:56 +0000 Subject: small fixes on Exception handling --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 10 +++++++--- nova/virt/xenapi/volumeops.py | 7 ++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 039e72981..f29803136 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise StorageError('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 48aff7ef5..debaa6906 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -95,9 +95,13 @@ class VolumeHelper(): @defer.inlineCallbacks def find_sr_from_vbd(cls, session, vbd_ref): """ Find the SR reference from the VBD reference """ - vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) - sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) - defer.returnValue(sr_ref) + try: + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + except XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to find SR from VBD %s' % vbd_ref) @classmethod @utils.deferredToThread diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 4055688e3..b9f260756 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,6 +18,7 @@ Management class for Storage-related functions (attach, detach, etc). """ import logging +import XenAPI from twisted.internet import defer @@ -68,10 +69,10 @@ class VolumeOps(object): vm_ref, vdi_ref, vol_rec['deviceNumber'], False) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise StorageError('Unable to use SR %s for instance %s' + raise Exception('Unable to use SR %s for instance %s' % (sr_ref, instance_name)) else: @@ -79,7 +80,7 @@ class VolumeOps(object): task = yield self._session.call_xenapi('Async.VBD.plug', vbd_ref) yield self._session.wait_for_task(task) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) -- cgit From 699ac0785240307ef2396d688e6c0a2acb446665 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 22:22:48 +0000 Subject: pylint fixes --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 35d89d835..0549dc9fb 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -44,7 +44,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" - def __init__(self, session): + def __init__(self): return @classmethod diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 84eb82f15..051d0fe85 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -1,5 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from orca.scripts import self_voicing # Copyright (c) 2010 Citrix Systems, Inc. # @@ -46,7 +45,7 @@ class VolumeHelper(): """ The class that wraps the helper methods together. """ - def __init__(self, session): + def __init__(self): return @classmethod -- cgit From 03920759ac485e76c9104b4c9a1bf53231e2c47c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 10:22:29 +0000 Subject: Removing novaProject from the schema. This change may look odd at first; here's how it works: Both roles are projects are groupOfNames. Previously, we were differentiating projects from project roles by using the novaProject objectclass on the project, and not on the roles. This change removes novaProject, and uses the owner attribute instead of the projectManager attribute. Only projects should have an owner. We can differentiate projects from project roles by checking for the existence of this attribute. To check for the existence of an attribute in LDAP, a wildcard search is used. The fake LDAP driver did not support wildcard searches, so I put in "all or nothing" support for it. The wildcard search support doesn't work exactly like wildcard searches in LDAP, but will work for the case that's required. --- nova/auth/fakeldap.py | 3 +++ nova/auth/ldapdriver.py | 16 ++++++++-------- nova/auth/nova_openldap.schema | 16 ---------------- nova/auth/nova_sun.schema | 2 -- nova/auth/opendj.sh | 2 -- nova/auth/slap.sh | 4 +--- 6 files changed, 12 insertions(+), 31 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..2dcb69267 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -119,6 +119,9 @@ def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False + # This is a wild card search. Implemented as all or nothing for now. 
+ if value == "*": + return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 871515663..705e89ee8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -106,7 +106,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(objectclass=novaProject)') + attr = self.__find_object(dn, '(owner=*)') return self.__to_project(attr) def get_users(self): @@ -122,7 +122,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(objectclass=novaProject)' + pattern = '(owner=*)' if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -205,10 +205,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['novaProject']), + ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('projectManager', [manager_dn]), + ('owner', [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +224,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -286,7 +286,7 @@ class LdapDriver(object): project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) roles = self.__find_objects(project_dn, '(&(&(objectclass=groupOfNames)' - '(!(objectclass=novaProject)))' + '(!(owner=*)))' '(member=%s))' % self.__uid_to_dn(uid)) return [role['cn'][0] for role in roles] @@ -385,7 +385,7 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') + '(&(objectclass=groupOfNames)(!(owner=*)))') def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" @@ -534,7 +534,7 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), + 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 9e528f58b..1a10a445d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -39,13 +39,6 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - objectClass ( novaOCs:1 NAME 'novaUser' @@ -53,12 +46,3 @@ objectClass ( AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index decf10f06..1a04601b5 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -9,6 
+9,4 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 8052c077d..9a9600342 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,9 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' -cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif -chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif cat >/etc/ldap/ldap.conf </dev/null; echo "$PWD"/"${0##*/}")"` -cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema -cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf </etc/ldap/slapd.conf < Date: Wed, 8 Dec 2010 16:23:59 +0000 Subject: Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema. --- nova/auth/ldapdriver.py | 79 ++++++++++++++++++++++++------------------ nova/auth/nova_openldap.schema | 4 ++- nova/auth/nova_sun.schema | 5 +-- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 705e89ee8..21d8f8065 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,6 +32,8 @@ from nova import flags FLAGS = flags.FLAGS +flags.DEFINE_integer('ldap_schema_version', 1, + 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') @@ -75,10 +77,20 @@ class LdapDriver(object): Defines enter and exit and therefore supports the with/as syntax. 
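    As an illustration only (the flag usage below is an example, not part of
    the patch itself): a site that has not yet migrated its directory entries
    can keep the legacy layout by running with
        --ldap_schema_version=1
    which switches the driver back to the (objectclass=novaProject) project
    search and the isAdmin/projectManager attributes, while any other version
    selects the new (owner=*) pattern together with isNovaAdmin and owner.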
""" + project_pattern = '(owner=*)' + isadmin_attribute = 'isNovaAdmin' + project_attribute = 'owner' + project_objectclass = 'groupOfNames' + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') self.conn = None + if FLAGS.ldap_schema_version == 1: + LdapDriver.project_pattern = '(objectclass=novaProject)' + LdapDriver.isadmin_attribute = 'isAdmin' + LdapDriver.project_attribute = 'projectManager' + LdapDriver.project_objectclass = 'novaProject' def __enter__(self): """Creates the connection to LDAP""" @@ -106,7 +118,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(owner=*)') + attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) def get_users(self): @@ -122,7 +134,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(owner=*)' + pattern = LdapDriver.project_pattern if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -152,11 +164,11 @@ class LdapDriver(object): else: attr.append((self.ldap.MOD_ADD, 'accessKey', \ [access_key])) - if 'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', \ + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, 'isNovaAdmin', \ + attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) @@ -175,7 +187,7 @@ class LdapDriver(object): (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isNovaAdmin', [str(is_admin).upper()]), + (LdapDriver.isadmin_attribute, [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -205,10 +217,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['groupOfNames']), + ('objectclass', [LdapDriver.project_objectclass]), ('cn', [name]), ('description', [description]), - ('owner', [manager_dn]), + (LdapDriver.project_attribute, [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +236,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -284,10 +296,9 @@ class LdapDriver(object): return roles else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) - roles = self.__find_objects(project_dn, - '(&(&(objectclass=groupOfNames)' - '(!(owner=*)))' - '(member=%s))' % self.__uid_to_dn(uid)) + query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] def delete_user(self, uid): @@ -306,9 +317,9 @@ class LdapDriver(object): if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_DELETE, 'accessKey', \ user['accessKey'])) - if 
'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'isNovaAdmin', \ - user['isNovaAdmin'])) + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -330,7 +341,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -384,19 +395,20 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(owner=*)))') + query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" - dns = self.__find_dns(tree, - '(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + query = ('(&(objectclass=groupOfNames)(member=%s))' % + self.__uid_to_dn(uid)) + dns = self.__find_dns(tree, query) return dns def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') is not None + query = '(objectclass=groupOfNames)' + return self.__find_object(dn, query) is not None @staticmethod def __role_to_dn(role, project_id=None): @@ -435,7 +447,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -447,10 +459,10 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) @@ -461,13 +473,13 @@ class LdapDriver(object): """Remove user from group""" if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + "group because the user doesn't exist" % uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -489,7 +501,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -516,13 
+528,13 @@ class LdapDriver(object): if attr is None: return None if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \ - and 'isNovaAdmin' in attr.keys()): + and LdapDriver.isadmin_attribute in attr.keys()): return { 'id': attr[FLAGS.ldap_user_id_attribute][0], 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} + 'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')} else: return None @@ -534,7 +546,8 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 1a10a445d..daa3a8442 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -1,7 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 1a04601b5..8e9052ded 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -1,8 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya -# Modified for strict RFC 4512 compatibility by: Ryan Lane +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # using internet experimental oid arc as per BP64 3.1 dn: cn=schema -- cgit From 5e79e5957a016c1f38fb2d126f710078a4b7f9a2 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:26:12 +0000 Subject: Setting the default schema version to the new schema --- nova/auth/ldapdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 21d8f8065..eac1db547 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,7 +32,7 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_integer('ldap_schema_version', 1, +flags.DEFINE_integer('ldap_schema_version', 2, 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') -- cgit From 55bc83b07abc8700c2b619be6be88b348f42a4d8 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:38:35 +0000 Subject: PEP8 fixes --- nova/auth/ldapdriver.py | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index eac1db547..870262a15 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,7 +40,8 @@ flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') -flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', + 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -153,23 +154,23 @@ class LdapDriver(object): # Malformed entries are useless, replace attributes found. 
attr = [] if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) else: - attr.append((self.ldap.MOD_ADD, 'secretKey', \ + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) else: - attr.append((self.ldap.MOD_ADD, 'accessKey', \ + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_REPLACE, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_ADD, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -236,7 +237,8 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, + manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -312,14 +314,15 @@ class LdapDriver(object): # Retrieve user by name user = self.__get_ldap_user(uid) if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'secretKey', \ - user['secretKey'])) + attr.append((self.ldap.MOD_DELETE, 'secretKey', + user['secretKey'])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'accessKey', \ - user['accessKey'])) + attr.append((self.ldap.MOD_DELETE, 'accessKey', + user['accessKey'])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ - user[LdapDriver.isadmin_attribute])) + attr.append((self.ldap.MOD_DELETE, + LdapDriver.isadmin_attribute, + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -341,7 +344,8 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, + str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -395,7 +399,8 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + query = ('(&(objectclass=groupOfNames)(!%s))' % + LdapDriver.project_pattern) return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): -- cgit From 63006a18701ff185e6837aa2b88f001052643460 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 8 Dec 2010 18:49:28 +0000 Subject: typo fix --- nova/virt/xenapi/volume_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 051d0fe85..d247066aa 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -78,14 +78,14 @@ class VolumeHelper(): if 'chapuser' in info 
and 'chappassword' in info: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'], + 'targetIQN': info['targetIQN'], 'chapuser': info['chapuser'], 'chappassword': info['chappassword'] } else: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'] + 'targetIQN': info['targetIQN'] } try: sr_ref = session.get_xenapi().SR.create( @@ -211,7 +211,7 @@ class VolumeHelper(): volume_info['volumeId'] = volume_id volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn + volume_info['targetIQN'] = target_iqn defer.returnValue(volume_info) @classmethod -- cgit From 3c85f1b7ed593a2d4d126a34241f217da5cf7ce6 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 8 Dec 2010 17:18:27 -0800 Subject: intermediate commit to checkpoint progress all relevant tests are passing except volume, next step is volume manager fixery --- nova/manager.py | 3 +- nova/objectstore/image.py | 3 + nova/rpc.py | 75 +++++++---------------- nova/service.py | 1 - nova/test.py | 52 ++++++++-------- nova/tests/api/__init__.py | 3 +- nova/tests/auth_unittest.py | 5 +- nova/tests/cloud_unittest.py | 4 +- nova/tests/compute_unittest.py | 39 ++++++------ nova/tests/rpc_unittest.py | 34 +++++------ nova/tests/service_unittest.py | 9 +-- nova/tests/virt_unittest.py | 7 +-- nova/tests/volume_unittest.py | 56 ++++++++---------- nova/utils.py | 23 ++++---- nova/virt/images.py | 9 +-- nova/virt/libvirt_conn.py | 28 ++++----- nova/virt/xenapi/vm_utils.py | 28 ++++----- nova/virt/xenapi_conn.py | 1 - nova/volume/driver.py | 131 ++++++++++++++++++----------------------- run_tests.py | 14 ++++- 20 files changed, 230 insertions(+), 295 deletions(-) diff --git a/nova/manager.py b/nova/manager.py index 5e067bd08..a343d7fc6 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -55,7 +55,6 @@ from nova import utils from nova import flags from nova.db import base -from twisted.internet import defer FLAGS = flags.FLAGS @@ -69,7 +68,7 @@ class Manager(base.Base): def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval""" - yield + return def init_host(self): """Do any initialization that needs to be run if this is a standalone diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 7292dbab8..2fe0b0117 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -26,6 +26,7 @@ Requires decryption using keys in the manifest. 
import binascii import glob import json +import logging import os import shutil import tarfile @@ -264,6 +265,8 @@ class Image(object): if err: raise exception.Error("Failed to decrypt initialization " "vector: %s" % err) + logging.debug(iv) + _out, err = utils.execute( 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename), diff --git a/nova/rpc.py b/nova/rpc.py index 86a29574f..652b9e4aa 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -25,18 +25,18 @@ import json import logging import sys import time +import traceback import uuid from carrot import connection as carrot_connection from carrot import messaging from eventlet import greenthread -from twisted.internet import defer -from twisted.internet import task +from nova import context from nova import exception from nova import fakerabbit from nova import flags -from nova import context +from nova import utils FLAGS = flags.FLAGS @@ -128,17 +128,9 @@ class Consumer(messaging.Consumer): def attach_to_eventlet(self): """Only needed for unit tests!""" - def fetch_repeatedly(): - while True: - self.fetch(enable_callbacks=True) - greenthread.sleep(0.1) - greenthread.spawn(fetch_repeatedly) - - def attach_to_twisted(self): - """Attach a callback to twisted that fires 10 times a second""" - loop = task.LoopingCall(self.fetch, enable_callbacks=True) - loop.start(interval=0.1) - return loop + timer = utils.LoopingCall(self.fetch, enable_callbacks=True) + timer.start(0.1) + return timer class Publisher(messaging.Publisher): @@ -197,10 +189,13 @@ class AdapterConsumer(TopicConsumer): node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! # pylint: disable-msg=W0142 - d = defer.maybeDeferred(node_func, context=ctxt, **node_args) - if msg_id: - d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) - d.addErrback(lambda e: msg_reply(msg_id, None, e)) + try: + rval = node_func(context=ctxt, **node_args) + if msg_id: + msg_reply(msg_id, rval, None) + except Exception as e: + if msg_id: + msg_reply(msg_id, None, sys.exc_info()) return @@ -244,11 +239,11 @@ def msg_reply(msg_id, reply=None, failure=None): failure should be a twisted failure object""" if failure: - message = failure.getErrorMessage() - traceback = failure.getTraceback() + message = str(failure[1]) + tb = traceback.format_exception(*failure) logging.error("Returning exception %s to caller", message) - logging.error(traceback) - failure = (failure.type.__name__, str(failure.value), traceback) + logging.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: @@ -313,8 +308,8 @@ def call(context, topic, msg): _pack_context(msg, context) class WaitMessage(object): - def __call__(self, data, message): + LOG.debug('data %s, msg %s', data, message) """Acks message and sets result.""" message.ack() if data['failure']: @@ -337,41 +332,11 @@ def call(context, topic, msg): except StopIteration: pass consumer.close() + if isinstance(wait_msg.result, Exception): + raise wait_msg.result return wait_msg.result -def call_twisted(context, topic, msg): - """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") - msg_id = uuid.uuid4().hex - msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) - _pack_context(msg, context) - - conn = Connection.instance() - d = defer.Deferred() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) - - def 
deferred_receive(data, message): - """Acks message and callbacks or errbacks""" - message.ack() - if data['failure']: - return d.errback(RemoteError(*data['failure'])) - else: - return d.callback(data['result']) - - consumer.register_callback(deferred_receive) - injected = consumer.attach_to_twisted() - - # clean up after the injected listened and return x - d.addCallback(lambda x: injected.stop() and x or x) - - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - return d - - def cast(context, topic, msg): """Sends a message on a topic without waiting for a response""" LOG.debug("Making asynchronous cast...") diff --git a/nova/service.py b/nova/service.py index 9454d4049..55a0bb212 100644 --- a/nova/service.py +++ b/nova/service.py @@ -160,7 +160,6 @@ class Service(object, service.Service): except exception.NotFound: logging.warn("Service killed that has no database entry") - @defer.inlineCallbacks def periodic_tasks(self): """Tasks to be run at a periodic interval""" yield self.manager.periodic_tasks(context.get_admin_context()) diff --git a/nova/test.py b/nova/test.py index 5c2a72819..bbf063aca 100644 --- a/nova/test.py +++ b/nova/test.py @@ -25,11 +25,11 @@ and some black magic for inline callbacks. import datetime import sys import time +import unittest import mox import stubout from twisted.internet import defer -from twisted.trial import unittest from nova import context from nova import db @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, self.start) db.network_disassociate_all(ctxt) - rpc.Consumer.attach_to_twisted = self.originalAttach + rpc.Consumer.attach_to_eventlet = self.originalAttach for x in self.injected: try: x.stop() @@ -125,31 +125,31 @@ class TrialTestCase(unittest.TestCase): for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) - def run(self, result=None): - test_method = getattr(self, self._testMethodName) - setattr(self, - self._testMethodName, - self._maybeInlineCallbacks(test_method, result)) - rv = super(TrialTestCase, self).run(result) - setattr(self, self._testMethodName, test_method) - return rv - - def _maybeInlineCallbacks(self, func, result): - def _wrapped(): - g = func() - if isinstance(g, defer.Deferred): - return g - if not hasattr(g, 'send'): - return defer.succeed(g) - - inlined = defer.inlineCallbacks(func) - d = inlined() - return d - _wrapped.func_name = func.func_name - return _wrapped + #def run(self, result=None): + # test_method = getattr(self, self._testMethodName) + # setattr(self, + # self._testMethodName, + # self._maybeInlineCallbacks(test_method, result)) + # rv = super(TrialTestCase, self).run(result) + # setattr(self, self._testMethodName, test_method) + # return rv + + #def _maybeInlineCallbacks(self, func, result): + # def _wrapped(): + # g = func() + # if isinstance(g, defer.Deferred): + # return g + # if not hasattr(g, 'send'): + # return defer.succeed(g) + + # inlined = defer.inlineCallbacks(func) + # d = inlined() + # return d + # _wrapped.func_name = func.func_name + # return _wrapped def _monkey_patch_attach(self): - self.originalAttach = rpc.Consumer.attach_to_twisted + self.originalAttach = rpc.Consumer.attach_to_eventlet def _wrapped(innerSelf): rv = self.originalAttach(innerSelf) @@ -157,4 +157,4 @@ class TrialTestCase(unittest.TestCase): return rv _wrapped.func_name = self.originalAttach.func_name - rpc.Consumer.attach_to_twisted = _wrapped + rpc.Consumer.attach_to_eventlet = _wrapped diff --git 
a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index 9caa8c9d0..cdc1bbf00 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -78,4 +78,5 @@ class Test(unittest.TestCase): if __name__ == '__main__': - unittest.main() + pass + #unittest.main() diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index fe891beee..129ff223d 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -16,10 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +#import logging from M2Crypto import X509 import unittest +import eventlet +logging = eventlet.import_patched('logging') + from nova import crypto from nova import flags from nova import test diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 9886a2449..b7b856da5 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -27,8 +27,6 @@ import tempfile import time from eventlet import greenthread -from twisted.internet import defer -import unittest from xml.etree import ElementTree from nova import context @@ -186,7 +184,7 @@ class CloudTestCase(test.TrialTestCase): logging.debug("Need to watch instance %s until it's running..." % instance['instance_id']) while True: - rv = yield defer.succeed(time.sleep(1)) + greenthread.sleep(1) info = self.cloud._get_instance(instance['instance_id']) logging.debug(info['state']) if info['state'] == power_state.RUNNING: diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 6f3ef96cb..67cea72c9 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -22,8 +22,6 @@ Tests For Compute import datetime import logging -from twisted.internet import defer - from nova import context from nova import db from nova import exception @@ -33,6 +31,7 @@ from nova import utils from nova.auth import manager from nova.compute import api as compute_api + FLAGS = flags.FLAGS @@ -94,24 +93,22 @@ class ComputeTestCase(test.TrialTestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) - @defer.inlineCallbacks def test_run_terminate(self): """Make sure it is possible to run and terminate instance""" instance_id = self._create_instance() - yield self.compute.run_instance(self.context, instance_id) + self.compute.run_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) logging.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) - yield self.compute.terminate_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) logging.info("After terminating instances: %s", instances) self.assertEqual(len(instances), 0) - @defer.inlineCallbacks def test_run_terminate_timestamps(self): """Make sure timestamps are set for launched and destroyed""" instance_id = self._create_instance() @@ -119,42 +116,40 @@ class ComputeTestCase(test.TrialTestCase): self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) launch = datetime.datetime.utcnow() - yield self.compute.run_instance(self.context, instance_id) + self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) terminate = datetime.datetime.utcnow() - yield 
self.compute.terminate_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) - @defer.inlineCallbacks def test_reboot(self): """Ensure instance can be rebooted""" instance_id = self._create_instance() - yield self.compute.run_instance(self.context, instance_id) - yield self.compute.reboot_instance(self.context, instance_id) - yield self.compute.terminate_instance(self.context, instance_id) + self.compute.run_instance(self.context, instance_id) + self.compute.reboot_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) - @defer.inlineCallbacks def test_console_output(self): """Make sure we can get console output from instance""" instance_id = self._create_instance() - yield self.compute.run_instance(self.context, instance_id) + self.compute.run_instance(self.context, instance_id) - console = yield self.compute.get_console_output(self.context, + console = self.compute.get_console_output(self.context, instance_id) self.assert_(console) - yield self.compute.terminate_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) - @defer.inlineCallbacks def test_run_instance_existing(self): """Ensure failure when running an instance that already exists""" instance_id = self._create_instance() - yield self.compute.run_instance(self.context, instance_id) - self.assertFailure(self.compute.run_instance(self.context, - instance_id), - exception.Error) - yield self.compute.terminate_instance(self.context, instance_id) + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, + self.compute.run_instance, + self.context, + instance_id) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index f35b65a39..c2ad5cd79 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -20,8 +20,6 @@ Unit Tests for remote procedure calls using queue """ import logging -from twisted.internet import defer - from nova import context from nova import flags from nova import rpc @@ -40,23 +38,22 @@ class RpcTestCase(test.TrialTestCase): self.consumer = rpc.AdapterConsumer(connection=self.conn, topic='test', proxy=self.receiver) - self.consumer.attach_to_twisted() + self.consumer.attach_to_eventlet() self.context = context.get_admin_context() def test_call_succeed(self): """Get a value through rpc call""" value = 42 - result = yield rpc.call_twisted(self.context, - 'test', {"method": "echo", + result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 - result = yield rpc.call_twisted(self.context, - 'test', {"method": "context", - "args": {"value": value}}) + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): @@ -67,14 +64,17 @@ class RpcTestCase(test.TrialTestCase): to an int in the test. 
""" value = 42 - self.assertFailure(rpc.call_twisted(self.context, 'test', - {"method": "fail", - "args": {"value": value}}), - rpc.RemoteError) + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) try: - yield rpc.call_twisted(self.context, - 'test', {"method": "fail", - "args": {"value": value}}) + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) self.fail("should have thrown rpc.RemoteError") except rpc.RemoteError as exc: self.assertEqual(int(exc.value), value) @@ -89,13 +89,13 @@ class TestReceiver(object): def echo(context, value): """Simply returns whatever value is sent in""" logging.debug("Received %s", value) - return defer.succeed(value) + return value @staticmethod def context(context, value): """Returns dictionary version of context""" logging.debug("Received %s", context) - return defer.succeed(context.to_dict()) + return context.to_dict() @staticmethod def fail(context, value): diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index a268bc4fe..4f8d2d550 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -143,7 +143,6 @@ class ServiceTestCase(test.TrialTestCase): # whether it is disconnected, it looks for a variable on itself called # 'model_disconnected' and report_state doesn't really do much so this # these are mostly just for coverage - @defer.inlineCallbacks def test_report_state_no_service(self): host = 'foo' binary = 'bar' @@ -174,9 +173,8 @@ class ServiceTestCase(test.TrialTestCase): topic, 'nova.tests.service_unittest.FakeManager') serv.startService() - yield serv.report_state() + serv.report_state() - @defer.inlineCallbacks def test_report_state_newly_disconnected(self): host = 'foo' binary = 'bar' @@ -205,10 +203,9 @@ class ServiceTestCase(test.TrialTestCase): topic, 'nova.tests.service_unittest.FakeManager') serv.startService() - yield serv.report_state() + serv.report_state() self.assert_(serv.model_disconnected) - @defer.inlineCallbacks def test_report_state_newly_connected(self): host = 'foo' binary = 'bar' @@ -240,6 +237,6 @@ class ServiceTestCase(test.TrialTestCase): 'nova.tests.service_unittest.FakeManager') serv.startService() serv.model_disconnected = True - yield serv.report_state() + serv.report_state() self.assert_(not serv.model_disconnected) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d49383fb7..a4a8d3acf 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -235,7 +235,7 @@ class NWFilterTestCase(test.TrialTestCase): 'project_id': 'fake'}) inst_id = instance_ref['id'] - def _ensure_all_called(_): + def _ensure_all_called(): instance_filter = 'nova-instance-%s' % instance_ref['name'] secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', @@ -253,7 +253,6 @@ class NWFilterTestCase(test.TrialTestCase): instance = db.instance_get(self.context, inst_id) d = self.fw.setup_nwfilters_for_instance(instance) - d.addCallback(_ensure_all_called) - d.addCallback(lambda _: self.teardown_security_group()) - + _ensure_all_called() + self.teardown_security_group() return d diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 12321a96f..93d2ceab7 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -21,8 +21,6 @@ Tests for Volume Code. 
""" import logging -from twisted.internet import defer - from nova import context from nova import exception from nova import db @@ -56,51 +54,48 @@ class VolumeTestCase(test.TrialTestCase): vol['attach_status'] = "detached" return db.volume_create(context.get_admin_context(), vol)['id'] - @defer.inlineCallbacks def test_create_delete_volume(self): """Test volume can be created and deleted.""" volume_id = self._create_volume() - yield self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume_id) self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), volume_id).id) - yield self.volume.delete_volume(self.context, volume_id) + self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) - @defer.inlineCallbacks def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in # volume_create - defer.returnValue(True) + return True try: volume_id = self._create_volume('1001') - yield self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume_id) self.fail("Should have thrown TypeError") except TypeError: pass - @defer.inlineCallbacks def test_too_many_volumes(self): """Ensure that NoMoreTargets is raised when we run out of volumes.""" vols = [] total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): volume_id = self._create_volume() - yield self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume_id) vols.append(volume_id) volume_id = self._create_volume() - self.assertFailure(self.volume.create_volume(self.context, - volume_id), - db.NoMoreTargets) + self.assertRaises(db.NoMoreTargets, + self.volume.create_volume, + self.context, + volume_id) db.volume_destroy(context.get_admin_context(), volume_id) for volume_id in vols: - yield self.volume.delete_volume(self.context, volume_id) + self.volume.delete_volume(self.context, volume_id) - @defer.inlineCallbacks def test_run_attach_detach_volume(self): """Make sure volume can be attached and detached from instance.""" inst = {} @@ -115,15 +110,15 @@ class VolumeTestCase(test.TrialTestCase): instance_id = db.instance_create(self.context, inst)['id'] mountpoint = "/dev/sdf" volume_id = self._create_volume() - yield self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume_id) if FLAGS.fake_tests: db.volume_attached(self.context, volume_id, instance_id, mountpoint) else: - yield self.compute.attach_volume(self.context, - instance_id, - volume_id, - mountpoint) + self.compute.attach_volume(self.context, + instance_id, + volume_id, + mountpoint) vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") @@ -131,25 +126,26 @@ class VolumeTestCase(test.TrialTestCase): instance_ref = db.volume_get_instance(self.context, volume_id) self.assertEqual(instance_ref['id'], instance_id) - self.assertFailure(self.volume.delete_volume(self.context, volume_id), - exception.Error) + self.assertRaises(exception.Error, + self.volume.delete_volume, + self.context, + volume_id) if FLAGS.fake_tests: db.volume_detached(self.context, volume_id) else: - yield self.compute.detach_volume(self.context, - instance_id, - volume_id) + self.compute.detach_volume(self.context, + instance_id, + volume_id) vol = db.volume_get(self.context, 
volume_id) self.assertEqual(vol['status'], "available") - yield self.volume.delete_volume(self.context, volume_id) + self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.Error, db.volume_get, self.context, volume_id) db.instance_destroy(self.context, instance_id) - @defer.inlineCallbacks def test_concurrent_volumes_get_different_targets(self): """Ensure multiple concurrent volumes get different targets.""" volume_ids = [] @@ -164,15 +160,11 @@ class VolumeTestCase(test.TrialTestCase): self.assert_(iscsi_target not in targets) targets.append(iscsi_target) logging.debug("Target %s allocated", iscsi_target) - deferreds = [] total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): volume_id = self._create_volume() d = self.volume.create_volume(self.context, volume_id) - d.addCallback(_check) - d.addErrback(self.fail) - deferreds.append(d) - yield defer.DeferredList(deferreds) + _check(d) for volume_id in volume_ids: self.volume.delete_volume(self.context, volume_id) diff --git a/nova/utils.py b/nova/utils.py index 66047ae8b..2c43203d8 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -34,8 +34,6 @@ from xml.sax import saxutils from eventlet import event from eventlet import greenthread -from twisted.internet.threads import deferToThread - from nova import exception from nova import flags from nova.exception import ProcessExecutionError @@ -78,7 +76,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug("Running cmd: %s", cmd) + logging.debug("Running cmd (subprocess): %s", cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -98,6 +96,10 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): stdout=stdout, stderr=stderr, cmd=cmd) + # NOTE(termie): this appears to be necessary to let the subprocess call + # clean something up in between calls, without it two + # execute calls in a row hangs the second one + greenthread.sleep(0) return result @@ -126,13 +128,14 @@ def debug(arg): def runthis(prompt, cmd, check_exit_code=True): logging.debug("Running %s" % (cmd)) - exit_code = subprocess.call(cmd.split(" ")) - logging.debug(prompt % (exit_code)) - if check_exit_code and exit_code != 0: - raise ProcessExecutionError(exit_code=exit_code, - stdout=None, - stderr=None, - cmd=cmd) + rv, err = execute(cmd, check_exit_code=check_exit_code) + #exit_code = subprocess.call(cmd.split(" ")) + #logging.debug(prompt % (exit_code)) + #if check_exit_code and exit_code != 0: + # raise ProcessExecutionError(exit_code=exit_code, + # stdout=None, + # stderr=None, + # cmd=cmd) def generate_uid(topic, size=8): diff --git a/nova/virt/images.py b/nova/virt/images.py index 981aa5cf3..4d7c65f12 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -26,7 +26,7 @@ import time import urlparse from nova import flags -from nova import process +from nova import utils from nova.auth import manager from nova.auth import signer from nova.objectstore import image @@ -63,15 +63,16 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): - cmd += ['-H', '%s: %s' % (k, v)] + cmd += ['-H', '"%s: %s"' % (k, v)] cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) - return process.simple_execute('cp %s %s' % (source, 
path)) + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c09a7c01d..715e7234c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -54,7 +54,6 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import process from nova import utils #from nova.api import context from nova.auth import manager @@ -366,8 +365,8 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = process.simple_execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) + r = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) return r[0] else: return '' @@ -389,13 +388,13 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), + console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special - virsh_output = process.simple_execute("virsh ttyconsole %s" % - instance['name']) + virsh_output = utils.execute("virsh ttyconsole %s" % + instance['name']) data = self._flush_xen_console(virsh_output) fpath = self._append_to_file(data, console_log) else: @@ -411,8 +410,8 @@ class LibvirtConnection(object): prefix + fname) # ensure directories exist and are writable - process.simple_execute('mkdir -p %s' % basepath(prefix='')) - process.simple_execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(prefix='')) + utils.execute('chmod 0777 %s' % basepath(prefix='')) # TODO(termie): these are blocking calls, it would be great # if they weren't. @@ -443,9 +442,9 @@ class LibvirtConnection(object): project) def execute(cmd, process_input=None, check_exit_code=True): - return process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -471,7 +470,7 @@ class LibvirtConnection(object): execute=execute) if os.path.exists(basepath('disk')): - process.simple_execute('rm -f %s' % basepath('disk')) + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -485,8 +484,7 @@ class LibvirtConnection(object): local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - process.simple_execute('sudo chown root %s' % - basepath('disk')) + utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 99d484ca2..b72b8e13d 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,14 +21,13 @@ their attributes like VDIs, VIFs, as well as their lookup functions. 
import logging -from twisted.internet import defer - from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -36,6 +35,7 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} + XenAPI = None @@ -49,7 +49,6 @@ class VMHelper(): XenAPI = __import__('XenAPI') @classmethod - @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" @@ -87,12 +86,11 @@ class VMHelper(): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref @classmethod - @defer.inlineCallbacks def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -111,13 +109,12 @@ class VMHelper(): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + vbd_ref = session.call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref @classmethod - @defer.inlineCallbacks def create_vif(cls, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -133,13 +130,12 @@ class VMHelper(): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, network_ref) - vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + vif_ref = session.call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref @classmethod - @defer.inlineCallbacks def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for @@ -156,12 +152,11 @@ class VMHelper(): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield session.async_call_plugin('objectstore', fn, args) - uuid = yield session.wait_for_task(task) - defer.returnValue(uuid) + task = session.async_call_plugin('objectstore', fn, args) + uuid = session.wait_for_task(task) + return uuid @classmethod - @utils.deferredToThread def lookup(cls, session, i): """ Look the instance i up, and returns it if available """ return VMHelper.lookup_blocking(session, i) @@ -179,7 +174,6 @@ class VMHelper(): return vms[0] @classmethod - @utils.deferredToThread def lookup_vm_vdis(cls, session, vm): """ Look for the VDIs that are attached to the VM """ return VMHelper.lookup_vm_vdis_blocking(session, vm) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index dacf9fe2b..96d211cc0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -167,7 +167,6 @@ class XenAPISession(object): self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): ->>>>>>> MERGE-SOURCE """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 156aad2a0..f675c9132 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -23,8 +23,6 @@ Drivers for volumes. import logging import os -from twisted.internet import defer - from nova import exception from nova import flags from nova import process @@ -55,14 +53,13 @@ flags.DEFINE_string('iscsi_ip_prefix', '127.0', class VolumeDriver(object): """Executes commands relating to Volumes.""" - def __init__(self, execute=process.simple_execute, + def __init__(self, execute=utils.execute, sync_exec=utils.execute, *args, **kwargs): # NOTE(vish): db is set by Manager self.db = None self._execute = execute self._sync_exec = sync_exec - @defer.inlineCallbacks def _try_execute(self, command): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually @@ -70,15 +67,15 @@ class VolumeDriver(object): tries = 0 while True: try: - yield self._execute(command) - defer.returnValue(True) + self._execute(command) + return True except exception.ProcessExecutionError: tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise logging.exception("Recovering from a failed execute." 
"Try number %s", tries) - yield self._execute("sleep %s" % tries ** 2) + self._execute("sleep %s" % tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" @@ -86,53 +83,46 @@ class VolumeDriver(object): raise exception.Error("volume group %s doesn't exist" % FLAGS.volume_group) - @defer.inlineCallbacks def create_volume(self, volume): """Creates a logical volume.""" if int(volume['size']) == 0: sizestr = '100M' else: sizestr = '%sG' % volume['size'] - yield self._try_execute("sudo lvcreate -L %s -n %s %s" % - (sizestr, + self._try_execute("sudo lvcreate -L %s -n %s %s" % + (sizestr, volume['name'], FLAGS.volume_group)) - @defer.inlineCallbacks def delete_volume(self, volume): """Deletes a logical volume.""" - yield self._try_execute("sudo lvremove -f %s/%s" % - (FLAGS.volume_group, + self._try_execute("sudo lvremove -f %s/%s" % + (FLAGS.volume_group, volume['name'])) - @defer.inlineCallbacks def local_path(self, volume): - yield # NOTE(vish): stops deprecation warning + # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') escaped_name = volume['name'].replace('-', '--') - defer.returnValue("/dev/mapper/%s-%s" % (escaped_group, - escaped_name)) + return "/dev/mapper/%s-%s" % (escaped_group, + escaped_name) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" raise NotImplementedError() - @defer.inlineCallbacks def create_export(self, context, volume): """Exports the volume.""" raise NotImplementedError() - @defer.inlineCallbacks def remove_export(self, context, volume): """Removes an export for a logical volume.""" raise NotImplementedError() - @defer.inlineCallbacks def discover_volume(self, volume): """Discover volume on a remote host.""" raise NotImplementedError() - @defer.inlineCallbacks def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" raise NotImplementedError() @@ -155,14 +145,13 @@ class AOEDriver(VolumeDriver): dev = {'shelf_id': shelf_id, 'blade_id': blade_id} self.db.export_device_create_safe(context, dev) - @defer.inlineCallbacks def create_export(self, context, volume): """Creates an export for a logical volume.""" self._ensure_blades(context) (shelf_id, blade_id) = self.db.volume_allocate_shelf_and_blade(context, volume['id']) - yield self._try_execute( + self._try_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (shelf_id, blade_id, @@ -176,33 +165,30 @@ class AOEDriver(VolumeDriver): # still works for the other volumes, so we # just wait a bit for the current volume to # be ready and ignore any errors. 
- yield self._execute("sleep 2") - yield self._execute("sudo vblade-persist auto all", - check_exit_code=False) - yield self._execute("sudo vblade-persist start all", - check_exit_code=False) + self._execute("sleep 2") + self._execute("sudo vblade-persist auto all", + check_exit_code=False) + self._execute("sudo vblade-persist start all", + check_exit_code=False) - @defer.inlineCallbacks def remove_export(self, context, volume): """Removes an export for a logical volume.""" (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, volume['id']) - yield self._try_execute("sudo vblade-persist stop %s %s" % - (shelf_id, blade_id)) - yield self._try_execute("sudo vblade-persist destroy %s %s" % - (shelf_id, blade_id)) + self._try_execute("sudo vblade-persist stop %s %s" % + (shelf_id, blade_id)) + self._try_execute("sudo vblade-persist destroy %s %s" % + (shelf_id, blade_id)) - @defer.inlineCallbacks def discover_volume(self, _volume): """Discover volume on a remote host.""" - yield self._execute("sudo aoe-discover") - yield self._execute("sudo aoe-stat", check_exit_code=False) + self._execute("sudo aoe-discover") + self._execute("sudo aoe-stat", check_exit_code=False) - @defer.inlineCallbacks def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" - yield + pass class FakeAOEDriver(AOEDriver): @@ -252,7 +238,6 @@ class ISCSIDriver(VolumeDriver): target = {'host': host, 'target_num': target_num} self.db.iscsi_target_create_safe(context, target) - @defer.inlineCallbacks def create_export(self, context, volume): """Creates an export for a logical volume.""" self._ensure_iscsi_targets(context, volume['host']) @@ -261,61 +246,57 @@ class ISCSIDriver(VolumeDriver): volume['host']) iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - yield self._execute("sudo ietadm --op new " - "--tid=%s --params Name=%s" % - (iscsi_target, iscsi_name)) - yield self._execute("sudo ietadm --op new --tid=%s " - "--lun=0 --params Path=%s,Type=fileio" % - (iscsi_target, volume_path)) - - @defer.inlineCallbacks + self._execute("sudo ietadm --op new " + "--tid=%s --params Name=%s" % + (iscsi_target, iscsi_name)) + self._execute("sudo ietadm --op new --tid=%s " + "--lun=0 --params Path=%s,Type=fileio" % + (iscsi_target, volume_path)) + def remove_export(self, context, volume): """Removes an export for a logical volume.""" iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) - yield self._execute("sudo ietadm --op delete --tid=%s " - "--lun=0" % iscsi_target) - yield self._execute("sudo ietadm --op delete --tid=%s" % - iscsi_target) + self._execute("sudo ietadm --op delete --tid=%s " + "--lun=0" % iscsi_target) + self._execute("sudo ietadm --op delete --tid=%s" % + iscsi_target) - @defer.inlineCallbacks def _get_name_and_portal(self, volume_name, host): """Gets iscsi name and portal from volume name and host.""" - (out, _err) = yield self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) + (out, _err) = self._execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % host) for target in out.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: (location, _sep, iscsi_name) = target.partition(" ") break iscsi_portal = location.split(",")[0] - defer.returnValue((iscsi_name, iscsi_portal)) + return (iscsi_name, iscsi_portal) - @defer.inlineCallbacks def discover_volume(self, volume): """Discover volume on a remote host.""" (iscsi_name, - 
iscsi_portal) = yield self._get_name_and_portal(volume['name'], - volume['host']) - yield self._execute("sudo iscsiadm -m node -T %s -p %s --login" % - (iscsi_name, iscsi_portal)) - yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v automatic" % - (iscsi_name, iscsi_portal)) - defer.returnValue("/dev/iscsi/%s" % volume['name']) - - @defer.inlineCallbacks + iscsi_portal) = self._get_name_and_portal(volume['name'], + volume['host']) + self._execute("sudo iscsiadm -m node -T %s -p %s --login" % + (iscsi_name, iscsi_portal)) + self._execute("sudo iscsiadm -m node -T %s -p %s --op update " + "-n node.startup -v automatic" % + (iscsi_name, iscsi_portal)) + return "/dev/iscsi/%s" % volume['name'] + def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" (iscsi_name, - iscsi_portal) = yield self._get_name_and_portal(volume['name'], - volume['host']) - yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v manual" % - (iscsi_name, iscsi_portal)) - yield self._execute("sudo iscsiadm -m node -T %s -p %s --logout " % - (iscsi_name, iscsi_portal)) - yield self._execute("sudo iscsiadm -m node --op delete " - "--targetname %s" % iscsi_name) + iscsi_portal) = self._get_name_and_portal(volume['name'], + volume['host']) + self._execute("sudo iscsiadm -m node -T %s -p %s --op update " + "-n node.startup -v manual" % + (iscsi_name, iscsi_portal)) + self._execute("sudo iscsiadm -m node -T %s -p %s --logout " % + (iscsi_name, iscsi_portal)) + self._execute("sudo iscsiadm -m node --op delete " + "--targetname %s" % iscsi_name) class FakeISCSIDriver(ISCSIDriver): diff --git a/run_tests.py b/run_tests.py index 3d427d8af..883d2b768 100644 --- a/run_tests.py +++ b/run_tests.py @@ -39,11 +39,16 @@ Due to our use of multiprocessing it we frequently get some ignorable """ +import eventlet +eventlet.monkey_patch() + import __main__ import os import sys + from twisted.scripts import trial as trial_script +import unittest from nova import flags from nova import twistd @@ -56,12 +61,12 @@ from nova.tests.compute_unittest import * from nova.tests.flags_unittest import * from nova.tests.misc_unittest import * from nova.tests.network_unittest import * -from nova.tests.objectstore_unittest import * -from nova.tests.process_unittest import * +#from nova.tests.objectstore_unittest import * +#from nova.tests.process_unittest import * from nova.tests.quota_unittest import * from nova.tests.rpc_unittest import * from nova.tests.scheduler_unittest import * -from nova.tests.service_unittest import * +#from nova.tests.service_unittest import * from nova.tests.twistd_unittest import * from nova.tests.validator_unittest import * from nova.tests.virt_unittest import * @@ -82,6 +87,8 @@ if __name__ == '__main__': config = OptionsClass() argv = config.parseOptions() + argv = FLAGS(sys.argv) + FLAGS.verbose = True # TODO(termie): these should make a call instead of doing work on import @@ -90,6 +97,7 @@ if __name__ == '__main__': else: from nova.tests.real_flags import * + # Establish redirect for STDERR sys.stderr.flush() err = open(FLAGS.tests_stderr, 'w+', 0) -- cgit From 653373842815a1ba9992d3d662431ba102ac8ce1 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 8 Dec 2010 17:21:43 -0800 Subject: port volume manager to eventlet also --- nova/volume/manager.py | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 589e7d7d9..7da125cac 100644 --- 
a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -45,7 +45,6 @@ intact. import logging import datetime -from twisted.internet import defer from nova import context from nova import exception @@ -86,7 +85,6 @@ class VolumeManager(manager.Manager): for volume in volumes: self.driver.ensure_export(ctxt, volume) - @defer.inlineCallbacks def create_volume(self, context, volume_id): """Creates and exports the volume.""" context = context.elevated() @@ -102,19 +100,18 @@ class VolumeManager(manager.Manager): logging.debug("volume %s: creating lv of size %sG", volume_ref['name'], volume_ref['size']) - yield self.driver.create_volume(volume_ref) + self.driver.create_volume(volume_ref) logging.debug("volume %s: creating export", volume_ref['name']) - yield self.driver.create_export(context, volume_ref) + self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) logging.debug("volume %s: created successfully", volume_ref['name']) - defer.returnValue(volume_id) + return volume_id - @defer.inlineCallbacks def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" context = context.elevated() @@ -124,14 +121,13 @@ class VolumeManager(manager.Manager): if volume_ref['host'] != self.host: raise exception.Error("Volume is not local to this node") logging.debug("volume %s: removing export", volume_ref['name']) - yield self.driver.remove_export(context, volume_ref) + self.driver.remove_export(context, volume_ref) logging.debug("volume %s: deleting", volume_ref['name']) - yield self.driver.delete_volume(volume_ref) + self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) logging.debug("volume %s: deleted successfully", volume_ref['name']) - defer.returnValue(True) + return True - @defer.inlineCallbacks def setup_compute_volume(self, context, volume_id): """Setup remote volume on compute host. 
@@ -139,17 +135,16 @@ class VolumeManager(manager.Manager): context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - path = yield self.driver.local_path(volume_ref) + path = self.driver.local_path(volume_ref) else: - path = yield self.driver.discover_volume(volume_ref) - defer.returnValue(path) + path = self.driver.discover_volume(volume_ref) + return path - @defer.inlineCallbacks def remove_compute_volume(self, context, volume_id): """Remove remote volume on compute host.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - defer.returnValue(True) + return True else: - yield self.driver.undiscover_volume(volume_ref) + self.driver.undiscover_volume(volume_ref) -- cgit From 15f7361f5497c3d27dcafbb27d314af76069ed42 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 8 Dec 2010 17:25:57 -0800 Subject: remove some unused files --- nova/process.py | 209 --------------------------------------- nova/tests/process_unittest.py | 132 ------------------------- nova/tests/validator_unittest.py | 42 -------- nova/validate.py | 94 ------------------ 4 files changed, 477 deletions(-) delete mode 100644 nova/process.py delete mode 100644 nova/tests/process_unittest.py delete mode 100644 nova/tests/validator_unittest.py delete mode 100644 nova/validate.py diff --git a/nova/process.py b/nova/process.py deleted file mode 100644 index b33df048b..000000000 --- a/nova/process.py +++ /dev/null @@ -1,209 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 FathomDB Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Process pool using twisted threading -""" - -import logging -import StringIO - -from twisted.internet import defer -from twisted.internet import error -from twisted.internet import protocol -from twisted.internet import reactor - -from nova import flags -from nova.exception import ProcessExecutionError - -FLAGS = flags.FLAGS -flags.DEFINE_integer('process_pool_size', 4, - 'Number of processes to use in the process pool') - - -# This is based on _BackRelay from twister.internal.utils, but modified to -# capture both stdout and stderr, without odd stderr handling, and also to -# handle stdin -class BackRelayWithInput(protocol.ProcessProtocol): - """ - Trivial protocol for communicating with a process and turning its output - into the result of a L{Deferred}. - - @ivar deferred: A L{Deferred} which will be called back with all of stdout - and all of stderr as well (as a tuple). C{terminate_on_stderr} is true - and any bytes are received over stderr, this will fire with an - L{_ProcessExecutionError} instance and the attribute will be set to - C{None}. 
- - @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are - received over stderr, this attribute will refer to a L{Deferred} which - will be called back when the process ends. This C{Deferred} is also - associated with the L{_ProcessExecutionError} which C{deferred} fires - with earlier in this case so that users can determine when the process - has actually ended, in addition to knowing when bytes have been - received via stderr. - """ - - def __init__(self, deferred, cmd, started_deferred=None, - terminate_on_stderr=False, check_exit_code=True, - process_input=None): - self.deferred = deferred - self.cmd = cmd - self.stdout = StringIO.StringIO() - self.stderr = StringIO.StringIO() - self.started_deferred = started_deferred - self.terminate_on_stderr = terminate_on_stderr - self.check_exit_code = check_exit_code - self.process_input = process_input - self.on_process_ended = None - - def _build_execution_error(self, exit_code=None): - return ProcessExecutionError(cmd=self.cmd, - exit_code=exit_code, - stdout=self.stdout.getvalue(), - stderr=self.stderr.getvalue()) - - def errReceived(self, text): - self.stderr.write(text) - if self.terminate_on_stderr and (self.deferred is not None): - self.on_process_ended = defer.Deferred() - self.deferred.errback(self._build_execution_error()) - self.deferred = None - self.transport.loseConnection() - - def outReceived(self, text): - self.stdout.write(text) - - def processEnded(self, reason): - if self.deferred is not None: - stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue() - exit_code = reason.value.exitCode - if self.check_exit_code and exit_code != 0: - self.deferred.errback(self._build_execution_error(exit_code)) - else: - try: - if self.check_exit_code: - reason.trap(error.ProcessDone) - self.deferred.callback((stdout, stderr)) - except: - # NOTE(justinsb): This logic is a little suspicious to me. - # If the callback throws an exception, then errback will - # be called also. However, this is what the unit tests - # test for. - exec_error = self._build_execution_error(exit_code) - self.deferred.errback(exec_error) - elif self.on_process_ended is not None: - self.on_process_ended.errback(reason) - - def connectionMade(self): - if self.started_deferred: - self.started_deferred.callback(self) - if self.process_input: - self.transport.write(str(self.process_input)) - self.transport.closeStdin() - - -def get_process_output(executable, args=None, env=None, path=None, - process_reactor=None, check_exit_code=True, - process_input=None, started_deferred=None, - terminate_on_stderr=False): - if process_reactor is None: - process_reactor = reactor - args = args and args or () - env = env and env and {} - deferred = defer.Deferred() - cmd = executable - if args: - cmd = " ".join([cmd] + args) - logging.debug("Running cmd: %s", cmd) - process_handler = BackRelayWithInput( - deferred, - cmd, - started_deferred=started_deferred, - check_exit_code=check_exit_code, - process_input=process_input, - terminate_on_stderr=terminate_on_stderr) - # NOTE(vish): commands come in as unicode, but self.executes needs - # strings or process.spawn raises a deprecation warning - executable = str(executable) - if not args is None: - args = [str(x) for x in args] - process_reactor.spawnProcess(process_handler, executable, - (executable,) + tuple(args), env, path) - return deferred - - -class ProcessPool(object): - """ A simple process pool implementation using Twisted's Process bits. 
- - This is pretty basic right now, but hopefully the API will be the correct - one so that it can be optimized later. - """ - def __init__(self, size=None): - self.size = size and size or FLAGS.process_pool_size - self._pool = defer.DeferredSemaphore(self.size) - - def simple_execute(self, cmd, **kw): - """ Weak emulation of the old utils.execute() function. - - This only exists as a way to quickly move old execute methods to - this new style of code. - - NOTE(termie): This will break on args with spaces in them. - """ - parsed = cmd.split(' ') - executable, args = parsed[0], parsed[1:] - return self.execute(executable, args, **kw) - - def execute(self, *args, **kw): - deferred = self._pool.acquire() - - def _associate_process(proto): - deferred.process = proto.transport - return proto.transport - - started = defer.Deferred() - started.addCallback(_associate_process) - kw.setdefault('started_deferred', started) - - deferred.process = None - deferred.started = started - - deferred.addCallback(lambda _: get_process_output(*args, **kw)) - deferred.addBoth(self._release) - return deferred - - def _release(self, retval=None): - self._pool.release() - return retval - - -class SharedPool(object): - _instance = None - - def __init__(self): - if SharedPool._instance is None: - self.__class__._instance = ProcessPool() - - def __getattr__(self, key): - return getattr(self._instance, key) - - -def simple_execute(cmd, **kwargs): - return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py deleted file mode 100644 index 67245af03..000000000 --- a/nova/tests/process_unittest.py +++ /dev/null @@ -1,132 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -from twisted.internet import defer -from twisted.internet import reactor -from xml.etree import ElementTree - -from nova import exception -from nova import flags -from nova import process -from nova import test -from nova import utils - -FLAGS = flags.FLAGS - - -class ProcessTestCase(test.TrialTestCase): - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(ProcessTestCase, self).setUp() - - def test_execute_stdout(self): - pool = process.ProcessPool(2) - d = pool.simple_execute('echo test') - - def _check(rv): - self.assertEqual(rv[0], 'test\n') - self.assertEqual(rv[1], '') - - d.addCallback(_check) - d.addErrback(self.fail) - return d - - def test_execute_stderr(self): - pool = process.ProcessPool(2) - d = pool.simple_execute('cat BAD_FILE', check_exit_code=False) - - def _check(rv): - self.assertEqual(rv[0], '') - self.assert_('No such file' in rv[1]) - - d.addCallback(_check) - d.addErrback(self.fail) - return d - - def test_execute_unexpected_stderr(self): - pool = process.ProcessPool(2) - d = pool.simple_execute('cat BAD_FILE') - d.addCallback(lambda x: self.fail('should have raised an error')) - d.addErrback(lambda failure: failure.trap(IOError)) - return d - - def test_max_processes(self): - pool = process.ProcessPool(2) - d1 = pool.simple_execute('sleep 0.01') - d2 = pool.simple_execute('sleep 0.01') - d3 = pool.simple_execute('sleep 0.005') - d4 = pool.simple_execute('sleep 0.005') - - called = [] - - def _called(rv, name): - called.append(name) - - d1.addCallback(_called, 'd1') - d2.addCallback(_called, 'd2') - d3.addCallback(_called, 'd3') - d4.addCallback(_called, 'd4') - - # Make sure that d3 and d4 had to wait on the other two and were called - # in order - # NOTE(termie): there may be a race condition in this test if for some - # reason one of the sleeps takes longer to complete - # than it should - d4.addCallback(lambda x: self.assertEqual(called[2], 'd3')) - d4.addCallback(lambda x: self.assertEqual(called[3], 'd4')) - d4.addErrback(self.fail) - return d4 - - def test_kill_long_process(self): - pool = process.ProcessPool(2) - - d1 = pool.simple_execute('sleep 1') - d2 = pool.simple_execute('sleep 0.005') - - timeout = reactor.callLater(0.1, self.fail, 'should have been killed') - - # kill d1 and wait on it to end then cancel the timeout - d2.addCallback(lambda _: d1.process.signalProcess('KILL')) - d2.addCallback(lambda _: d1) - d2.addBoth(lambda _: timeout.active() and timeout.cancel()) - d2.addErrback(self.fail) - return d2 - - def test_process_exit_is_contained(self): - pool = process.ProcessPool(2) - - d1 = pool.simple_execute('sleep 1') - d1.addCallback(lambda x: self.fail('should have errbacked')) - d1.addErrback(lambda fail: fail.trap(IOError)) - reactor.callLater(0.05, d1.process.signalProcess, 'KILL') - - return d1 - - def test_shared_pool_is_singleton(self): - pool1 = process.SharedPool() - pool2 = process.SharedPool() - self.assertEqual(id(pool1._instance), id(pool2._instance)) - - def test_shared_pool_works_as_singleton(self): - d1 = process.simple_execute('sleep 1') - d2 = process.simple_execute('sleep 0.005') - # lp609749: would have failed with - # exceptions.AssertionError: Someone released me too many times: - # too many tokens! 
- return d1 diff --git a/nova/tests/validator_unittest.py b/nova/tests/validator_unittest.py deleted file mode 100644 index b5f1c0667..000000000 --- a/nova/tests/validator_unittest.py +++ /dev/null @@ -1,42 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import unittest - -from nova import flags -from nova import test -from nova import validate - - -class ValidationTestCase(test.TrialTestCase): - def setUp(self): - super(ValidationTestCase, self).setUp() - - def tearDown(self): - super(ValidationTestCase, self).tearDown() - - def test_type_validation(self): - self.assertTrue(type_case("foo", 5, 1)) - self.assertRaises(TypeError, type_case, "bar", "5", 1) - self.assertRaises(TypeError, type_case, None, 5, 1) - - -@validate.typetest(instanceid=str, size=int, number_of_instances=int) -def type_case(instanceid, size, number_of_instances): - return True diff --git a/nova/validate.py b/nova/validate.py deleted file mode 100644 index 7ea27daa6..000000000 --- a/nova/validate.py +++ /dev/null @@ -1,94 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Decorators for argument validation, courtesy of -http://rmi.net/~lutz/rangetest.html""" - - -def rangetest(**argchecks): - """Validate ranges for both + defaults""" - - def onDecorator(func): - """onCall remembers func and argchecks""" - import sys - code = func.__code__ if sys.version_info[0] == 3 else func.func_code - allargs = code.co_varnames[:code.co_argcount] - funcname = func.__name__ - - def onCall(*pargs, **kargs): - # all pargs match first N args by position - # the rest must be in kargs or omitted defaults - positionals = list(allargs) - positionals = positionals[:len(pargs)] - - for (argname, (low, high)) in argchecks.items(): - # for all args to be checked - if argname in kargs: - # was passed by name - if float(kargs[argname]) < low or \ - float(kargs[argname]) > high: - errmsg = '{0} argument "{1}" not in {2}..{3}' - errmsg = errmsg.format(funcname, argname, low, high) - raise TypeError(errmsg) - - elif argname in positionals: - # was passed by position - position = positionals.index(argname) - if float(pargs[position]) < low or \ - float(pargs[position]) > high: - errmsg = '{0} argument "{1}" with value of {4} ' \ - 'not in {2}..{3}' - errmsg = errmsg.format(funcname, argname, low, high, - pargs[position]) - raise TypeError(errmsg) - else: - pass - - return func(*pargs, **kargs) # okay: run original call - return onCall - return onDecorator - - -def typetest(**argchecks): - def onDecorator(func): - import sys - code = func.__code__ if sys.version_info[0] == 3 else func.func_code - allargs = code.co_varnames[:code.co_argcount] - funcname = func.__name__ - - def onCall(*pargs, **kargs): - positionals = list(allargs)[:len(pargs)] - for (argname, typeof) in argchecks.items(): - if argname in kargs: - if not isinstance(kargs[argname], typeof): - errmsg = '{0} argument "{1}" not of type {2}' - errmsg = errmsg.format(funcname, argname, typeof) - raise TypeError(errmsg) - elif argname in positionals: - position = positionals.index(argname) - if not isinstance(pargs[position], typeof): - errmsg = '{0} argument "{1}" with value of {2} ' \ - 'not of type {3}' - errmsg = errmsg.format(funcname, argname, - pargs[position], typeof) - raise TypeError(errmsg) - else: - pass - return func(*pargs, **kargs) - return onCall - return onDecorator -- cgit From 5f72a004dee0cb8de3f2daee1976fa978f6e51f3 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 9 Dec 2010 16:41:35 +0000 Subject: pause from compute.manager <-> xenapi --- nova/compute/manager.py | 32 ++++++++++++++++++++++++++++++++ nova/virt/xenapi/vmops.py | 20 ++++++++++++++++++++ nova/virt/xenapi_conn.py | 8 ++++++++ 3 files changed, 60 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129..c0339a71f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -194,6 +194,38 @@ class ComputeManager(manager.Manager): yield self.driver.unrescue(instance_ref) self._update_state(context, instance_id) + @defer.inlineCallbacks + @exception.wrap_exception + def pause_instance(self, context, instance_id): + """Pause an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: pausing', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'pausing') + yield self.driver.pause(instance_ref) + self._update_state(context, instance_id) + + @defer.inlineCallbacks + @exception.wrap_exception + def resume_instance(self, context, instance_id): + 
"""Resume a paused instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: resuming', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'resume') + yield self.driver.resume(instance_ref) + self._update_state(context, instance_id) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..353e83873 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -120,6 +120,26 @@ class VMOps(object): except XenAPI.Failure, exc: logging.warn(exc) + @defer.inlineCallbacks + def pause(self, instance): + """ Pause VM instance """ + instance_name = instance.name + vm = yield VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = yield self._session.call_xenapi('Async.VM.pause', vm) + yield self._session.wait_for_task(task) + + @defer.inlineCallbacks + def unpause(self, instance): + """ Unpause VM instance """ + instance_name = instance.name + vm = yield VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = yield self._session.call_xenapi('Async.VM.unpause', vm) + yield self._session.wait_for_task(task) + def get_info(self, instance_id): """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 26b30bf92..df405e75f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -122,6 +122,14 @@ class XenAPIConnection(object): """ Destroy VM instance """ self._vmops.destroy(instance) + def pause(self, instance): + """ Pause VM instance """ + self._vmops.pause(instance) + + def unpause(self, instance): + """ Unpause paused VM instance """ + self._vmops.unpause(instance) + def get_info(self, instance_id): """ Return data about VM instance """ return self._vmops.get_info(instance_id) -- cgit From 294102984964e8853fcaf32485c273cbdcc7748a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 9 Dec 2010 17:54:30 +0000 Subject: changed resume to unpause --- nova/compute/manager.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c0339a71f..61ed3136b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -212,18 +212,18 @@ class ComputeManager(manager.Manager): @defer.inlineCallbacks @exception.wrap_exception - def resume_instance(self, context, instance_id): - """Resume a paused instance on this server.""" + def unpause_instance(self, context, instance_id): + """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: resuming', + logging.debug('instance %s: unpausing', instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, - 'resume') - yield self.driver.resume(instance_ref) + 'unpausing') + yield self.driver.unpause(instance_ref) self._update_state(context, instance_id) @exception.wrap_exception -- cgit From 90c89f5f7b24bb6c95d405d42f7f15292b5452a9 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 9 Dec 2010 17:03:49 -0400 Subject: pause and unpause code/tests in place. 
To the point it stuffs request in the queue. --- nova/api/openstack/__init__.py | 13 ++++++++----- nova/api/openstack/servers.py | 33 ++++++++++++++++++++++++++++++++ nova/compute/api.py | 21 ++++++++++++++++++++ nova/tests/api/openstack/test_servers.py | 31 +++++++++++++++++++++++++++++- 4 files changed, 92 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index c9efe5222..24042b42b 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -171,9 +171,16 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() + + server_members = {'action': 'POST'} + if FLAGS.allow_admin_api: + logging.debug("Including admin operations in API.") + server_members['pause'] = 'POST' + server_members['unpause'] = 'POST' + mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, - member={'action': 'POST'}) + member=server_members) mapper.resource("backup_schedule", "backup_schedules", controller=backup_schedules.Controller(), @@ -187,10 +194,6 @@ class APIRouter(wsgi.Router): mapper.resource("sharedipgroup", "sharedipgroups", controller=sharedipgroups.Controller()) - if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") - # TODO: Place routes for admin operations here. - super(APIRouter, self).__init__(mapper) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6f2f6fed9..ade0d7eb9 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import logging +import traceback + from webob import exc from nova import context @@ -28,6 +31,10 @@ from nova.compute import power_state import nova.api.openstack +LOG = logging.getLogger('server') +LOG.setLevel(logging.DEBUG) + + def _entity_list(entities): """ Coerces a list of servers into proper dictionary format """ return dict(servers=entities) @@ -173,3 +180,29 @@ class Controller(wsgi.Controller): except: return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + + def pause(self, req, id): + """ Permit Admins to Pause the server. """ + user_id = req.environ['nova.context']['user']['id'] + ctxt = context.RequestContext(user_id, user_id) + try: + self.compute_api.pause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::pause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unpause(self, req, id): + """ Permit Admins to Unpause the server. 
""" + user_id = req.environ['nova.context']['user']['id'] + ctxt = context.RequestContext(user_id, user_id) + try: + self.compute_api.unpause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::unpause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..e25e6cc78 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -275,6 +275,27 @@ class ComputeAPI(base.Base): {"method": "reboot_instance", "args": {"instance_id": instance['id']}}) + def pause(self, context, instance_id): + """Pause the given instance.""" + logging.debug("IN PAUSE - 1") + instance = self.db.instance_get_by_internal_id(context, instance_id) + logging.debug("IN PAUSE - 2") + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance['id']}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce..636da30fd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,9 +56,11 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, + return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id, display_name='server%s' % id, internal_id=id) +def fake_compute_api(cls, req, id): + return True class ServersTest(unittest.TestCase): def setUp(self): @@ -82,9 +84,12 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) + self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', fake_compute_api) + self.allow_admin = FLAGS.allow_admin_api def tearDown(self): self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -211,6 +216,30 @@ class ServersTest(unittest.TestCase): self.assertEqual(s['imageId'], 10) i += 1 + def test_server_pause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/pause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + + def test_server_unpause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/unpause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + def test_server_reboot(self): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, -- cgit 
From 54e4174a0b6a3c1dd4105617b06bb7a69f45202c Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 9 Dec 2010 17:32:27 -0400 Subject: remove debug messages --- nova/compute/api.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e25e6cc78..79da79cd1 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -277,9 +277,7 @@ class ComputeAPI(base.Base): def pause(self, context, instance_id): """Pause the given instance.""" - logging.debug("IN PAUSE - 1") instance = self.db.instance_get_by_internal_id(context, instance_id) - logging.debug("IN PAUSE - 2") host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), -- cgit From 00f329d479564ad8349ed32a27990da2ed3a396e Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 14:36:23 -0800 Subject: make nova binaries use eventlet --- bin/nova-api | 10 +++++----- bin/nova-combined | 14 +++++++++++--- bin/nova-compute | 1 + bin/nova-network | 1 + bin/nova-scheduler | 15 ++++++--------- bin/nova-volume | 15 ++++++--------- nova/service_eventlet.py | 4 +++- 7 files changed, 33 insertions(+), 27 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index a9c53dbcd..3215ad5ef 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -32,9 +32,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) +from nova import api from nova import flags from nova import utils from nova import server +from nova import wsgi FLAGS = flags.FLAGS flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') @@ -43,9 +45,8 @@ flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port') flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') -def main(_args): - from nova import api - from nova import wsgi +def main(): + FLAGS(sys.argv) server = wsgi.Server() server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host) @@ -53,5 +54,4 @@ def main(_args): if __name__ == '__main__': - utils.default_flagfile() - server.serve('nova-api', main) + main() diff --git a/bin/nova-combined b/bin/nova-combined index 65865acd9..c86584328 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -27,8 +27,6 @@ eventlet.monkey_patch() import os import sys -from eventlet import greenthread - # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -46,6 +44,12 @@ from nova import wsgi FLAGS = flags.FLAGS flags.DEFINE_integer('api_port', 8773, 'API port') +FLAGS = flags.FLAGS +flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') +flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host') +flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port') +flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') + if __name__ == '__main__': FLAGS(sys.argv) @@ -57,5 +61,9 @@ if __name__ == '__main__': #objectstore = service_eventlet.Service.create(binary='nova-objectstore') service_eventlet.serve(compute, network, volume, scheduler) - wsgi.run_server(api.API(), FLAGS.api_port) + + server = wsgi.Server() + server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) + server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host) + server.wait() diff --git a/bin/nova-compute b/bin/nova-compute index 600fbb897..307f7cb59 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -39,3 +39,4 @@ from nova import service_eventlet if __name__ == '__main__': service_eventlet.serve() + service_eventlet.wait() diff --git a/bin/nova-network b/bin/nova-network index 600fbb897..307f7cb59 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -39,3 +39,4 @@ from nova import service_eventlet if __name__ == '__main__': service_eventlet.serve() + service_eventlet.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 4d1a40cf1..e4aa0dcae 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -21,6 +21,9 @@ Twistd daemon for the nova scheduler nodes. """ +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,14 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service -from nova import twistd -from nova import utils - +from nova import service_eventlet if __name__ == '__main__': - utils.default_flagfile() - twistd.serve(__file__) - -if __name__ == '__builtin__': - application = service.Service.create() + service_eventlet.serve() + service_eventlet.wait() diff --git a/bin/nova-volume b/bin/nova-volume index e7281d6c0..395d16041 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -21,6 +21,9 @@ Twistd daemon for the nova volume nodes. 
""" +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,14 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service -from nova import twistd -from nova import utils - +from nova import service_eventlet if __name__ == '__main__': - utils.default_flagfile() - twistd.serve(__file__) - -if __name__ == '__builtin__': - application = service.Service.create() # pylint: disable-msg=C0103 + service_eventlet.serve() + service_eventlet.wait() diff --git a/nova/service_eventlet.py b/nova/service_eventlet.py index eac45a981..82291ffe1 100644 --- a/nova/service_eventlet.py +++ b/nova/service_eventlet.py @@ -285,4 +285,6 @@ def serve(*services): #while True: # greenthread.sleep(5) - +def wait(): + while True: + greenthread.sleep(5) -- cgit From 2cad5375dc3784397ac8e6a0aa170a161e97ea7e Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 14:37:43 -0800 Subject: whitespace fix --- nova/service_eventlet.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/service_eventlet.py b/nova/service_eventlet.py index 82291ffe1..83f0692ce 100644 --- a/nova/service_eventlet.py +++ b/nova/service_eventlet.py @@ -285,6 +285,7 @@ def serve(*services): #while True: # greenthread.sleep(5) + def wait(): while True: greenthread.sleep(5) -- cgit From 3b376b8ad167e91119e21180bbff41eceef22e26 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 15:19:56 -0800 Subject: get service unittests runnning again --- nova/service_eventlet.py | 20 +++++-- nova/test.py | 118 +++++++++++++++++++++++++++++++-------- nova/tests/scheduler_unittest.py | 22 ++++---- nova/tests/service_unittest.py | 44 +++++++-------- nova/utils.py | 7 ++- run_tests.py | 4 +- 6 files changed, 149 insertions(+), 66 deletions(-) diff --git a/nova/service_eventlet.py b/nova/service_eventlet.py index 83f0692ce..576cf8b85 100644 --- a/nova/service_eventlet.py +++ b/nova/service_eventlet.py @@ -68,6 +68,7 @@ class Service(object): self.periodic_interval = periodic_interval super(Service, self).__init__(*args, **kwargs) self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] def start(self): manager_class = utils.import_class(self.manager_class_name) @@ -96,15 +97,17 @@ class Service(object): topic='%s.%s' % (self.topic, self.host), proxy=self) - consumer_all.attach_to_eventlet() - consumer_node.attach_to_eventlet() + self.timers.append(consumer_all.attach_to_eventlet()) + self.timers.append(consumer_node.attach_to_eventlet()) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) + self.timers.append(pulse) if self.periodic_interval: - pulse = utils.LoopingCall(self.periodic_tasks) - pulse.start(interval=self.periodic_interval, now=False) + periodic = utils.LoopingCall(self.periodic_tasks) + periodic.start(interval=self.periodic_interval, now=False) + self.timers.append(periodic) def _create_service_ref(self, context): service_ref = db.service_create(context, @@ -156,11 +159,20 @@ class Service(object): def kill(self): """Destroy the service object in the datastore""" + self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: logging.warn("Service killed that has no database entry") + def stop(self): + for x in self.timers: + try: + x.stop() + except Exception: + pass + self.timers = [] + def periodic_tasks(self): """Tasks to be run at a periodic 
interval""" self.manager.periodic_tasks(context.get_admin_context()) diff --git a/nova/test.py b/nova/test.py index bbf063aca..e27d29166 100644 --- a/nova/test.py +++ b/nova/test.py @@ -30,6 +30,7 @@ import unittest import mox import stubout from twisted.internet import defer +from twisted.trial import unittest as trial_unittest from nova import context from nova import db @@ -54,7 +55,6 @@ def skip_if_fake(func): return func(*args, **kw) return _skipper - class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" def setUp(self): @@ -125,29 +125,6 @@ class TrialTestCase(unittest.TestCase): for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) - #def run(self, result=None): - # test_method = getattr(self, self._testMethodName) - # setattr(self, - # self._testMethodName, - # self._maybeInlineCallbacks(test_method, result)) - # rv = super(TrialTestCase, self).run(result) - # setattr(self, self._testMethodName, test_method) - # return rv - - #def _maybeInlineCallbacks(self, func, result): - # def _wrapped(): - # g = func() - # if isinstance(g, defer.Deferred): - # return g - # if not hasattr(g, 'send'): - # return defer.succeed(g) - - # inlined = defer.inlineCallbacks(func) - # d = inlined() - # return d - # _wrapped.func_name = func.func_name - # return _wrapped - def _monkey_patch_attach(self): self.originalAttach = rpc.Consumer.attach_to_eventlet @@ -158,3 +135,96 @@ class TrialTestCase(unittest.TestCase): _wrapped.func_name = self.originalAttach.func_name rpc.Consumer.attach_to_eventlet = _wrapped + + +class OLDTrialTestCase(trial_unittest.TestCase): + """Test case base class for all unit tests""" + def setUp(self): + """Run before each test method to initialize test environment""" + super(TrialTestCase, self).setUp() + # NOTE(vish): We need a better method for creating fixtures for tests + # now that we have some required db setup for the system + # to work properly. + self.start = datetime.datetime.utcnow() + ctxt = context.get_admin_context() + if db.network_count(ctxt) != 5: + network_manager.VlanManager().create_networks(ctxt, + FLAGS.fixed_range, + 5, 16, + FLAGS.vlan_start, + FLAGS.vpn_start) + + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.flag_overrides = {} + self.injected = [] + self._original_flags = FLAGS.FlagValuesDict() + + def tearDown(self): + """Runs after each test method to finalize/tear down test + environment.""" + try: + self.mox.UnsetStubs() + self.stubs.UnsetAll() + self.stubs.SmartUnsetAll() + self.mox.VerifyAll() + # NOTE(vish): Clean up any ips associated during the test. + ctxt = context.get_admin_context() + db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, + self.start) + db.network_disassociate_all(ctxt) + rpc.Consumer.attach_to_eventlet = self.originalAttach + for x in self.injected: + try: + x.stop() + except AssertionError: + pass + + if FLAGS.fake_rabbit: + fakerabbit.reset_all() + + db.security_group_destroy_all(ctxt) + super(TrialTestCase, self).tearDown() + finally: + self.reset_flags() + + def flags(self, **kw): + """Override flag variables for a test""" + for k, v in kw.iteritems(): + if k in self.flag_overrides: + self.reset_flags() + raise Exception( + 'trying to override already overriden flag: %s' % k) + self.flag_overrides[k] = getattr(FLAGS, k) + setattr(FLAGS, k, v) + + def reset_flags(self): + """Resets all flag variables for the test. 
Runs after each test""" + FLAGS.Reset() + for k, v in self._original_flags.iteritems(): + setattr(FLAGS, k, v) + + def run(self, result=None): + test_method = getattr(self, self._testMethodName) + setattr(self, + self._testMethodName, + self._maybeInlineCallbacks(test_method, result)) + rv = super(TrialTestCase, self).run(result) + setattr(self, self._testMethodName, test_method) + return rv + + def _maybeInlineCallbacks(self, func, result): + def _wrapped(): + g = func() + if isinstance(g, defer.Deferred): + return g + if not hasattr(g, 'send'): + return defer.succeed(g) + + inlined = defer.inlineCallbacks(func) + d = inlined() + return d + _wrapped.func_name = func.func_name + return _wrapped diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index cb5fe6b9c..37d15567f 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -22,7 +22,7 @@ Tests For Scheduler from nova import context from nova import db from nova import flags -from nova import service +from nova import service_eventlet as service from nova import test from nova import rpc from nova import utils @@ -122,12 +122,12 @@ class SimpleDriverTestCase(test.TrialTestCase): 'nova-compute', 'compute', FLAGS.compute_manager) - compute1.startService() + compute1.start() compute2 = service.Service('host2', 'nova-compute', 'compute', FLAGS.compute_manager) - compute2.startService() + compute2.start() hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(len(hosts), 2) compute1.kill() @@ -139,12 +139,12 @@ class SimpleDriverTestCase(test.TrialTestCase): 'nova-compute', 'compute', FLAGS.compute_manager) - compute1.startService() + compute1.start() compute2 = service.Service('host2', 'nova-compute', 'compute', FLAGS.compute_manager) - compute2.startService() + compute2.start() instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -162,12 +162,12 @@ class SimpleDriverTestCase(test.TrialTestCase): 'nova-compute', 'compute', FLAGS.compute_manager) - compute1.startService() + compute1.start() compute2 = service.Service('host2', 'nova-compute', 'compute', FLAGS.compute_manager) - compute2.startService() + compute2.start() instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -195,12 +195,12 @@ class SimpleDriverTestCase(test.TrialTestCase): 'nova-volume', 'volume', FLAGS.volume_manager) - volume1.startService() + volume1.start() volume2 = service.Service('host2', 'nova-volume', 'volume', FLAGS.volume_manager) - volume2.startService() + volume2.start() volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -218,12 +218,12 @@ class SimpleDriverTestCase(test.TrialTestCase): 'nova-volume', 'volume', FLAGS.volume_manager) - volume1.startService() + volume1.start() volume2 = service.Service('host2', 'nova-volume', 'volume', FLAGS.volume_manager) - volume2.startService() + volume2.start() volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 4f8d2d550..a2bac9af0 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -22,14 +22,11 @@ Unit Tests for remote procedure calls using queue import mox -from twisted.application.app import startApplication -from twisted.internet import defer - from nova import exception from nova import flags from nova import rpc from nova import 
test -from nova import service +from nova import service_eventlet as service from nova import manager FLAGS = flags.FLAGS @@ -63,7 +60,7 @@ class ServiceManagerTestCase(test.TrialTestCase): 'test', 'test', 'nova.tests.service_unittest.FakeManager') - serv.startService() + serv.start() self.assertEqual(serv.test_method(), 'manager') def test_override_manager_method(self): @@ -71,7 +68,7 @@ class ServiceManagerTestCase(test.TrialTestCase): 'test', 'test', 'nova.tests.service_unittest.FakeManager') - serv.startService() + serv.start() self.assertEqual(serv.test_method(), 'service') @@ -94,8 +91,8 @@ class ServiceTestCase(test.TrialTestCase): self.mox.StubOutWithMock(rpc, 'AdapterConsumer', use_mock_anything=True) - self.mox.StubOutWithMock( - service.task, 'LoopingCall', use_mock_anything=True) + #self.mox.StubOutWithMock( + # service.task, 'LoopingCall', use_mock_anything=True) rpc.AdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -106,19 +103,19 @@ class ServiceTestCase(test.TrialTestCase): proxy=mox.IsA(service.Service)).AndReturn( rpc.AdapterConsumer) - rpc.AdapterConsumer.attach_to_twisted() - rpc.AdapterConsumer.attach_to_twisted() + rpc.AdapterConsumer.attach_to_eventlet() + rpc.AdapterConsumer.attach_to_eventlet() # Stub out looping call a bit needlessly since we don't have an easy # way to cancel it (yet) when the tests finishes - service.task.LoopingCall(mox.IgnoreArg()).AndReturn( - service.task.LoopingCall) - service.task.LoopingCall.start(interval=mox.IgnoreArg(), - now=mox.IgnoreArg()) - service.task.LoopingCall(mox.IgnoreArg()).AndReturn( - service.task.LoopingCall) - service.task.LoopingCall.start(interval=mox.IgnoreArg(), - now=mox.IgnoreArg()) + #service.task.LoopingCall(mox.IgnoreArg()).AndReturn( + # service.task.LoopingCall) + #service.task.LoopingCall.start(interval=mox.IgnoreArg(), + # now=mox.IgnoreArg()) + #service.task.LoopingCall(mox.IgnoreArg()).AndReturn( + # service.task.LoopingCall) + #service.task.LoopingCall.start(interval=mox.IgnoreArg(), + # now=mox.IgnoreArg()) service_create = {'host': host, 'binary': binary, @@ -135,8 +132,9 @@ class ServiceTestCase(test.TrialTestCase): service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) self.mox.ReplayAll() - - startApplication(app, False) + + app.start() + app.stop() self.assert_(app) # We're testing sort of weird behavior in how report_state decides @@ -172,7 +170,7 @@ class ServiceTestCase(test.TrialTestCase): binary, topic, 'nova.tests.service_unittest.FakeManager') - serv.startService() + serv.start() serv.report_state() def test_report_state_newly_disconnected(self): @@ -202,7 +200,7 @@ class ServiceTestCase(test.TrialTestCase): binary, topic, 'nova.tests.service_unittest.FakeManager') - serv.startService() + serv.start() serv.report_state() self.assert_(serv.model_disconnected) @@ -235,7 +233,7 @@ class ServiceTestCase(test.TrialTestCase): binary, topic, 'nova.tests.service_unittest.FakeManager') - serv.startService() + serv.start() serv.model_disconnected = True serv.report_state() diff --git a/nova/utils.py b/nova/utils.py index 2c43203d8..ddbe0479e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -253,13 +253,18 @@ class LoopingCall(object): return done.send(True) + + self.done = done greenthread.spawn(_inner) - return done + return self.done def stop(self): self._running = False + def wait(self): + return self.done.wait() + def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. 
diff --git a/run_tests.py b/run_tests.py index 883d2b768..33bf51b18 100644 --- a/run_tests.py +++ b/run_tests.py @@ -48,7 +48,6 @@ import sys from twisted.scripts import trial as trial_script -import unittest from nova import flags from nova import twistd @@ -62,11 +61,10 @@ from nova.tests.flags_unittest import * from nova.tests.misc_unittest import * from nova.tests.network_unittest import * #from nova.tests.objectstore_unittest import * -#from nova.tests.process_unittest import * from nova.tests.quota_unittest import * from nova.tests.rpc_unittest import * from nova.tests.scheduler_unittest import * -#from nova.tests.service_unittest import * +from nova.tests.service_unittest import * from nova.tests.twistd_unittest import * from nova.tests.validator_unittest import * from nova.tests.virt_unittest import * -- cgit From b3f5aba0c465b263c1d0a15c7d249dafb3a98e6c Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 15:25:14 -0800 Subject: remove service and rename service_eventlet to service --- bin/nova-combined | 14 +- bin/nova-compute | 6 +- bin/nova-network | 6 +- bin/nova-scheduler | 6 +- bin/nova-volume | 6 +- nova/service.py | 169 ++++++++++++++++++---- nova/service_eventlet.py | 303 --------------------------------------- nova/tests/scheduler_unittest.py | 2 +- nova/tests/service_unittest.py | 2 +- 9 files changed, 160 insertions(+), 354 deletions(-) delete mode 100644 nova/service_eventlet.py diff --git a/bin/nova-combined b/bin/nova-combined index c86584328..c70d1d9ef 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -37,7 +37,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import api from nova import flags -from nova import service_eventlet +from nova import service from nova import wsgi @@ -54,13 +54,13 @@ flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') if __name__ == '__main__': FLAGS(sys.argv) - compute = service_eventlet.Service.create(binary='nova-compute') - network = service_eventlet.Service.create(binary='nova-network') - volume = service_eventlet.Service.create(binary='nova-volume') - scheduler = service_eventlet.Service.create(binary='nova-scheduler') - #objectstore = service_eventlet.Service.create(binary='nova-objectstore') + compute = service.Service.create(binary='nova-compute') + network = service.Service.create(binary='nova-network') + volume = service.Service.create(binary='nova-volume') + scheduler = service.Service.create(binary='nova-scheduler') + #objectstore = service.Service.create(binary='nova-objectstore') - service_eventlet.serve(compute, network, volume, scheduler) + service.serve(compute, network, volume, scheduler) server = wsgi.Server() server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) diff --git a/bin/nova-compute b/bin/nova-compute index 307f7cb59..4baf47e29 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -35,8 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service_eventlet +from nova import service if __name__ == '__main__': - service_eventlet.serve() - service_eventlet.wait() + service.serve() + service.wait() diff --git a/bin/nova-network b/bin/nova-network index 307f7cb59..4baf47e29 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -35,8 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 
'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service_eventlet +from nova import service if __name__ == '__main__': - service_eventlet.serve() - service_eventlet.wait() + service.serve() + service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index e4aa0dcae..a1b7ddf60 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -35,8 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service_eventlet +from nova import service if __name__ == '__main__': - service_eventlet.serve() - service_eventlet.wait() + service.serve() + service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index 395d16041..ba4a3a502 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -35,8 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -from nova import service_eventlet +from nova import service if __name__ == '__main__': - service_eventlet.serve() - service_eventlet.wait() + service.serve() + service.wait() diff --git a/nova/service.py b/nova/service.py index 55a0bb212..576cf8b85 100644 --- a/nova/service.py +++ b/nova/service.py @@ -17,21 +17,17 @@ # under the License. """ -A service is a very thin wrapper around a Manager object. It exposes the -manager's public methods to other components of the system via rpc. It will -report state periodically to the database and is responsible for initiating -any periodic tasts that need to be executed on a given host. - -This module contains Service, a generic baseclass for all workers. 
+Generic Node baseclass for all workers that run on hosts """ import inspect import logging import os +import sys -from twisted.internet import defer -from twisted.internet import task -from twisted.application import service +from eventlet import event +from eventlet import greenthread +from eventlet import greenpool from nova import context from nova import db @@ -50,8 +46,16 @@ flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) +flags.DEFINE_string('pidfile', None, + 'pidfile to use for this service') + + +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) + -class Service(object, service.Service): +class Service(object): """Base class for workers that run on hosts.""" def __init__(self, host, binary, topic, manager, report_interval=None, @@ -64,8 +68,9 @@ class Service(object, service.Service): self.periodic_interval = periodic_interval super(Service, self).__init__(*args, **kwargs) self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] - def startService(self): # pylint: disable-msg C0103 + def start(self): manager_class = utils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, *self.saved_args, **self.saved_kwargs) @@ -80,26 +85,29 @@ class Service(object, service.Service): except exception.NotFound: self._create_service_ref(ctxt) - conn = rpc.Connection.instance() + conn1 = rpc.Connection.instance(new=True) + conn2 = rpc.Connection.instance(new=True) if self.report_interval: consumer_all = rpc.AdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) consumer_node = rpc.AdapterConsumer( - connection=conn, + connection=conn2, topic='%s.%s' % (self.topic, self.host), proxy=self) - consumer_all.attach_to_twisted() - consumer_node.attach_to_twisted() - - pulse = task.LoopingCall(self.report_state) + self.timers.append(consumer_all.attach_to_eventlet()) + self.timers.append(consumer_node.attach_to_eventlet()) + + pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) + self.timers.append(pulse) if self.periodic_interval: - pulse = task.LoopingCall(self.periodic_tasks) - pulse.start(interval=self.periodic_interval, now=False) + periodic = utils.LoopingCall(self.periodic_tasks) + periodic.start(interval=self.periodic_interval, now=False) + self.timers.append(periodic) def _create_service_ref(self, context): service_ref = db.service_create(context, @@ -114,7 +122,7 @@ class Service(object, service.Service): return getattr(manager, key) @classmethod - def create(cls, + def create(cls, host=None, binary=None, topic=None, @@ -147,24 +155,28 @@ class Service(object, service.Service): service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals. 
- application = service.Application(binary) - service_obj.setServiceParent(application) - return application + return service_obj def kill(self): """Destroy the service object in the datastore""" + self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: logging.warn("Service killed that has no database entry") + def stop(self): + for x in self.timers: + try: + x.stop() + except Exception: + pass + self.timers = [] + def periodic_tasks(self): """Tasks to be run at a periodic interval""" - yield self.manager.periodic_tasks(context.get_admin_context()) + self.manager.periodic_tasks(context.get_admin_context()) - @defer.inlineCallbacks def report_state(self): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() @@ -180,7 +192,7 @@ class Service(object, service.Service): db.service_update(ctxt, self.service_id, {'report_count': service_ref['report_count'] + 1}) - + # TODO(termie): make this pattern be more elegant. if getattr(self, "model_disconnected", False): self.model_disconnected = False @@ -191,4 +203,101 @@ class Service(object, service.Service): if not getattr(self, "model_disconnected", False): self.model_disconnected = True logging.exception("model server went away") - yield + + +def stop(pidfile): + """ + Stop the daemon + """ + # Get the pid from the pidfile + try: + pf = file(pidfile, 'r') + pid = int(pf.read().strip()) + pf.close() + except IOError: + pid = None + + if not pid: + message = "pidfile %s does not exist. Daemon not running?\n" + sys.stderr.write(message % pidfile) + # Not an error in a restart + return + + # Try killing the daemon process + try: + while 1: + os.kill(pid, signal.SIGKILL) + time.sleep(0.1) + except OSError, err: + err = str(err) + if err.find("No such process") > 0: + if os.path.exists(pidfile): + os.remove(pidfile) + else: + print str(err) + sys.exit(1) + + +def serve(*services): + argv = FLAGS(sys.argv) + + if not services: + services = [Service.create()] + + name = '_'.join(x.binary for x in services) + logging.debug("Serving %s" % name) + + logging.getLogger('amqplib').setLevel(logging.DEBUG) + + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + # NOTE(vish): if we're running nodaemon, redirect the log to stdout + #if FLAGS.nodaemon and not FLAGS.logfile: + # FLAGS.logfile = "-" + #if not FLAGS.logfile: + # FLAGS.logfile = '%s.log' % name + #if not FLAGS.prefix: + # FLAGS.prefix = name + #elif FLAGS.prefix.endswith('twisted'): + # FLAGS.prefix = FLAGS.prefix.replace('twisted', name) + + action = 'start' + if len(argv) > 1: + action = argv.pop() + + if action == 'stop': + stop(FLAGS.pidfile) + sys.exit() + elif action == 'restart': + stop(FLAGS.pidfile) + elif action == 'start': + pass + else: + print 'usage: %s [options] [start|stop|restart]' % argv[0] + sys.exit(1) + + #formatter = logging.Formatter( + # '(%(name)s): %(levelname)s %(message)s') + #handler = logging.StreamHandler() + #handler.setFormatter(formatter) + #logging.getLogger().addHandler(handler) + + if FLAGS.verbose: + logging.getLogger().setLevel(logging.DEBUG) + else: + logging.getLogger().setLevel(logging.WARNING) + + logging.debug("Full set of FLAGS:") + for flag in FLAGS: + logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + + for x in services: + x.start() + + #while True: + # greenthread.sleep(5) + + +def wait(): + while True: + greenthread.sleep(5) diff --git a/nova/service_eventlet.py b/nova/service_eventlet.py deleted file mode 100644 index 576cf8b85..000000000 --- 
a/nova/service_eventlet.py +++ /dev/null @@ -1,303 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Generic Node baseclass for all workers that run on hosts -""" - -import inspect -import logging -import os -import sys - -from eventlet import event -from eventlet import greenthread -from eventlet import greenpool - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import rpc -from nova import utils - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('report_interval', 10, - 'seconds between nodes reporting state to datastore', - lower_bound=1) - -flags.DEFINE_integer('periodic_interval', 60, - 'seconds between running periodic tasks', - lower_bound=1) - -flags.DEFINE_string('pidfile', None, - 'pidfile to use for this service') - - -flags.DEFINE_flag(flags.HelpFlag()) -flags.DEFINE_flag(flags.HelpshortFlag()) -flags.DEFINE_flag(flags.HelpXMLFlag()) - - -class Service(object): - """Base class for workers that run on hosts.""" - - def __init__(self, host, binary, topic, manager, report_interval=None, - periodic_interval=None, *args, **kwargs): - self.host = host - self.binary = binary - self.topic = topic - self.manager_class_name = manager - self.report_interval = report_interval - self.periodic_interval = periodic_interval - super(Service, self).__init__(*args, **kwargs) - self.saved_args, self.saved_kwargs = args, kwargs - self.timers = [] - - def start(self): - manager_class = utils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *self.saved_args, - **self.saved_kwargs) - self.manager.init_host() - self.model_disconnected = False - ctxt = context.get_admin_context() - try: - service_ref = db.service_get_by_args(ctxt, - self.host, - self.binary) - self.service_id = service_ref['id'] - except exception.NotFound: - self._create_service_ref(ctxt) - - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - if self.report_interval: - consumer_all = rpc.AdapterConsumer( - connection=conn1, - topic=self.topic, - proxy=self) - consumer_node = rpc.AdapterConsumer( - connection=conn2, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - - pulse = utils.LoopingCall(self.report_state) - pulse.start(interval=self.report_interval, now=False) - self.timers.append(pulse) - - if self.periodic_interval: - periodic = utils.LoopingCall(self.periodic_tasks) - periodic.start(interval=self.periodic_interval, now=False) - self.timers.append(periodic) - - def _create_service_ref(self, context): - service_ref = db.service_create(context, - {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0}) - self.service_id = 
service_ref['id'] - - def __getattr__(self, key): - manager = self.__dict__.get('manager', None) - return getattr(manager, key) - - @classmethod - def create(cls, - host=None, - binary=None, - topic=None, - manager=None, - report_interval=None, - periodic_interval=None): - """Instantiates class and passes back application object. - - Args: - host, defaults to FLAGS.host - binary, defaults to basename of executable - topic, defaults to bin_name - "nova-" part - manager, defaults to FLAGS._manager - report_interval, defaults to FLAGS.report_interval - periodic_interval, defaults to FLAGS.periodic_interval - """ - if not host: - host = FLAGS.host - if not binary: - binary = os.path.basename(inspect.stack()[-1][1]) - if not topic: - topic = binary.rpartition("nova-")[2] - if not manager: - manager = FLAGS.get('%s_manager' % topic, None) - if not report_interval: - report_interval = FLAGS.report_interval - if not periodic_interval: - periodic_interval = FLAGS.periodic_interval - logging.warn("Starting %s node", topic) - service_obj = cls(host, binary, topic, manager, - report_interval, periodic_interval) - - return service_obj - - def kill(self): - """Destroy the service object in the datastore""" - self.stop() - try: - db.service_destroy(context.get_admin_context(), self.service_id) - except exception.NotFound: - logging.warn("Service killed that has no database entry") - - def stop(self): - for x in self.timers: - try: - x.stop() - except Exception: - pass - self.timers = [] - - def periodic_tasks(self): - """Tasks to be run at a periodic interval""" - self.manager.periodic_tasks(context.get_admin_context()) - - def report_state(self): - """Update the state of this service in the datastore.""" - ctxt = context.get_admin_context() - try: - try: - service_ref = db.service_get(ctxt, self.service_id) - except exception.NotFound: - logging.debug("The service database object disappeared, " - "Recreating it.") - self._create_service_ref(ctxt) - service_ref = db.service_get(ctxt, self.service_id) - - db.service_update(ctxt, - self.service_id, - {'report_count': service_ref['report_count'] + 1}) - - # TODO(termie): make this pattern be more elegant. - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - # TODO(vish): this should probably only catch connection errors - except Exception: # pylint: disable-msg=W0702 - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - - -def stop(pidfile): - """ - Stop the daemon - """ - # Get the pid from the pidfile - try: - pf = file(pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if not pid: - message = "pidfile %s does not exist. 
Daemon not running?\n" - sys.stderr.write(message % pidfile) - # Not an error in a restart - return - - # Try killing the daemon process - try: - while 1: - os.kill(pid, signal.SIGKILL) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find("No such process") > 0: - if os.path.exists(pidfile): - os.remove(pidfile) - else: - print str(err) - sys.exit(1) - - -def serve(*services): - argv = FLAGS(sys.argv) - - if not services: - services = [Service.create()] - - name = '_'.join(x.binary for x in services) - logging.debug("Serving %s" % name) - - logging.getLogger('amqplib').setLevel(logging.DEBUG) - - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - #if FLAGS.nodaemon and not FLAGS.logfile: - # FLAGS.logfile = "-" - #if not FLAGS.logfile: - # FLAGS.logfile = '%s.log' % name - #if not FLAGS.prefix: - # FLAGS.prefix = name - #elif FLAGS.prefix.endswith('twisted'): - # FLAGS.prefix = FLAGS.prefix.replace('twisted', name) - - action = 'start' - if len(argv) > 1: - action = argv.pop() - - if action == 'stop': - stop(FLAGS.pidfile) - sys.exit() - elif action == 'restart': - stop(FLAGS.pidfile) - elif action == 'start': - pass - else: - print 'usage: %s [options] [start|stop|restart]' % argv[0] - sys.exit(1) - - #formatter = logging.Formatter( - # '(%(name)s): %(levelname)s %(message)s') - #handler = logging.StreamHandler() - #handler.setFormatter(formatter) - #logging.getLogger().addHandler(handler) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - - logging.debug("Full set of FLAGS:") - for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) - - for x in services: - x.start() - - #while True: - # greenthread.sleep(5) - - -def wait(): - while True: - greenthread.sleep(5) diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index 37d15567f..f442a4bc2 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -22,7 +22,7 @@ Tests For Scheduler from nova import context from nova import db from nova import flags -from nova import service_eventlet as service +from nova import service from nova import test from nova import rpc from nova import utils diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index a2bac9af0..9f6d1af7d 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -26,7 +26,7 @@ from nova import exception from nova import flags from nova import rpc from nova import test -from nova import service_eventlet as service +from nova import service from nova import manager FLAGS = flags.FLAGS -- cgit From a1640f352806ee12f6b485a8d69a65bd42b51411 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 16:05:13 -0800 Subject: formatting and naming cleanup --- bin/nova-api | 6 +- bin/nova-combined | 4 +- bin/nova-compute | 4 +- bin/nova-network | 4 +- bin/nova-scheduler | 4 +- bin/nova-volume | 4 +- nova/flags.py | 4 +- nova/objectstore/image.py | 2 - nova/rpc.py | 5 +- nova/server.py | 151 ------------------------------------- nova/test.py | 9 +-- nova/tests/access_unittest.py | 2 +- nova/tests/api/__init__.py | 3 +- nova/tests/auth_unittest.py | 11 +-- nova/tests/cloud_unittest.py | 2 +- nova/tests/compute_unittest.py | 2 +- nova/tests/flags_unittest.py | 2 +- nova/tests/misc_unittest.py | 2 +- nova/tests/network_unittest.py | 2 +- nova/tests/objectstore_unittest.py | 4 +- 
nova/tests/quota_unittest.py | 2 +- nova/tests/rpc_unittest.py | 2 +- nova/tests/scheduler_unittest.py | 4 +- nova/tests/service_unittest.py | 4 +- nova/tests/virt_unittest.py | 4 +- nova/tests/volume_unittest.py | 2 +- nova/utils.py | 7 -- run_tests.py | 1 - 28 files changed, 39 insertions(+), 214 deletions(-) delete mode 100644 nova/server.py diff --git a/bin/nova-api b/bin/nova-api index 3215ad5ef..3505cefe7 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -17,9 +17,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -Nova API daemon. -""" + +"""Starter script for Nova API.""" import os import sys @@ -35,7 +34,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import api from nova import flags from nova import utils -from nova import server from nova import wsgi FLAGS = flags.FLAGS diff --git a/bin/nova-combined b/bin/nova-combined index c70d1d9ef..964ffd438 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Twistd daemon for the nova compute nodes. -""" +"""Combined starter script for Nova services.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-compute b/bin/nova-compute index 4baf47e29..f224f0690 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Twistd daemon for the nova compute nodes. -""" +"""Starter script for Nova Compute.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-network b/bin/nova-network index 4baf47e29..589c75e5a 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Twistd daemon for the nova compute nodes. -""" +"""Starter script for Nova Network.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index a1b7ddf60..cb345aa3f 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Twistd daemon for the nova scheduler nodes. -""" +""" Starter script for Nova Scheduler.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-volume b/bin/nova-volume index ba4a3a502..2f74f4b9a 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Twistd daemon for the nova volume nodes. 
-""" +"""Starter script for Nova Volume.""" import eventlet eventlet.monkey_patch() diff --git a/nova/flags.py b/nova/flags.py index 034b136d8..87444565a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -159,12 +159,11 @@ class StrWrapper(object): return str(val) raise KeyError(name) + FLAGS = FlagValues() gflags.FLAGS = FLAGS gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) -gflags.FLAGS = FLAGS - def _wrapper(func): def _wrapped(*args, **kw): @@ -187,6 +186,7 @@ DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) DEFINE_flag = _wrapper(gflags.DEFINE_flag) + HelpFlag = gflags.HelpFlag HelpshortFlag = gflags.HelpshortFlag HelpXMLFlag = gflags.HelpXMLFlag diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 2fe0b0117..9e56e256c 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -26,7 +26,6 @@ Requires decryption using keys in the manifest. import binascii import glob import json -import logging import os import shutil import tarfile @@ -265,7 +264,6 @@ class Image(object): if err: raise exception.Error("Failed to decrypt initialization " "vector: %s" % err) - logging.debug(iv) _out, err = utils.execute( 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' diff --git a/nova/rpc.py b/nova/rpc.py index 652b9e4aa..6a634a4ec 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -309,7 +309,6 @@ def call(context, topic, msg): class WaitMessage(object): def __call__(self, data, message): - LOG.debug('data %s, msg %s', data, message) """Acks message and sets result.""" message.ack() if data['failure']: @@ -332,6 +331,10 @@ def call(context, topic, msg): except StopIteration: pass consumer.close() + # NOTE(termie): this is a little bit of a change from the original + # twisted-based code where returning a Failure + # instance from a deferred call is very similar to + # raising an exception if isinstance(wait_msg.result, Exception): raise wait_msg.result return wait_msg.result diff --git a/nova/server.py b/nova/server.py deleted file mode 100644 index a060d3283..000000000 --- a/nova/server.py +++ /dev/null @@ -1,151 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base functionality for nova daemons - gradually being replaced with twistd.py. 
-""" - -import daemon -from daemon import pidlockfile -import logging -import logging.handlers -import os -import signal -import sys -import time - -from nova import flags - - -FLAGS = flags.FLAGS -flags.DEFINE_bool('daemonize', False, 'daemonize this process') -# NOTE(termie): right now I am defaulting to using syslog when we daemonize -# it may be better to do something else -shrug- -# NOTE(Devin): I think we should let each process have its own log file -# and put it in /var/logs/nova/(appname).log -# This makes debugging much easier and cuts down on sys log -# clutter. -flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') -flags.DEFINE_string('logfile', None, 'log file to output to') -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' - '(will be prepended to $logfile)') -flags.DEFINE_string('pidfile', None, 'pid file to output to') -flags.DEFINE_string('working_directory', './', 'working directory...') -flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run') -flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run') - - -def stop(pidfile): - """ - Stop the daemon - """ - # Get the pid from the pidfile - try: - pid = int(open(pidfile, 'r').read().strip()) - except IOError: - message = "pidfile %s does not exist. Daemon not running?\n" - sys.stderr.write(message % pidfile) - return - - # Try killing the daemon process - try: - while 1: - os.kill(pid, signal.SIGTERM) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find("No such process") > 0: - if os.path.exists(pidfile): - os.remove(pidfile) - else: - print str(err) - sys.exit(1) - - -def serve(name, main): - """Controller for server""" - argv = FLAGS(sys.argv) - - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - - logging.debug("Full set of FLAGS: \n\n\n") - for flag in FLAGS: - logging.debug("%s : %s", flag, FLAGS.get(flag, None)) - - action = 'start' - if len(argv) > 1: - action = argv.pop() - - if action == 'stop': - stop(FLAGS.pidfile) - sys.exit() - elif action == 'restart': - stop(FLAGS.pidfile) - elif action == 'start': - pass - else: - print 'usage: %s [options] [start|stop|restart]' % argv[0] - sys.exit(1) - daemonize(argv, name, main) - - -def daemonize(args, name, main): - """Does the work of daemonizing the process""" - logging.getLogger('amqplib').setLevel(logging.WARN) - files_to_keep = [] - if FLAGS.daemonize: - logger = logging.getLogger() - formatter = logging.Formatter( - name + '(%(name)s): %(levelname)s %(message)s') - if FLAGS.use_syslog and not FLAGS.logfile: - syslog = logging.handlers.SysLogHandler(address='/dev/log') - syslog.setFormatter(formatter) - logger.addHandler(syslog) - files_to_keep.append(syslog.socket) - else: - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) - logfile = logging.FileHandler(FLAGS.logfile) - logfile.setFormatter(formatter) - logger.addHandler(logfile) - files_to_keep.append(logfile.stream) - stdin, stdout, stderr = None, None, None - else: - stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - - with daemon.DaemonContext( - detach_process=FLAGS.daemonize, - working_directory=FLAGS.working_directory, - #pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile, - # acquire_timeout=1, - # threaded=False), - stdin=stdin, - stdout=stdout, - stderr=stderr, - uid=FLAGS.uid, - gid=FLAGS.gid, - 
files_preserve=files_to_keep): - main(args) diff --git a/nova/test.py b/nova/test.py index e27d29166..ecc97aa4d 100644 --- a/nova/test.py +++ b/nova/test.py @@ -55,11 +55,11 @@ def skip_if_fake(func): return func(*args, **kw) return _skipper -class TrialTestCase(unittest.TestCase): +class TestCase(unittest.TestCase): """Test case base class for all unit tests""" def setUp(self): """Run before each test method to initialize test environment""" - super(TrialTestCase, self).setUp() + super(TestCase, self).setUp() # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. @@ -105,7 +105,7 @@ class TrialTestCase(unittest.TestCase): fakerabbit.reset_all() db.security_group_destroy_all(ctxt) - super(TrialTestCase, self).tearDown() + super(TestCase, self).tearDown() finally: self.reset_flags() @@ -137,7 +137,7 @@ class TrialTestCase(unittest.TestCase): rpc.Consumer.attach_to_eventlet = _wrapped -class OLDTrialTestCase(trial_unittest.TestCase): +class TrialTestCase(trial_unittest.TestCase): """Test case base class for all unit tests""" def setUp(self): """Run before each test method to initialize test environment""" @@ -175,7 +175,6 @@ class OLDTrialTestCase(trial_unittest.TestCase): db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, self.start) db.network_disassociate_all(ctxt) - rpc.Consumer.attach_to_eventlet = self.originalAttach for x in self.injected: try: x.stop() diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py index 0f66c0a26..58fdea3b5 100644 --- a/nova/tests/access_unittest.py +++ b/nova/tests/access_unittest.py @@ -35,7 +35,7 @@ class Context(object): pass -class AccessTestCase(test.TrialTestCase): +class AccessTestCase(test.TestCase): def setUp(self): super(AccessTestCase, self).setUp() um = manager.AuthManager() diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index cdc1bbf00..9caa8c9d0 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -78,5 +78,4 @@ class Test(unittest.TestCase): if __name__ == '__main__': - pass - #unittest.main() + unittest.main() diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 129ff223d..4508d6721 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -16,13 +16,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-#import logging +import logging from M2Crypto import X509 import unittest -import eventlet -logging = eventlet.import_patched('logging') - from nova import crypto from nova import flags from nova import test @@ -329,12 +326,12 @@ class AuthManagerTestCase(object): self.assertTrue(user.is_admin()) -class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase): +class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' def __init__(self, *args, **kwargs): AuthManagerTestCase.__init__(self) - test.TrialTestCase.__init__(self, *args, **kwargs) + test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap FLAGS.redis_db = 8 if FLAGS.flush_db: @@ -346,7 +343,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase): self.skip = True -class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase): +class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.dbdriver.DbDriver' diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index b7b856da5..50834d990 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -51,7 +51,7 @@ IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') os.makedirs(IMAGES_PATH) -class CloudTestCase(test.TrialTestCase): +class CloudTestCase(test.TestCase): def setUp(self): super(CloudTestCase, self).setUp() self.flags(connection_type='fake', images_path=IMAGES_PATH) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 67cea72c9..c6353d357 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -35,7 +35,7 @@ from nova.compute import api as compute_api FLAGS = flags.FLAGS -class ComputeTestCase(test.TrialTestCase): +class ComputeTestCase(test.TestCase): """Test case for compute""" def setUp(self): logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py index b97df075d..707300fcf 100644 --- a/nova/tests/flags_unittest.py +++ b/nova/tests/flags_unittest.py @@ -24,7 +24,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only') -class FlagsTestCase(test.TrialTestCase): +class FlagsTestCase(test.TestCase): def setUp(self): super(FlagsTestCase, self).setUp() diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py index 667c63ad0..3eab1da0a 100644 --- a/nova/tests/misc_unittest.py +++ b/nova/tests/misc_unittest.py @@ -20,7 +20,7 @@ from nova import test from nova.utils import parse_mailmap, str_dict_replace -class ProjectTestCase(test.TrialTestCase): +class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): if os.path.exists('../.bzr'): contributors = set() diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 6f4705719..bcac20585 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -33,7 +33,7 @@ from nova.auth import manager FLAGS = flags.FLAGS -class NetworkTestCase(test.TrialTestCase): +class NetworkTestCase(test.TestCase): """Test cases for network code""" def setUp(self): super(NetworkTestCase, self).setUp() diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 061799923..ceac17adb 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -54,7 +54,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'images')) os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) -class 
ObjectStoreTestCase(test.TrialTestCase): +class ObjectStoreTestCase(test.TestCase): """Test objectstore API directly.""" def setUp(self): @@ -191,7 +191,7 @@ class TestSite(server.Site): protocol = TestHTTPChannel -class S3APITestCase(test.TrialTestCase): +class S3APITestCase(test.TestCase): """Test objectstore through S3 API.""" def setUp(self): diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py index 1966b51f7..8cf2a5e54 100644 --- a/nova/tests/quota_unittest.py +++ b/nova/tests/quota_unittest.py @@ -32,7 +32,7 @@ from nova.api.ec2 import cloud FLAGS = flags.FLAGS -class QuotaTestCase(test.TrialTestCase): +class QuotaTestCase(test.TestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(QuotaTestCase, self).setUp() diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index c2ad5cd79..4128c35b8 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -29,7 +29,7 @@ from nova import test FLAGS = flags.FLAGS -class RpcTestCase(test.TrialTestCase): +class RpcTestCase(test.TestCase): """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index f442a4bc2..d1756b8fb 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -44,7 +44,7 @@ class TestDriver(driver.Scheduler): return 'named_host' -class SchedulerTestCase(test.TrialTestCase): +class SchedulerTestCase(test.TestCase): """Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() @@ -73,7 +73,7 @@ class SchedulerTestCase(test.TrialTestCase): scheduler.named_method(ctxt, 'topic', num=7) -class SimpleDriverTestCase(test.TrialTestCase): +class SimpleDriverTestCase(test.TestCase): """Test case for simple driver""" def setUp(self): super(SimpleDriverTestCase, self).setUp() diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 9f6d1af7d..c94af4a23 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -45,7 +45,7 @@ class ExtendedService(service.Service): return 'service' -class ServiceManagerTestCase(test.TrialTestCase): +class ServiceManagerTestCase(test.TestCase): """Test cases for Services""" def test_attribute_error_for_no_manager(self): @@ -72,7 +72,7 @@ class ServiceManagerTestCase(test.TrialTestCase): self.assertEqual(serv.test_method(), 'service') -class ServiceTestCase(test.TrialTestCase): +class ServiceTestCase(test.TestCase): """Test cases for Services""" def setUp(self): diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index a4a8d3acf..85e569858 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -30,7 +30,7 @@ FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -class LibvirtConnTestCase(test.TrialTestCase): +class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() self.manager = manager.AuthManager() @@ -123,7 +123,7 @@ class LibvirtConnTestCase(test.TrialTestCase): self.manager.delete_user(self.user) -class NWFilterTestCase(test.TrialTestCase): +class NWFilterTestCase(test.TestCase): def setUp(self): super(NWFilterTestCase, self).setUp() diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 93d2ceab7..b13455fb0 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -31,7 +31,7 @@ from nova import utils FLAGS = flags.FLAGS -class VolumeTestCase(test.TrialTestCase): +class 
VolumeTestCase(test.TestCase): """Test Case for volumes.""" def setUp(self): diff --git a/nova/utils.py b/nova/utils.py index ddbe0479e..22bf5d8cf 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -129,13 +129,6 @@ def debug(arg): def runthis(prompt, cmd, check_exit_code=True): logging.debug("Running %s" % (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) - #exit_code = subprocess.call(cmd.split(" ")) - #logging.debug(prompt % (exit_code)) - #if check_exit_code and exit_code != 0: - # raise ProcessExecutionError(exit_code=exit_code, - # stdout=None, - # stderr=None, - # cmd=cmd) def generate_uid(topic, size=8): diff --git a/run_tests.py b/run_tests.py index 33bf51b18..6d7830a29 100644 --- a/run_tests.py +++ b/run_tests.py @@ -95,7 +95,6 @@ if __name__ == '__main__': else: from nova.tests.real_flags import * - # Establish redirect for STDERR sys.stderr.flush() err = open(FLAGS.tests_stderr, 'w+', 0) -- cgit From af5c175dbc77048fb74311bf92569866676eee9c Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 16:18:52 -0800 Subject: removed a few more references to twisted --- nova/rpc.py | 6 ++-- nova/service.py | 2 -- nova/virt/images.py | 2 +- nova/virt/xenapi/network_utils.py | 9 ++---- nova/virt/xenapi/vmops.py | 61 ++++++++++++++++++--------------------- 5 files changed, 36 insertions(+), 44 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 6a634a4ec..b5df4904b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -237,7 +237,9 @@ class DirectPublisher(Publisher): def msg_reply(msg_id, reply=None, failure=None): """Sends a reply or an error on the channel signified by msg_id - failure should be a twisted failure object""" + failure should be a sys.exc_info() tuple. + + """ if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) @@ -332,7 +334,7 @@ def call(context, topic, msg): pass consumer.close() # NOTE(termie): this is a little bit of a change from the original - # twisted-based code where returning a Failure + # non-eventlet code where returning a Failure # instance from a deferred call is very similar to # raising an exception if isinstance(wait_msg.result, Exception): diff --git a/nova/service.py b/nova/service.py index 576cf8b85..5c171b2ae 100644 --- a/nova/service.py +++ b/nova/service.py @@ -258,8 +258,6 @@ def serve(*services): # FLAGS.logfile = '%s.log' % name #if not FLAGS.prefix: # FLAGS.prefix = name - #elif FLAGS.prefix.endswith('twisted'): - # FLAGS.prefix = FLAGS.prefix.replace('twisted', name) action = 'start' if len(argv) > 1: diff --git a/nova/virt/images.py b/nova/virt/images.py index 4d7c65f12..1c9b2e093 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -50,7 +50,7 @@ def _fetch_s3_image(image, path, user, project): # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use - # twisted web client. + # a web client. headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..d8632f393 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -20,8 +20,6 @@ records and their attributes like bridges, PIFs, QoS, as well as their lookup functions. 
""" -from twisted.internet import defer - class NetworkHelper(): """ @@ -31,14 +29,13 @@ class NetworkHelper(): return @classmethod - @defer.inlineCallbacks def find_network_with_bridge(cls, session, bridge): """ Return the network on which the bridge is attached, if found """ expr = 'field "bridge" = "%s"' % bridge - networks = yield session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..0223e512a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -20,8 +20,6 @@ Management class for VM-related functions (spawn, reboot, etc). import logging -from twisted.internet import defer - from nova import db from nova import context from nova.auth.manager import AuthManager @@ -46,10 +44,9 @@ class VMOps(object): return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): """ Create VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) @@ -57,66 +54,64 @@ class VMOps(object): bridge = db.project_get_network(context.get_admin_context(), instance.project_id).bridge network_ref = \ - yield NetworkHelper.find_network_with_bridge(self._session, bridge) + NetworkHelper.find_network_with_bridge(self._session, bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield VMHelper.fetch_image(self._session, - instance.image_id, user, project, True) - kernel = yield VMHelper.fetch_image(self._session, - instance.kernel_id, user, project, False) - ramdisk = yield VMHelper.fetch_image(self._session, - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield VMHelper.create_vm(self._session, - instance, kernel, ramdisk) - yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + vdi_uuid = VMHelper.fetch_image( + self._session, instance.image_id, user, project, True) + kernel = VMHelper.fetch_image( + self._session, instance.kernel_id, user, project, False) + ramdisk = VMHelper.fetch_image( + self._session, instance.ramdisk_id, user, project, False) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + vm_ref = VMHelper.create_vm( + self._session, instance, kernel, ramdisk) + VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: - yield VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) + VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._session.call_xenapi('VM.start', vm_ref, False, False) + self._session.call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def reboot(self, instance): """ Reboot VM instance """ instance_name = instance.name - vm = yield VMHelper.lookup(self._session, instance_name) + vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance 
not present %s' % instance_name) - task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.clean_reboot', vm) + self._session.wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): """ Destroy VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. - defer.returnValue(None) + return # Get the VDIs related to the VM - vdis = yield VMHelper.lookup_vm_vdis(self._session, vm) + vdis = VMHelper.lookup_vm_vdis(self._session, vm) try: - task = yield self._session.call_xenapi('Async.VM.hard_shutdown', + task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - yield self._session.wait_for_task(task) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up if vdis: for vdi in vdis: try: - task = yield self._session.call_xenapi('Async.VDI.destroy', - vdi) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VDI.destroy', + vdi) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) try: - task = yield self._session.call_xenapi('Async.VM.destroy', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.destroy', vm) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From 4d06429290a373ae3a42b1f9b58d7253d269e048 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 16:45:51 -0800 Subject: add back utils.default_flagflie --- bin/nova-api | 1 + bin/nova-combined | 2 ++ bin/nova-compute | 2 ++ bin/nova-network | 2 ++ bin/nova-scheduler | 2 ++ bin/nova-volume | 2 ++ 6 files changed, 11 insertions(+) diff --git a/bin/nova-api b/bin/nova-api index 3505cefe7..210486666 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -52,4 +52,5 @@ def main(): if __name__ == '__main__': + utils.default_flagfile() main() diff --git a/bin/nova-combined b/bin/nova-combined index 964ffd438..e82b9c9b6 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -36,6 +36,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): from nova import api from nova import flags from nova import service +from nova import utils from nova import wsgi @@ -50,6 +51,7 @@ flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') if __name__ == '__main__': + utils.default_flagfile() FLAGS(sys.argv) compute = service.Service.create(binary='nova-compute') diff --git a/bin/nova-compute b/bin/nova-compute index f224f0690..8b02cd0f5 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -34,7 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import service +from nova import utils if __name__ == '__main__': + utils.default_flagfile() service.serve() service.wait() diff --git a/bin/nova-network b/bin/nova-network index 589c75e5a..01cc9d00d 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -34,7 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import service +from nova import utils if __name__ == '__main__': + utils.default_flagfile() service.serve() service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index cb345aa3f..a46dd8dda 100755 --- 
a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -34,7 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import service +from nova import utils if __name__ == '__main__': + utils.default_flagfile() service.serve() service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index 2f74f4b9a..efb837ba2 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -34,7 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import service +from nova import utils if __name__ == '__main__': + utils.default_flagfile() service.serve() service.wait() -- cgit From 1d22b67c9dc0e01c8e4afb66fbc6c9be8f843974 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 10 Dec 2010 16:28:23 +0000 Subject: added unittest for pause --- nova/tests/compute_unittest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 6f3ef96cb..ad191795c 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -138,6 +138,15 @@ class ComputeTestCase(test.TrialTestCase): yield self.compute.reboot_instance(self.context, instance_id) yield self.compute.terminate_instance(self.context, instance_id) + @defer.inlineCallbacks + def test_pause(self): + """Ensure instance can be paused""" + instance_id = self._create_instance() + yield self.compute.run_instance(self.context, instance_id) + yield self.compute.pause_instance(self.context, instance_id) + yield self.compute.unpause_instance(self.context, instance_id) + yield self.compute.terminate_instance(self.context, instance_id) + @defer.inlineCallbacks def test_console_output(self): """Make sure we can get console output from instance""" -- cgit From f7862f6d212d52e09d2a3a076762c936618cf061 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 10 Dec 2010 17:55:21 +0000 Subject: added pause and unpause to fake connection --- nova/virt/fake.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f855523d3..4526f0042 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -133,6 +133,18 @@ class FakeConnection(object): """ return defer.succeed(None) + def pause(self, instance): + """ + Pause the specified instance. + """ + return defer.succeed(None) + + def unpause(self, instance): + """ + Unpause the specified instance. + """ + return defer.succeed(None) + def destroy(self, instance): """ Destroy (shutdown and delete) the specified instance. -- cgit From 2a5ad56319dfdf75bf2eab1337032f035822f272 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 10 Dec 2010 18:37:17 +0000 Subject: There is always the odd change that one forgets! --- nova/virt/xenapi_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b82862764..4ace6da14 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -65,7 +65,7 @@ from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS flags.DEFINE_boolean('xenapi_use_fake_session', - True, + False, 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, -- cgit From 8d08206cb4759328e7cf3b836eeff824e0d22052 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 10 Dec 2010 18:49:54 +0000 Subject: Format fixes and modification of Vish's email address. 
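[Editor's note on the bin/nova-* hunks earlier in this series: each daemon now calls utils.default_flagfile() before service.serve(). The helper itself lives in nova/utils.py and is not shown in these patches; the sketch below is only an illustration, under assumptions, of what such a helper typically does — look for a default flag file and prepend a --flagfile argument when the operator did not pass one. The 'nova.conf' filename and the function body are assumptions, not the real implementation.]

    import os
    import sys

    def default_flagfile(filename='nova.conf'):
        # Illustrative sketch only; the real nova.utils.default_flagfile may differ.
        # If the caller already passed a --flagfile argument, leave argv alone.
        if any('flagfile' in arg for arg in sys.argv):
            return
        # Otherwise, if a default flag file exists next to the script, inject it
        # before the other arguments so FLAGS(sys.argv) picks it up.
        if os.path.exists(filename):
            sys.argv = sys.argv[:1] + ['--flagfile=%s' % filename] + sys.argv[1:]

[Called just before service.serve(), this lets every daemon share one nova.conf without each operator passing --flagfile explicitly.]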
--- nova/auth/ldapdriver.py | 41 ++++++++++++++++++++++------------------- nova/auth/nova_openldap.schema | 2 +- nova/auth/nova_sun.schema | 2 +- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 870262a15..1b928e7d8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,7 @@ class LdapDriver(object): def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, - '(objectclass=novaUser)') + '(objectclass=novaUser)') users = [] for attr in attrs: user = self.__to_user(attr) @@ -155,22 +155,24 @@ class LdapDriver(object): attr = [] if 'secretKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'secretKey', - [secret_key])) + [secret_key])) else: attr.append((self.ldap.MOD_ADD, 'secretKey', - [secret_key])) + [secret_key])) if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'accessKey', - [access_key])) + [access_key])) else: attr.append((self.ldap.MOD_ADD, 'accessKey', - [access_key])) + [access_key])) if LdapDriver.isadmin_attribute in user.keys(): attr.append((self.ldap.MOD_REPLACE, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) else: attr.append((self.ldap.MOD_ADD, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -299,7 +301,7 @@ class LdapDriver(object): else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % - (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] @@ -363,7 +365,7 @@ class LdapDriver(object): def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + '(objectclass=novaUser)') return attr def __find_object(self, dn, query=None, scope=None): @@ -406,7 +408,7 @@ class LdapDriver(object): def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" query = ('(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + self.__uid_to_dn(uid)) dns = self.__find_dns(tree, query) return dns @@ -436,7 +438,8 @@ class LdapDriver(object): for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + "because user %s doesn't exist" % + member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -452,7 +455,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -464,7 +467,7 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % group_dn) 
@@ -481,13 +484,13 @@ class LdapDriver(object): group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % uid) + "group because the user doesn't exist" % + uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % uid) # NOTE(vish): remove user from group and any sub_groups - sub_dns = self.__find_group_dns_with_member( - group_dn, uid) + sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: self.__safe_remove_from_group(uid, sub_dn) @@ -506,7 +509,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -564,8 +567,8 @@ class LdapDriver(object): @staticmethod def __uid_to_dn(uid): """Convert uid to dn""" - return FLAGS.ldap_user_id_attribute + '=%s,%s' \ - % (uid, FLAGS.ldap_user_subtree) + return (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index daa3a8442..539a5c42d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 8e9052ded..4a6a78839 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # using internet experimental oid arc as per BP64 3.1 -- cgit From c835c441981a17764931390bc1ace6121ab100a4 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 10 Dec 2010 11:53:17 -0800 Subject: port new patches --- nova/virt/xenapi/vmops.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13871b479..b6b92b926 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -126,14 +126,13 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) - @defer.inlineCallbacks def get_diagnostics(self, instance_id): """Return data about VM diagnostics""" - vm = yield VMHelper.lookup(self._session, instance_id) + vm = VMHelper.lookup(self._session, instance_id) if vm is None: raise Exception("instance not present %s" % instance_id) - rec = yield self._session.get_xenapi().VM.get_record(vm) - defer.returnValue(VMHelper.compile_diagnostics(self._session, rec)) + rec = self._session.get_xenapi().VM.get_record(vm) + return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): """ Return snapshot of console """ -- cgit From 12802a76c775a35e9d5a651bf896cfa25bec547f Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Sat, 11 Dec 2010 15:23:40 -0500 Subject: First round of i18n-ifying strings in Nova --- nova/api/cloudpipe/__init__.py | 4 +- nova/api/ec2/__init__.py | 6 +-- nova/api/ec2/apirequest.py | 4 +- nova/api/ec2/cloud.py | 51 +++++++++++++------------ 
nova/api/ec2/metadatarequesthandler.py | 2 +- nova/api/openstack/__init__.py | 6 +-- nova/auth/dbdriver.py | 20 +++++----- nova/auth/fakeldap.py | 2 +- nova/auth/ldapdriver.py | 69 +++++++++++++++++++--------------- nova/auth/manager.py | 30 +++++++-------- nova/cloudpipe/pipelib.py | 2 +- nova/compute/api.py | 12 +++--- nova/compute/disk.py | 16 ++++---- nova/compute/instance_types.py | 2 +- nova/compute/manager.py | 32 ++++++++-------- nova/compute/monitor.py | 12 +++--- nova/crypto.py | 18 ++++----- nova/db/sqlalchemy/api.py | 48 +++++++++++------------ nova/exception.py | 8 ++-- nova/fakerabbit.py | 12 +++--- nova/image/glance.py | 8 ++-- nova/image/s3.py | 3 +- nova/network/linux_net.py | 10 ++--- nova/network/manager.py | 17 +++++---- nova/objectstore/handler.py | 20 +++++----- nova/process.py | 2 +- nova/rpc.py | 36 +++++++++--------- nova/scheduler/chance.py | 2 +- nova/scheduler/driver.py | 2 +- nova/scheduler/manager.py | 2 +- nova/scheduler/simple.py | 13 ++++--- nova/server.py | 4 +- nova/service.py | 12 +++--- nova/twistd.py | 6 +-- nova/utils.py | 14 +++---- nova/validate.py | 12 +++--- 36 files changed, 267 insertions(+), 252 deletions(-) diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py index 6d40990a8..00ad38913 100644 --- a/nova/api/cloudpipe/__init__.py +++ b/nova/api/cloudpipe/__init__.py @@ -45,7 +45,7 @@ class API(wsgi.Application): def __call__(self, req): if req.method == 'POST': return self.sign_csr(req) - _log.debug("Cloudpipe path is %s" % req.path_info) + _log.debug(_("Cloudpipe path is %s") % req.path_info) if req.path_info.endswith("/getca/"): return self.send_root_ca(req) return webob.exc.HTTPNotFound() @@ -56,7 +56,7 @@ class API(wsgi.Application): return instance['project_id'] def send_root_ca(self, req): - _log.debug("Getting root ca") + _log.debug(_("Getting root ca")) project_id = self.get_project_id_from_ip(req.remote_addr) res = webob.Response() res.headers["Content-Type"] = "text/plain" diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..dd87d1f71 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -77,7 +77,7 @@ class Authenticate(wsgi.Middleware): req.host, req.path) except exception.Error, ex: - logging.debug("Authentication Failure: %s" % ex) + logging.debug(_("Authentication Failure: %s") % ex) raise webob.exc.HTTPForbidden() # Authenticated! @@ -120,9 +120,9 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug('action: %s' % action) + _log.debug(_('action: %s') % action) for key, value in args.items(): - _log.debug('arg: %s\t\tval: %s' % (key, value)) + _log.debug(_('arg: %s\t\tval: %s') % (key, value)) # Success! req.environ['ec2.controller'] = controller diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 5758781b6..a90fbeb0c 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -92,8 +92,8 @@ class APIRequest(object): method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: - _error = ('Unsupported API request: controller = %s,' - 'action = %s') % (self.controller, self.action) + _error = _('Unsupported API request: controller = %s,' + 'action = %s') % (self.controller, self.action) _log.warning(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. 
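[Editor's note on the i18n hunks above: wrapping log and exception strings in _() assumes a gettext translation function is already installed wherever these modules run. That setup is not part of this patch; the snippet below is a minimal sketch of how _() is usually made available, typically once at package import time. The 'nova' domain name is an assumption for illustration.]

    import gettext

    # Install _() into the builtins so every module can wrap user-visible
    # strings without importing gettext itself.  With no message catalogs
    # present, gettext falls back to returning the string unchanged.
    gettext.install('nova')

    # After install, wrapped strings work anywhere in the process:
    print(_('No service for id %s') % 42)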
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 05f8c3d0b..896e6c223 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -114,7 +114,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis("Generating root CA: %s", "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s", "sh genrootca.sh")) os.chdir(start) def _get_mpi_data(self, context, project_id): @@ -318,11 +318,11 @@ class CloudController(object): ip_protocol = str(ip_protocol) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: - raise InvalidInputException('%s is not a valid ipProtocol' % + raise InvalidInputException(_('%s is not a valid ipProtocol') % (ip_protocol,)) if ((min(from_port, to_port) < -1) or (max(from_port, to_port) > 65535)): - raise InvalidInputException('Invalid port range') + raise InvalidInputException(_('Invalid port range')) values['protocol'] = ip_protocol values['from_port'] = from_port @@ -360,7 +360,7 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified parameters.")) for rule in security_group.rules: match = True @@ -371,7 +371,7 @@ class CloudController(object): db.security_group_rule_destroy(context, rule['id']) self._trigger_refresh_security_group(context, security_group) return True - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API @@ -387,8 +387,8 @@ class CloudController(object): values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): - raise exception.ApiError('This rule already exists in group %s' % - group_name) + raise exception.ApiError(_('This rule already exists in group %s') + % group_name) security_group_rule = db.security_group_rule_create(context, values) @@ -416,7 +416,7 @@ class CloudController(object): def create_security_group(self, context, group_name, group_description): self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): - raise exception.ApiError('group %s already exists' % group_name) + raise exception.ApiError(_('group %s already exists') % group_name) group = {'user_id': context.user.id, 'project_id': context.project_id, @@ -527,13 +527,13 @@ class CloudController(object): def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_ref = db.volume_get_by_ec2_id(context, volume_id) if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError("Invalid device specified: %s. " - "Example device: /dev/vdb" % device) + raise exception.ApiError(_("Invalid device specified: %s. " + "Example device: /dev/vdb") % device) # TODO(vish): abstract status checking? 
if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) if volume_ref['attach_status'] == "attached": - raise exception.ApiError("Volume is already attached") + raise exception.ApiError(_("Volume is already attached")) internal_id = ec2_id_to_internal_id(instance_id) instance_ref = self.compute_api.get_instance(context, internal_id) host = instance_ref['host'] @@ -555,10 +555,10 @@ class CloudController(object): instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) if not instance_ref: - raise exception.ApiError("Volume isn't attached to anything!") + raise exception.ApiError(_("Volume isn't attached to anything!")) # TODO(vish): abstract status checking? if volume_ref['status'] == "available": - raise exception.ApiError("Volume is already detached") + raise exception.ApiError(_("Volume is already detached")) try: host = instance_ref['host'] rpc.cast(context, @@ -687,10 +687,11 @@ class CloudController(object): def allocate_address(self, context, **kwargs): # check quota if quota.allowed_floating_ips(context, 1) < 1: - logging.warn("Quota exceeeded for %s, tried to allocate address", + logging.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), context.project_id) - raise quota.QuotaError("Address quota exceeded. You cannot " - "allocate any more addresses") + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, @@ -803,7 +804,7 @@ class CloudController(object): # TODO: return error if not authorized volume_ref = db.volume_get_by_ec2_id(context, volume_id) if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() db.volume_update(context, volume_ref['id'], {'status': 'deleting', 'terminated_at': now}) @@ -834,11 +835,12 @@ class CloudController(object): def describe_image_attribute(self, context, image_id, attribute, **kwargs): if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise exception.ApiError(_('attribute not supported: %s') + % attribute) try: image = self.image_service.show(context, image_id) except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) + raise exception.ApiError(_('invalid id: %s') % image_id) result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: result['launchPermission'].append({'group': 'all'}) @@ -848,13 +850,14 @@ class CloudController(object): operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise exception.ApiError(_('attribute not supported: %s') + % attribute) if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') + raise exception.ApiError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') + raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') + raise exception.ApiError(_('operation_type must be add or remove')) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414cc..0e9e686ff 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(object): cc = cloud.CloudController() meta_data = cc.get_metadata(req.remote_addr) if meta_data is None: - logging.error('Failed to get metadata for ip: %s' % + logging.error(_('Failed to get metadata for ip: %s') % req.remote_addr) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index c9efe5222..45a2549c0 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -65,7 +65,7 @@ class API(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - logging.warn("Caught error: %s" % str(ex)) + logging.warn(_("Caught error: %s") % str(ex)) logging.debug(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -134,7 +134,7 @@ class RateLimitingMiddleware(wsgi.Middleware): if delay: # TODO(gundlach): Get the retry-after format correct. exc = webob.exc.HTTPRequestEntityTooLarge( - explanation='Too many requests.', + explanation=_('Too many requests.'), headers={'Retry-After': time.time() + delay}) raise faults.Fault(exc) return self.application @@ -188,7 +188,7 @@ class APIRouter(wsgi.Router): controller=sharedipgroups.Controller()) if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") + logging.debug(_("Including admin operations in API.")) # TODO: Place routes for admin operations here. 
super(APIRouter, self).__init__(mapper) diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index a1584322b..47e435cb6 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -37,7 +37,6 @@ class DbDriver(object): def __init__(self): """Imports the LDAP module""" pass - db def __enter__(self): return self @@ -83,7 +82,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate('User %s already exists' % name) + raise exception.Duplicate(_('User %s already exists') % name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -105,8 +104,9 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) # description is a required attribute if description is None: @@ -133,8 +133,8 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) for member in members: db.project_add_member(context.get_admin_context(), @@ -155,8 +155,8 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") % manager_uid) values['project_manager'] = manager['id'] if description: @@ -243,8 +243,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound('User "%s" not found' % user_id) + raise exception.NotFound(_('User "%s" not found') % user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound('Project "%s" not found' % project_id) + raise exception.NotFound(_('Project "%s" not found') % project_id) return user, project diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..cdab96b79 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -39,7 +39,7 @@ flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') class Redis(object): def __init__(self): if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') + raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index c10939d74..e289ea5a2 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -159,7 +159,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound("LDAP object for %s doesn't exist" + raise exception.NotFound(_("LDAP object for %s doesn't exist") % name) else: attr = [ @@ -182,11 +182,12 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise 
exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -195,8 +196,8 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -218,9 +219,9 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % - manager_uid) + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) if description: @@ -416,8 +417,9 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -432,8 +434,9 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -444,28 +447,30 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) + raise exception.Duplicate(_("User %s is already a member of " + "the group %s") % (uid, group_dn)) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not 
self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -479,15 +484,16 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead.", group_dn) + logging.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -500,7 +506,8 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + raise exception.NotFound(_("Group at dn %s doesn't exist") + % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6df..417f2b76d 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -257,12 +257,12 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info('Looking up user: %r', access_key) + logging.info(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) logging.info('user: %r', user) if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) + raise exception.NotFound(_('No user found for access key %s') + % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -271,12 +271,12 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) + raise exception.NotFound(_('No project called %s could be found') + % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) + raise exception.NotFound(_('User %s is not a member of project %s') + % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -284,7 +284,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so 
encode ensures that # secret isn't unicode @@ -294,7 +294,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) return (user, project) def get_access_key(self, user, project): @@ -364,7 +364,7 @@ class AuthManager(object): with self.driver() as drv: if role == 'projectmanager': if not project: - raise exception.Error("Must specify project") + raise exception.Error(_("Must specify project")) return self.is_project_manager(user, project) global_role = drv.has_role(User.safe_id(user), @@ -398,9 +398,9 @@ class AuthManager(object): @param project: Project in which to add local role. """ if role not in FLAGS.allowed_roles: - raise exception.NotFound("The %s role can not be found" % role) + raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound("The %s role is global only" % role) + raise exception.NotFound(_("The %s role is global only") % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -546,7 +546,8 @@ class AuthManager(object): Project.safe_id(project)) if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + raise exception.NotFound(_('project network data has not ' + 'been set')) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) @@ -659,8 +660,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + logging.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 3472201cd..bbe91a70c 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -49,7 +49,7 @@ class CloudPipe(object): self.manager = manager.AuthManager() def launch_vpn_instance(self, project_id): - logging.debug("Launching VPN for %s" % (project_id)) + logging.debug(_("Launching VPN for %s") % (project_id)) project = self.manager.get_project(project_id) # Make a payload.zip tmpfolder = tempfile.mkdtemp() diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..e701e540e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -120,7 +120,7 @@ class ComputeAPI(base.Base): elevated = context.elevated() instances = [] - logging.debug("Going to run %s instances...", num_instances) + logging.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -157,7 +157,7 @@ class ComputeAPI(base.Base): {"method": "setup_fixed_ip", "args": {"address": address}}) - logging.debug("Casting to scheduler for %s/%s's instance %s", + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -204,12 +204,12 @@ class ComputeAPI(base.Base): instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning(_("Instance %d was not found during terminate"), instance_id) raise e if (instance['state_description'] == 
'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning(_("Instance %d is already being terminated"), instance_id) return @@ -223,7 +223,7 @@ class ComputeAPI(base.Base): address = self.db.instance_get_floating_address(context, instance['id']) if address: - logging.debug("Disassociating address %s" % address) + logging.debug(_("Disassociating address %s") % address) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. Perhaps in the scheduler? @@ -234,7 +234,7 @@ class ComputeAPI(base.Base): address = self.db.instance_get_fixed_address(context, instance['id']) if address: - logging.debug("Deallocating address %s" % address) + logging.debug(_("Deallocating address %s") % address) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 4338d39f0..8701c3968 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -70,12 +70,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, yield execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn("Input partition size not evenly divisible by" - " sector size: %d / %d", file_size, sector_size) + logging.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn("Bytes for local storage not evenly divisible" - " by sector size: %d / %d", local_bytes, sector_size) + logging.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a @@ -121,14 +121,15 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): """ out, err = yield execute('sudo losetup -f --show %s' % image) if err: - raise exception.Error('Could not attach image to loopback: %s' % err) + raise exception.Error(_('Could not attach image to loopback: %s') + % err) device = out.strip() try: if not partition is None: # create partition out, err = yield execute('sudo kpartx -a %s' % device) if err: - raise exception.Error('Failed to load partition: %s' % err) + raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], partition) else: @@ -141,7 +142,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): out, err = yield execute( 'sudo mount %s %s' % (mapped_device, tmpdir)) if err: - raise exception.Error('Failed to mount filesystem: %s' % err) + raise exception.Error(_('Failed to mount filesystem: %s') + % err) try: if key: diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index a2679e0fc..000b3a6d9 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -37,7 +37,7 @@ def get_by_type(instance_type): if instance_type is None: return FLAGS.default_instance_type if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s", + raise exception.ApiError(_("Unknown instance type: %s"), instance_type) return instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129..a63ad5e1b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -91,8 +91,8 @@ class ComputeManager(manager.Manager): 
context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): - raise exception.Error("Instance has already been created") - logging.debug("instance %s: starting...", instance_id) + raise exception.Error(_("Instance has already been created")) + logging.debug(_("instance %s: starting..."), instance_id) self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, @@ -111,7 +111,7 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception("instance %s: Failed to spawn", + logging.exception(_("instance %s: Failed to spawn"), instance_ref['name']) self.db.instance_set_state(context, instance_id, @@ -124,7 +124,7 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - logging.debug("instance %s: terminating", instance_id) + logging.debug(_("instance %s: terminating"), instance_id) instance_ref = self.db.instance_get(context, instance_id) volumes = instance_ref.get('volumes', []) or [] @@ -132,8 +132,8 @@ class ComputeManager(manager.Manager): self.detach_volume(context, instance_id, volume['id']) if instance_ref['state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % instance_id) + raise exception.Error(_('trying to destroy already destroyed' + ' instance: %s') % instance_id) yield self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? @@ -148,13 +148,13 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)', + logging.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING) - logging.debug('instance %s: rebooting', instance_ref['name']) + logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -169,7 +169,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: rescuing', + logging.debug(_('instance %s: rescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -185,7 +185,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: unrescuing', + logging.debug(_('instance %s: unrescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -198,7 +198,7 @@ class ComputeManager(manager.Manager): def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug("instance %s: getting console output", instance_id) + logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_console_output(instance_ref) @@ -208,7 +208,7 @@ class ComputeManager(manager.Manager): def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an 
instance.""" context = context.elevated() - logging.debug("instance %s: attaching volume %s to %s", instance_id, + logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) dev_path = yield self.volume_manager.setup_compute_volume(context, @@ -225,7 +225,7 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception("instance %s: attach failed %s, removing", + logging.exception(_("instance %s: attach failed %s, removing"), instance_id, mountpoint) yield self.volume_manager.remove_compute_volume(context, volume_id) @@ -237,13 +237,13 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug("instance %s: detaching volume %s", + logging.debug(_("instance %s: detaching volume %s"), instance_id, volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn("Detaching volume from unknown instance %s", + logging.warn(_("Detaching volume from unknown instance %s"), instance_ref['name']) else: yield self.driver.detach_volume(instance_ref['name'], diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 22653113a..60c347a5e 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -255,7 +255,7 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug('updating %s...', self.instance_id) + logging.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() @@ -285,7 +285,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception('unexpected error during update') + logging.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -351,7 +351,7 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error('Cannot get blockstats for "%s" on "%s"', + logging.error(_('Cannot get blockstats for "%s" on "%s"'), disk, self.instance_id) raise @@ -373,7 +373,7 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error('Cannot get ifstats for "%s" on "%s"', + logging.error(_('Cannot get ifstats for "%s" on "%s"'), interface, self.instance_id) raise @@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception('unexpected exception getting connection') + logging.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service): if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug('Found instance: %s', domain_id) + logging.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/crypto.py b/nova/crypto.py index aacc50b17..af4a06a0c 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -39,13 +39,13 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename 
of root CA') +flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('keys_path', '$state_path/keys', - 'Where we keep our keys') + _('Where we keep our keys')) flags.DEFINE_string('ca_path', '$state_path/CA', - 'Where we keep our root CA') + _('Where we keep our root CA')) flags.DEFINE_boolean('use_intermediate_ca', False, - 'Should we use intermediate CAs for each project?') + _('Should we use intermediate CAs for each project?')) def ca_path(project_id): @@ -111,9 +111,9 @@ def generate_x509_cert(subject, bits=1024): keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') logging.debug("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating private key: %s", + utils.runthis(_("Generating private key: %s"), "openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating CSR: %s", + utils.runthis(_("Generating CSR: %s"), "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject)) private_key = open(keyfile).read() @@ -131,7 +131,7 @@ def sign_csr(csr_text, intermediate=None): if not os.path.exists(user_ca): start = os.getcwd() os.chdir(FLAGS.ca_path) - utils.runthis("Generating intermediate CA: %s", + utils.runthis(_("Generating intermediate CA: %s"), "sh geninter.sh %s" % (intermediate)) os.chdir(start) return _sign_csr(csr_text, user_ca) @@ -142,11 +142,11 @@ def _sign_csr(csr_text, ca_folder): csrfile = open("%s/inbound.csr" % (tmpfolder), "w") csrfile.write(csr_text) csrfile.close() - logging.debug("Flags path: %s" % ca_folder) + logging.debug(_("Flags path: %s") % ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.runthis("Signing cert: %s", + utils.runthis(_("Signing cert: %s"), "openssl ca -batch -out %s/outbound.crt " "-config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder)) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..4bae170a9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -41,7 +41,7 @@ FLAGS = flags.FLAGS def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: - warnings.warn('Use of empty request context is deprecated', + warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin @@ -130,7 +130,7 @@ def service_get(context, service_id, session=None): first() if not result: - raise exception.NotFound('No service for id %s' % service_id) + raise exception.NotFound(_('No service for id %s') % service_id) return result @@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No service for %s, %s' % (host, binary)) + raise exception.NotFound(_('No service for %s, %s') % (host, binary)) return result @@ -491,7 +491,7 @@ def fixed_ip_get_by_address(context, address, session=None): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('No floating ip for address %s' % address) + raise exception.NotFound(_('No floating ip for address %s') % address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -591,7 +591,7 @@ def instance_get(context, instance_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No instance for id %s' % instance_id) + raise exception.NotFound(_('No instance for id %s') % 
instance_id) return result @@ -669,7 +669,7 @@ def instance_get_by_internal_id(context, internal_id): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('Instance %s not found' % (internal_id)) + raise exception.NotFound(_('Instance %s not found') % (internal_id)) return result @@ -790,7 +790,7 @@ def key_pair_get(context, user_id, name, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('no keypair for user %s, name %s' % + raise exception.NotFound(_('no keypair for user %s, name %s') % (user_id, name)) return result @@ -905,7 +905,7 @@ def network_get(context, network_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) return result @@ -933,7 +933,7 @@ def network_get_by_bridge(context, bridge): first() if not result: - raise exception.NotFound('No network for bridge %s' % bridge) + raise exception.NotFound(_('No network for bridge %s') % bridge) return result @@ -947,7 +947,7 @@ def network_get_by_instance(_context, instance_id): filter_by(deleted=False).\ first() if not rv: - raise exception.NotFound('No network for instance %s' % instance_id) + raise exception.NotFound(_('No network for instance %s') % instance_id) return rv @@ -961,7 +961,7 @@ def network_set_host(context, network_id, host_id): with_lockmode('update').\ first() if not network_ref: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues @@ -1073,7 +1073,7 @@ def auth_get_token(_context, token_hash): filter_by(token_hash=token_hash).\ first() if not tk: - raise exception.NotFound('Token %s does not exist' % token_hash) + raise exception.NotFound(_('Token %s does not exist') % token_hash) return tk @@ -1097,7 +1097,7 @@ def quota_get(context, project_id, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No quota for project_id %s' % project_id) + raise exception.NotFound(_('No quota for project_id %s') % project_id) return result @@ -1252,7 +1252,7 @@ def volume_get(context, volume_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No volume for id %s' % volume_id) + raise exception.NotFound(_('No volume for id %s') % volume_id) return result @@ -1308,7 +1308,7 @@ def volume_get_by_ec2_id(context, ec2_id): raise exception.NotAuthorized() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result @@ -1332,7 +1332,7 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result.instance @@ -1344,7 +1344,7 @@ def volume_get_shelf_and_blade(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound('No export device found for volume %s' % + raise exception.NotFound(_('No export device found for volume %s') % volume_id) return (result.shelf_id, result.blade_id) @@ -1357,7 +1357,7 @@ def volume_get_iscsi_target_num(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise 
exception.NotFound('No target id found for volume %s' % + raise exception.NotFound(_('No target id found for volume %s') % volume_id) return result.target_num @@ -1402,7 +1402,7 @@ def security_group_get(context, security_group_id, session=None): options(joinedload_all('rules')).\ first() if not result: - raise exception.NotFound("No secuity group with id %s" % + raise exception.NotFound(_("No security group with id %s") % security_group_id) return result @@ -1419,7 +1419,7 @@ def security_group_get_by_name(context, project_id, group_name): first() if not result: raise exception.NotFound( - 'No security group named %s for project: %s' \ + _('No security group named %s for project: %s') % (group_name, project_id)) return result @@ -1507,7 +1507,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None): filter_by(id=security_group_rule_id).\ first() if not result: - raise exception.NotFound("No secuity group rule with id %s" % + raise exception.NotFound(_("No secuity group rule with id %s") % security_group_rule_id) return result @@ -1543,7 +1543,7 @@ def user_get(context, id, session=None): first() if not result: - raise exception.NotFound('No user for id %s' % id) + raise exception.NotFound(_('No user for id %s') % id) return result @@ -1559,7 +1559,7 @@ def user_get_by_access_key(context, access_key, session=None): first() if not result: - raise exception.NotFound('No user for access key %s' % access_key) + raise exception.NotFound(_('No user for access key %s') % access_key) return result @@ -1621,7 +1621,7 @@ def project_get(context, id, session=None): first() if not result: - raise exception.NotFound("No project with id %s" % id) + raise exception.NotFound(_("No project with id %s") % id) return result diff --git a/nova/exception.py b/nova/exception.py index 6d6c37338..fd8b00368 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -30,11 +30,11 @@ class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: - description = "Unexpected error while running command." 
+ description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( - description, cmd, exit_code, stdout, stderr) + message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\ + % (description, cmd, exit_code, stdout, stderr) IOError.__init__(self, message) @@ -81,7 +81,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception('Uncaught exception') + logging.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c64617931..41e686cff 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -37,12 +37,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug('(%s) publish (key: %s) %s', + logging.debug(_('(%s) publish (key: %s) %s'), self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug('Publishing to route %s', f) + logging.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -82,16 +82,16 @@ class Backend(object): def queue_declare(self, queue, **kwargs): if queue not in self._queues: - logging.debug('Declaring queue %s', queue) + logging.debug(_('Declaring queue %s'), queue) self._queues[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) + logging.debug(_('Declaring exchange %s'), exchange) self._exchanges[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', + logging.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) self._exchanges[exchange].bind(self._queues[queue].push, routing_key) @@ -117,7 +117,7 @@ class Backend(object): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug('Getting from %s: %s', queue, message) + logging.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/image/glance.py b/nova/image/glance.py index 1ca6cf2eb..cb3936df1 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -77,8 +77,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images"), res.status_int) return [] finally: c.close() @@ -96,8 +96,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images/detail", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images/detail"), res.status_int) return [] finally: c.close() diff --git a/nova/image/s3.py b/nova/image/s3.py index 0a25161de..7b04aa072 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService): result = self.index(context) result = [i for i in result if i['imageId'] == image_id] if not result: - raise exception.NotFound('Image %s 
could not be found' % image_id) + raise exception.NotFound(_('Image %s could not be found') + % image_id) image = result[0] return image diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0fefd9415..16add7689 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -135,7 +135,7 @@ def ensure_vlan(vlan_num): """Create a vlan unless it already exists""" interface = "vlan%s" % vlan_num if not _device_exists(interface): - logging.debug("Starting VLAN inteface %s", interface) + logging.debug(_("Starting VLAN inteface %s"), interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) @@ -145,7 +145,7 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug("Starting Bridge interface for %s", interface) + logging.debug(_("Starting Bridge interface for %s"), interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) @@ -202,9 +202,9 @@ def update_dhcp(context, network_id): _execute('sudo kill -HUP %d' % pid) return except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Hupping dnsmasq threw %s", exc) + logging.debug(_("Hupping dnsmasq threw %s"), exc) else: - logging.debug("Pid %d is stale, relaunching dnsmasq", pid) + logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -276,7 +276,7 @@ def _stop_dnsmasq(network): try: _execute('sudo kill -TERM %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Killing dnsmasq threw %s", exc) + logging.debug(_("Killing dnsmasq threw %s"), exc) def _dhcp_file(bridge, kind): diff --git a/nova/network/manager.py b/nova/network/manager.py index a7298b47f..e3677459d 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -116,7 +116,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug("setting network host") + logging.debug(_("setting network host")) host = self.db.network_set_host(context, network_id, self.host) @@ -175,10 +175,10 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error("IP %s leased that isn't associated" % + raise exception.Error(_("IP %s leased that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s leased to bad mac %s vs %s" % + raise exception.Error(_("IP %s leased to bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, @@ -186,7 +186,8 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn("IP %s leased that was already deallocated", address) + logging.warn(_("IP %s leased that was already deallocated"), + address) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" @@ -194,13 +195,13 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - 
raise exception.Error("IP %s released that isn't associated" % + raise exception.Error(_("IP %s released that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s released from bad mac %s vs %s" % + raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn("IP %s released that was not leased", address) + logging.warn(_("IP %s released that was not leased"), address) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) @@ -410,7 +411,7 @@ class VlanManager(NetworkManager): self.host, time) if num: - logging.debug("Dissassociated %s stale fixed ip(s)", num) + logging.debug(_("Dissassociated %s stale fixed ip(s)"), num) def init_host(self): """Do any initialization that needs to be run if this is a diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c8920b00c..0c71c3705 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -102,7 +102,7 @@ def _render_parts(value, write_cb): _render_parts(subsubvalue, write_cb) write_cb('') else: - raise Exception("Unknown S3 value type %r", value) + raise Exception(_("Unknown S3 value type %r"), value) def get_argument(request, key, default_value): @@ -134,7 +134,7 @@ def get_context(request): check_type='s3') return context.RequestContext(user, project) except exception.Error as ex: - logging.debug("Authentication Failure: %s", ex) + logging.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug("Creating bucket %s", self.name) + logging.debug(_("Creating bucket %s"), self.name) logging.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) @@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug("Deleting bucket %s", self.name) + logging.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): @@ -261,7 +261,7 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug("Getting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -279,7 +279,7 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug("Putting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -298,7 +298,7 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. 
""" - logging.debug("Deleting object: %s / %s", + logging.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name) @@ -394,17 +394,17 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug("not authorized for render_POST in images") + logging.debug(_("not authorized for render_POST in images")) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug("handling publicity toggle") + logging.debug(_("handling publicity toggle")) image_object.set_public(operation == 'add') else: # other attributes imply update - logging.debug("update user fields") + logging.debug(_("update user fields")) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] diff --git a/nova/process.py b/nova/process.py index b33df048b..25b6723ec 100644 --- a/nova/process.py +++ b/nova/process.py @@ -131,7 +131,7 @@ def get_process_output(executable, args=None, env=None, path=None, cmd = executable if args: cmd = " ".join([cmd] + args) - logging.debug("Running cmd: %s", cmd) + logging.debug(_("Running cmd: %s"), cmd) process_handler = BackRelayWithInput( deferred, cmd, diff --git a/nova/rpc.py b/nova/rpc.py index 86a29574f..cc3c7dfc8 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -91,15 +91,15 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception("AMQP server on %s:%d is unreachable." \ - " Trying again in %d seconds." % ( + logging.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( FLAGS.rabbit_host, FLAGS.rabbit_port, FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception("Unable to connect to AMQP server" \ - " after %d tries. Shutting down." % FLAGS.rabbit_max_retries) + logging.exception(_("Unable to connect to AMQP server" + " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +116,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error("Reconnected to queue") + logging.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception("Failed to fetch message from queue") + logging.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -161,7 +161,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -176,7 +176,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug('received %s' % (message_data)) + LOG.debug(_('received %s') % (message_data)) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -189,8 +189,8 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn('no method for message: %s' % (message_data)) - msg_reply(msg_id, 'No method for message: %s' % message_data) + LOG.warn(_('no method for message: %s') % (message_data)) + msg_reply(msg_id, _('No method for message: %s') % message_data) return node_func = getattr(self.proxy, str(method)) @@ -246,7 +246,7 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = failure.getErrorMessage() traceback = failure.getTraceback() - logging.error("Returning exception %s to caller", message) + logging.error(_("Returning exception %s to caller"), message) logging.error(traceback) failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() @@ -287,7 +287,7 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value - LOG.debug('unpacked context: %s', context_dict) + LOG.debug(_('unpacked context: %s'), context_dict) return context.RequestContext.from_dict(context_dict) @@ -306,10 +306,10 @@ def _pack_context(msg, context): def call(context, topic, msg): """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") + LOG.debug(_("Making asynchronous call...")) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -345,7 +345,7 @@ def call_twisted(context, topic, msg): LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) conn = Connection.instance() @@ -384,7 +384,7 @@ def cast(context, topic, msg): def generic_response(message_data, message): """Logs a result and exits""" - LOG.debug('response %s', message_data) + LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) @@ -393,8 +393,8 @@ def send_message(topic, message, wait=True): """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - LOG.debug('topic is %s', topic) - LOG.debug('message %s', message) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 
7fd09b053..9deaa2777 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler): hosts = self.hosts_up(context, topic) if not hosts: - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) return hosts[int(random.random() * len(hosts))] diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f271d573f..08d7033f5 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -58,4 +58,4 @@ class Scheduler(object): def schedule(self, context, topic, *_args, **_kwargs): """Must override at least this method for scheduler to work.""" - raise NotImplementedError("Must implement a fallback schedule") + raise NotImplementedError(_("Must implement a fallback schedule")) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 60a3d2b4b..44e21f2fd 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug("Casting to %s %s for %s", topic, host, method) + logging.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f5093656..f9171ab35 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_cores) = result if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores: - raise driver.NoValidHost("All hosts have too many cores") + raise driver.NoValidHost(_("All hosts have too many cores")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" @@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, volume_gigabytes) = result if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - raise driver.NoValidHost("All hosts have too many gigabytes") + raise driver.NoValidHost(_("All hosts have too many " + "gigabytes")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_set_network_host(self, context, *_args, **_kwargs): """Picks a host that is up and has the fewest networks.""" @@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_count) = result if instance_count >= FLAGS.max_networks: - raise driver.NoValidHost("All hosts have too many networks") + raise driver.NoValidHost(_("All hosts have too many networks")) if self.service_is_up(service): return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) diff --git a/nova/server.py b/nova/server.py index a0ee54681..e5ce4475a 100644 --- a/nova/server.py +++ b/nova/server.py @@ -58,7 +58,7 
@@ def stop(pidfile): try: pid = int(open(pidfile, 'r').read().strip()) except IOError: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. Daemon not running?\n") sys.stderr.write(message % pidfile) return @@ -84,7 +84,7 @@ def serve(name, main): if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name - logging.debug("Full set of FLAGS: \n\n\n") + logging.debug(_("Full set of FLAGS: \n\n\n")) for flag in FLAGS: logging.debug("%s : %s", flag, FLAGS.get(flag, None)) diff --git a/nova/service.py b/nova/service.py index 9454d4049..348b1d192 100644 --- a/nova/service.py +++ b/nova/service.py @@ -143,7 +143,7 @@ class Service(object, service.Service): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn("Starting %s node", topic) + logging.warn(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -158,7 +158,7 @@ class Service(object, service.Service): try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: - logging.warn("Service killed that has no database entry") + logging.warn(_("Service killed that has no database entry")) @defer.inlineCallbacks def periodic_tasks(self): @@ -173,8 +173,8 @@ class Service(object, service.Service): try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: - logging.debug("The service database object disappeared, " - "Recreating it.") + logging.debug(_("The service database object disappeared, " + "Recreating it.")) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) @@ -185,11 +185,11 @@ class Service(object, service.Service): # TODO(termie): make this pattern be more elegant. if getattr(self, "model_disconnected", False): self.model_disconnected = False - logging.error("Recovered model server connection!") + logging.error(_("Recovered model server connection!")) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable-msg=W0702 if not getattr(self, "model_disconnected", False): self.model_disconnected = True - logging.exception("model server went away") + logging.exception(_("model server went away")) yield diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce6..c5b7fed8c 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -208,7 +208,7 @@ def stop(pidfile): pid = None if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. 
Daemon not running?\n") sys.stderr.write(message % pidfile) # Not an error in a restart return @@ -229,7 +229,7 @@ def stop(pidfile): def serve(filename): - logging.debug("Serving %s" % filename) + logging.debug(_("Serving %s") % filename) name = os.path.basename(filename) OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() @@ -281,7 +281,7 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/utils.py b/nova/utils.py index 142584df8..f6f03b555 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -49,7 +49,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError): - raise exception.NotFound('Class %s cannot be found' % class_str) + raise exception.NotFound(_('Class %s cannot be found') % class_str) def import_object(import_str): @@ -63,7 +63,7 @@ def import_object(import_str): def fetchfile(url, target): - logging.debug("Fetching %s" % url) + logging.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -75,7 +75,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug("Running cmd: %s", cmd) + logging.debug(_("Running cmd: %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -88,7 +88,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug("Result was %s" % (obj.returncode)) + logging.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -122,7 +122,7 @@ def debug(arg): def runthis(prompt, cmd, check_exit_code=True): - logging.debug("Running %s" % (cmd)) + logging.debug(_("Running %s") % (cmd)) exit_code = subprocess.call(cmd.split(" ")) logging.debug(prompt % (exit_code)) if check_exit_code and exit_code != 0: @@ -161,7 +161,7 @@ def get_my_ip(): csock.close() return addr except socket.gaierror as ex: - logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) return "127.0.0.1" @@ -205,7 +205,7 @@ class LazyPluggable(object): if not self.__backend: backend_name = self.__pivot.value if backend_name not in self.__backends: - raise exception.Error('Invalid backend: %s' % backend_name) + raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if type(backend) == type(tuple()): diff --git a/nova/validate.py b/nova/validate.py index 7ea27daa6..49578a24d 100644 --- a/nova/validate.py +++ b/nova/validate.py @@ -42,7 +42,7 @@ def rangetest(**argchecks): # was passed by name if float(kargs[argname]) < low or \ float(kargs[argname]) > high: - errmsg = '{0} argument "{1}" not in {2}..{3}' + errmsg = _('{0} argument "{1}" not in {2}..{3}') errmsg = errmsg.format(funcname, argname, low, high) raise TypeError(errmsg) @@ -51,8 +51,8 @@ def rangetest(**argchecks): position = positionals.index(argname) if float(pargs[position]) < low or \ float(pargs[position]) > high: - errmsg = '{0} argument "{1}" with value of {4} ' \ - 'not in {2}..{3}' + errmsg = _('{0} argument "{1}" with value of {4} ' + 'not in {2}..{3}') errmsg = 
errmsg.format(funcname, argname, low, high, pargs[position]) raise TypeError(errmsg) @@ -76,14 +76,14 @@ def typetest(**argchecks): for (argname, typeof) in argchecks.items(): if argname in kargs: if not isinstance(kargs[argname], typeof): - errmsg = '{0} argument "{1}" not of type {2}' + errmsg = _('{0} argument "{1}" not of type {2}') errmsg = errmsg.format(funcname, argname, typeof) raise TypeError(errmsg) elif argname in positionals: position = positionals.index(argname) if not isinstance(pargs[position], typeof): - errmsg = '{0} argument "{1}" with value of {2} ' \ - 'not of type {3}' + errmsg = _('{0} argument "{1}" with value of {2} ' + 'not of type {3}') errmsg = errmsg.format(funcname, argname, pargs[position], typeof) raise TypeError(errmsg) -- cgit From c00d99102c826f6a501ff7a530291dc8d7680df7 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Sat, 11 Dec 2010 15:42:05 -0500 Subject: Final round of marking translation strings --- nova/virt/connection.py | 2 +- nova/virt/fake.py | 3 ++- nova/virt/libvirt_conn.py | 41 ++++++++++++++++++++++------------------- nova/virt/xenapi_conn.py | 16 ++++++++-------- nova/volume/driver.py | 10 +++++----- nova/volume/manager.py | 20 ++++++++++---------- 6 files changed, 48 insertions(+), 44 deletions(-) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index c40bb4bb4..61e99944e 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -66,6 +66,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error('Failed to open connection to the hypervisor') + logging.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f855523d3..26b01af91 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -167,7 +167,8 @@ class FakeConnection(object): knowledge of the instance """ if instance_name not in self.instances: - raise exception.NotFound("Instance %s Not Found" % instance_name) + raise exception.NotFound(_("Instance %s Not Found") + % instance_name) i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18085089f..bc435f4b5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -132,7 +132,7 @@ class LibvirtConnection(object): @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) + logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn @@ -144,7 +144,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug('Connection to libvirt broke') + logging.debug(_('Connection to libvirt broke')) return False raise @@ -214,7 +214,7 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info('instance %s: deleting instance files %s', + logging.info(_('instance %s: deleting instance files %s'), instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -259,7 +259,7 @@ class LibvirtConnection(object): mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise 
exception.NotFound("No disk at %s" % mount_device) + raise exception.NotFound(_("No disk at %s") % mount_device) virt_dom.detachDevice(xml) yield @@ -279,11 +279,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rebooted', instance['name']) + logging.debug(_('instance %s: rebooted'), instance['name']) timer.stop() d.callback(None) except Exception, exn: - logging.error('_wait_for_reboot failed: %s', exn) + logging.error(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -314,11 +314,11 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rescued', instance['name']) + logging.debug(_('instance %s: rescued'), instance['name']) timer.stop() d.callback(None) except Exception, exn: - logging.error('_wait_for_rescue failed: %s', exn) + logging.error(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -348,7 +348,7 @@ class LibvirtConnection(object): setup_nwfilters_for_instance(instance) yield self._create_image(instance, xml) yield self._conn.createXML(xml, 0) - logging.debug("instance %s: is running", instance['name']) + logging.debug(_("instance %s: is running"), instance['name']) local_d = defer.Deferred() timer = task.LoopingCall(f=None) @@ -359,11 +359,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: booted', instance['name']) + logging.debug(_('instance %s: booted'), instance['name']) timer.stop() local_d.callback(None) except: - logging.exception('instance %s: failed to boot', + logging.exception(_('instance %s: failed to boot'), instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], @@ -379,7 +379,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info('cool, it\'s a device') + logging.info(_('cool, it\'s a device')) d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) d.addCallback(lambda r: r[0]) @@ -388,7 +388,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info('data: %r, fpath: %r' % (data, fpath)) + logging.info(_('data: %r, fpath: %r') % (data, fpath)) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -431,7 +431,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. 
- logging.info('instance %s: Creating image', inst['name']) + logging.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -477,10 +477,10 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info('instance %s: injecting key into image %s', + logging.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info('instance %s: injecting net into image %s', + logging.info(_('instance %s: injecting net into image %s'), inst['name'], inst.image_id) yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) @@ -504,7 +504,8 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? - logging.debug('instance %s: starting toXML method', instance['name']) + logging.debug(_('instance %s: starting toXML method'), + instance['name']) network = db.project_get_network(context.get_admin_context(), instance['project_id']) # FIXME(vish): stick this in db @@ -528,7 +529,8 @@ class LibvirtConnection(object): libvirt_xml = self.rescue_xml % xml_info else: libvirt_xml = self.libvirt_xml % xml_info - logging.debug('instance %s: finished toXML method', instance['name']) + logging.debug(_('instance %s: finished toXML method'), + instance['name']) return libvirt_xml @@ -536,7 +538,8 @@ class LibvirtConnection(object): try: virt_dom = self._conn.lookupByName(instance_name) except: - raise exception.NotFound("Instance %s not found" % instance_name) + raise exception.NotFound(_("Instance %s not found") + % instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2153810c8..c8c451dfe 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -92,10 +92,10 @@ def get_connection(_): username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') + raise Exception(_('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi')) return XenAPIConnection(url, username, password) @@ -195,11 +195,11 @@ class XenAPISession(object): self._poll_task, task, deferred) elif status == 'success': result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info(_('Task %s status: success. %s'), task, result) deferred.callback(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, + logging.warn(_('Task %s status: %s. 
%s'), task, status, error_info) deferred.errback(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) @@ -213,7 +213,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): try: return func(*args, **kwargs) except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -226,7 +226,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 156aad2a0..1e7b184bb 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -76,14 +76,14 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception("Recovering from a failed execute." - "Try number %s", tries) + logging.exception(_("Recovering from a failed execute." + "Try number %s"), tries) yield self._execute("sleep %s" % tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" if not os.path.isdir("/dev/%s" % FLAGS.volume_group): - raise exception.Error("volume group %s doesn't exist" + raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) @defer.inlineCallbacks @@ -220,7 +220,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE AOE: %s", cmd) + logging.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -332,5 +332,5 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE ISCSI: %s", cmd) + logging.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 589e7d7d9..a48080511 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -82,7 +82,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug("Re-exporting %s volumes", len(volumes)) + logging.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -91,7 +91,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info("volume %s: creating", volume_ref['name']) + logging.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -100,18 +100,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
volume_ref['host'] = self.host - logging.debug("volume %s: creating lv of size %sG", + logging.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], volume_ref['size']) yield self.driver.create_volume(volume_ref) - logging.debug("volume %s: creating export", volume_ref['name']) + logging.debug(_("volume %s: creating export"), volume_ref['name']) yield self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug("volume %s: created successfully", volume_ref['name']) + logging.debug(_("volume %s: created successfully"), volume_ref['name']) defer.returnValue(volume_id) @defer.inlineCallbacks @@ -120,15 +120,15 @@ class VolumeManager(manager.Manager): context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": - raise exception.Error("Volume is still attached") + raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: - raise exception.Error("Volume is not local to this node") - logging.debug("volume %s: removing export", volume_ref['name']) + raise exception.Error(_("Volume is not local to this node")) + logging.debug(_("volume %s: removing export"), volume_ref['name']) yield self.driver.remove_export(context, volume_ref) - logging.debug("volume %s: deleting", volume_ref['name']) + logging.debug(_("volume %s: deleting"), volume_ref['name']) yield self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug("volume %s: deleted successfully", volume_ref['name']) + logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) defer.returnValue(True) @defer.inlineCallbacks -- cgit From 669d5f5612840c9ed6449d91ee5aae97842cac72 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:22:56 +0000 Subject: second round for unit testing framework --- nova/tests/virt_unittest.py | 65 +++++++++++++++++++++++++++----------- nova/virt/xenapi/__init__.py | 5 +++ nova/virt/xenapi/fake.py | 75 ++++++++++++++++++++++++++++++++++++++++++-- nova/virt/xenapi/vmops.py | 3 +- nova/virt/xenapi_conn.py | 2 +- 5 files changed, 127 insertions(+), 23 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index ba3fba83b..611022632 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -30,6 +30,9 @@ from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +# Those are XenAPI related +flags.DECLARE('target_host', 'nova.virt.xenapi_conn') +FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -270,36 +273,62 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.helper = volume_utils.VolumeHelper self.helper.late_import() + def _create_volume(self, size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['host'] = 'localhost' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol) + def test_create_iscsi_storage_raise_no_exception(self): - info = self.helper.parse_volume_info(None, None) - label = 'SR-' - description = '' - self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) + vol = self._create_volume() + info = yield 
self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs + description = 'Test-SR' + self.session.fail_next_call = False + sr_ref = self.helper.create_iscsi_storage_blocking(self.session, + info, + label, + description) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) + db.volume_destroy(context.get_admin_context(), vol['id']) def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - info = self.helper.parse_volume_info(None, None) - label = None + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs description = None - self.assertFailure(self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description), - StorageError) + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.create_iscsi_storage_blocking, + self.session, + info, + label, + description) def test_find_sr_from_vbd_raise_no_exception(self): - pass + sr_ref = yield self.helper.find_sr_from_vbd(self.session, + self.session.VBD.FAKE_REF) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - def test_destroy_iscsi_storage_raise_no_exception(self): + def test_destroy_iscsi_storage(self): pass def test_introduce_vdi_raise_no_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.introduce_vdi_blocking(self.session, sr_ref) def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.introduce_vdi_blocking, + self.session, sr_ref) def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 3d598c463..d9abe54c5 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,3 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI +================================================================== +""" diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 75ff587e1..3a01f9c3d 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,4 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +from twisted.web.domhelpers import _get +from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. # @@ -33,15 +35,15 @@ class Failure(Exception): class FakeXenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self): - pass + self.fail_next_call = False def get_xenapi(self): """ Return the xenapi object """ - raise NotImplementedError() + return self def get_xenapi_host(self): """ Return the xenapi host """ - raise NotImplementedError() + return 'FAKE_XENAPI_HOST' def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -57,3 +59,70 @@ class FakeXenAPISession(object): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" raise NotImplementedError() + + def __getattr__(self, name): + return FakeXenAPIObject(name, self) + + +class FakeXenAPIObject(object): + def __init__(self, name, session): + self.name = name + self.session = session + self.FAKE_REF = 'FAKE_REFERENCE_%s' % name + + def get_by_name_label(self, label): + if label is None: + return '' # 'No object found' + else: + return 'FAKE_OBJECT_%s_%s' % (self.name, label) + + def getter(self, *args): + self._check_fail() + return self.FAKE_REF + + def ref_list(self, *args): + self._check_fail() + return [FakeXenAPIRecord()] + + def __getattr__(self, name): + if name == 'create': + return self._create + elif name == 'get_record': + return self._record + elif name == 'introduce': + return self._introduce + elif name.startswith('get_'): + getter = 'get_%s' % self.name + if name == getter: + return self.getter + else: + child = name[name.find('_') + 1:] + if child.endswith('s'): + return FakeXenAPIObject(child[:-1], self.session).ref_list + else: + return FakeXenAPIObject(child, self.session).getter + + def _create(self, *args): + self._check_fail() + return self.FAKE_REF + + def _record(self, *args): + self._check_fail() + return FakeXenAPIRecord() + + def _introduce(self, *args): + self._check_fail() + pass + + def _check_fail(self): + if self.session.fail_next_call: + self.session.fail_next_call = False # Reset! + raise Failure('Unable to create %s' % self.name) + + +class FakeXenAPIRecord(dict): + def __init__(self): + pass + + def __getitem__(self, attr): + return '' diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a5d923a3b..9a8db0ad4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,6 +24,7 @@ from twisted.internet import defer from nova import db from nova import context +from nova import exception from nova.auth.manager import AuthManager from nova.virt.xenapi.network_utils import NetworkHelper @@ -123,7 +124,7 @@ class VMOps(object): """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: - raise Exception('instance not present %s' % instance_id) + raise exception.NotFound('Instance not found %s' % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4ace6da14..d8d21e24d 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -91,7 +91,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-10.org.openstack', + 'iqn.2010-12.org.openstack', 'IQN Prefix') -- cgit From fe667352c3e25c744a989ca45f4f9ed472778ae3 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:43:24 +0000 Subject: removing imports that should have not been there --- nova/tests/virt_unittest.py | 3 ++- nova/virt/xenapi/fake.py | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 611022632..1095662c3 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -317,7 +317,8 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.assertEqual(sr_ref, self.session.SR.FAKE_REF) def test_destroy_iscsi_storage(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) def test_introduce_vdi_raise_no_exception(self): sr_ref = self.session.SR.FAKE_REF diff --git 
a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 3a01f9c3d..2fed28609 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,6 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from twisted.web.domhelpers import _get -from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. # @@ -89,8 +87,10 @@ class FakeXenAPIObject(object): return self._create elif name == 'get_record': return self._record - elif name == 'introduce': - return self._introduce + elif name == 'introduce' or\ + name == 'forget' or\ + name == 'unplug': + return self._fake_action elif name.startswith('get_'): getter = 'get_%s' % self.name if name == getter: @@ -110,7 +110,7 @@ class FakeXenAPIObject(object): self._check_fail() return FakeXenAPIRecord() - def _introduce(self, *args): + def _fake_action(self, *args): self._check_fail() pass -- cgit From e30801445f8b543d78494ca63be60f85b94d3a53 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 20:31:33 +0000 Subject: moving xenapi unittests changes into another branch --- nova/tests/virt_unittest.py | 76 -------------------------- nova/virt/xenapi/fake.py | 128 -------------------------------------------- 2 files changed, 204 deletions(-) delete mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 1095662c3..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,14 +25,9 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -# Those are XenAPI related -flags.DECLARE('target_host', 'nova.virt.xenapi_conn') -FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -262,74 +257,3 @@ class NWFilterTestCase(test.TrialTestCase): d.addCallback(lambda _: self.teardown_security_group()) return d - - -class XenAPIVolumeTestCase(test.TrialTestCase): - - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.flags(xenapi_use_fake_session=True) - self.session = fake.FakeXenAPISession() - self.helper = volume_utils.VolumeHelper - self.helper.late_import() - - def _create_volume(self, size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['host'] = 'localhost' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol) - - def test_create_iscsi_storage_raise_no_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = 'Test-SR' - self.session.fail_next_call = False - sr_ref = self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - db.volume_destroy(context.get_admin_context(), vol['id']) - - def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = None - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.create_iscsi_storage_blocking, - self.session, - 
info, - label, - description) - - def test_find_sr_from_vbd_raise_no_exception(self): - sr_ref = yield self.helper.find_sr_from_vbd(self.session, - self.session.VBD.FAKE_REF) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - - def test_destroy_iscsi_storage(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_no_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.introduce_vdi_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.introduce_vdi_blocking, - self.session, sr_ref) - - def tearDown(self): - super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py deleted file mode 100644 index 2fed28609..000000000 --- a/nova/virt/xenapi/fake.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A fake XenAPI SDK. - -Allows for xenapi helper classes testing. -""" - - -class Failure(Exception): - def __init__(self, message=None): - super(Failure, self).__init__(message) - self.details = [] - - def __str__(self): - return 'Fake XenAPI Exception' - - -class FakeXenAPISession(object): - """ The session to invoke XenAPI SDK calls """ - def __init__(self): - self.fail_next_call = False - - def get_xenapi(self): - """ Return the xenapi object """ - return self - - def get_xenapi_host(self): - """ Return the xenapi host """ - return 'FAKE_XENAPI_HOST' - - def call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - raise NotImplementedError() - - def async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - raise NotImplementedError() - - def wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - raise NotImplementedError() - - def __getattr__(self, name): - return FakeXenAPIObject(name, self) - - -class FakeXenAPIObject(object): - def __init__(self, name, session): - self.name = name - self.session = session - self.FAKE_REF = 'FAKE_REFERENCE_%s' % name - - def get_by_name_label(self, label): - if label is None: - return '' # 'No object found' - else: - return 'FAKE_OBJECT_%s_%s' % (self.name, label) - - def getter(self, *args): - self._check_fail() - return self.FAKE_REF - - def ref_list(self, *args): - self._check_fail() - return [FakeXenAPIRecord()] - - def __getattr__(self, name): - if name == 'create': - return self._create - elif name == 'get_record': - return self._record - elif name == 'introduce' or\ - name == 'forget' or\ - name == 'unplug': - return self._fake_action - elif name.startswith('get_'): - getter = 'get_%s' % self.name - if name == getter: - return self.getter - else: - child = name[name.find('_') + 1:] - if child.endswith('s'): - return FakeXenAPIObject(child[:-1], self.session).ref_list - else: - return FakeXenAPIObject(child, self.session).getter - - def _create(self, *args): - self._check_fail() - return self.FAKE_REF - - def _record(self, *args): - self._check_fail() - return FakeXenAPIRecord() - - def _fake_action(self, *args): - self._check_fail() - pass - - def _check_fail(self): - if self.session.fail_next_call: - self.session.fail_next_call = False # Reset! - raise Failure('Unable to create %s' % self.name) - - -class FakeXenAPIRecord(dict): - def __init__(self): - pass - - def __getitem__(self, attr): - return '' -- cgit From 1395d31badc43bdce036e8da3927af22a22ca91e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 13 Dec 2010 18:25:17 -0800 Subject: Fixed power state update with Twisted callback --- nova/compute/manager.py | 21 +++++++++++++++++---- nova/virt/xenapi/vmops.py | 12 ++++++++---- nova/virt/xenapi_conn.py | 8 ++++---- 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 61ed3136b..ae33fe5b9 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -194,6 +194,13 @@ class ComputeManager(manager.Manager): yield self.driver.unrescue(instance_ref) self._update_state(context, instance_id) + @staticmethod + def _update_state_callback(self, context, instance_id, result): + """Update instance state when Deferred task completes. 
+ This staticmethod must be wrappered in a + lambda to pass in self, context & instance_id.""" + self._update_state(context, instance_id) + @defer.inlineCallbacks @exception.wrap_exception def pause_instance(self, context, instance_id): @@ -207,8 +214,11 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'pausing') - yield self.driver.pause(instance_ref) - self._update_state(context, instance_id) + yield self.driver.pause(instance_ref, + lambda result : self._update_state_callback(self, + context, + instance_id, + result)) @defer.inlineCallbacks @exception.wrap_exception @@ -223,8 +233,11 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'unpausing') - yield self.driver.unpause(instance_ref) - self._update_state(context, instance_id) + yield self.driver.unpause(instance_ref, + lambda result : self._update_state_callback(self, + context, + instance_id, + result)) @exception.wrap_exception def get_console_output(self, context, instance_id): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 353e83873..03ee3dd58 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -121,24 +121,28 @@ class VMOps(object): logging.warn(exc) @defer.inlineCallbacks - def pause(self, instance): + def pause(self, instance, callback): """ Pause VM instance """ instance_name = instance.name vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = yield self._session.call_xenapi('Async.VM.pause', vm) - yield self._session.wait_for_task(task) + deferred = self._session.wait_for_task(task) + deferred.addCallback(callback) + yield deferred @defer.inlineCallbacks - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause VM instance """ instance_name = instance.name vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = yield self._session.call_xenapi('Async.VM.unpause', vm) - yield self._session.wait_for_task(task) + deferred = self._session.wait_for_task(task) + deferred.addCallback(callback) + yield deferred def get_info(self, instance_id): """ Return data about VM instance """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index df405e75f..bcfb48323 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -122,13 +122,13 @@ class XenAPIConnection(object): """ Destroy VM instance """ self._vmops.destroy(instance) - def pause(self, instance): + def pause(self, instance, callback): """ Pause VM instance """ - self._vmops.pause(instance) + self._vmops.pause(instance, callback) - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause paused VM instance """ - self._vmops.unpause(instance) + self._vmops.unpause(instance, callback) def get_info(self, instance_id): """ Return data about VM instance """ -- cgit From 99ba9bc7c4cd7bdb085e76a8f926ade27d558a84 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 14 Dec 2010 02:43:15 -0400 Subject: added callback param to fake_conn --- nova/virt/fake.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 4526f0042..c56907175 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -133,13 +133,13 @@ class FakeConnection(object): """ return defer.succeed(None) - def pause(self, instance): + def pause(self, instance, callback): """ Pause the specified instance. 
""" return defer.succeed(None) - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause the specified instance. """ -- cgit From 6e37cf42d758b5040442d9c296b21955d10a7327 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 14 Dec 2010 11:48:29 +0000 Subject: final cleanup, after moving unittest work into another branch --- nova/virt/xenapi/__init__.py | 30 ++++++++++++++++++ nova/virt/xenapi/fake.py | 66 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/network_utils.py | 3 +- nova/virt/xenapi/vm_utils.py | 19 ++--------- nova/virt/xenapi/vmops.py | 6 ++-- nova/virt/xenapi/volume_utils.py | 20 ++---------- nova/virt/xenapi/volumeops.py | 8 +++-- nova/virt/xenapi_conn.py | 10 ++++-- 8 files changed, 118 insertions(+), 44 deletions(-) create mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index d9abe54c5..1a2903b98 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -18,3 +18,33 @@ :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI ================================================================== """ + + +def load_sdk(flags): + """ + This method is used for loading the XenAPI SDK (fake or real) + """ + xenapi_module = \ + flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' + from_list = \ + flags.xenapi_use_fake_session and ['fake'] or [] + + return __import__(xenapi_module, globals(), locals(), from_list, -1) + + +class HelperBase(): + """ + The class that wraps the helper methods together. + """ + XenAPI = None + + def __init__(self): + return + + @classmethod + def late_import(cls, FLAGS): + """ + Load XenAPI module in for helper class + """ + if cls.XenAPI is None: + cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py new file mode 100644 index 000000000..8d6a39a87 --- /dev/null +++ b/nova/virt/xenapi/fake.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake XenAPI SDK. + +Allows for xenapi helper classes testing. +""" + + +class Failure(Exception): + def __init__(self, message=None): + super(Failure, self).__init__(message) + self.details = [] + + def __str__(self): + return 'Fake XenAPI Exception' + + +class FakeSession(object): + """ + The session to invoke XenAPI SDK calls. + FIXME(armando): this is a placeholder + for the xenapi unittests branch. + """ + def __init__(self, url): + pass + + def get_xenapi(self): + """ Return the xenapi object """ + raise NotImplementedError() + + def get_xenapi_host(self): + """ Return the xenapi host """ + raise NotImplementedError() + + def call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + raise NotImplementedError() + + def async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. 
Returns a + Deferred with the task reference.""" + raise NotImplementedError() + + def wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. + The task is polled until it completes.""" + raise NotImplementedError() + + def __getattr__(self, name): + raise NotImplementedError() diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cb745cdaf 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -21,9 +21,10 @@ their lookup functions. """ from twisted.internet import defer +from nova.virt.xenapi import HelperBase -class NetworkHelper(): +class NetworkHelper(HelperBase): """ The class that wraps the helper methods together. """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 89831fe5d..2b0691c01 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS @@ -44,29 +45,13 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} -class VMHelper(): +class VMHelper(HelperBase): """ The class that wraps the helper methods together. """ - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9a8db0ad4..c79245972 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,9 +24,11 @@ from twisted.internet import defer from nova import db from nova import context +from nova import flags from nova import exception from nova.auth.manager import AuthManager +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -36,10 +38,10 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper class - VMHelper.late_import() + VMHelper.late_import(flags.FLAGS) def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 05143ed91..4482e465c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -30,7 +30,7 @@ from nova import context from nova import flags from nova import process from nova import utils - +from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS @@ -41,29 +41,13 @@ class StorageError(Exception): super(StorageError, self).__init__(message) -class VolumeHelper(): +class VolumeHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @utils.deferredToThread def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index b1afdc811..1b337a6ed 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -21,6 +21,8 @@ import logging from twisted.internet import defer +from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -31,11 +33,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import() - VMHelper.late_import() + VolumeHelper.late_import(flags.FLAGS) + VMHelper.late_import(flags.FLAGS) @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d8d21e24d..649d5dd04 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -59,6 +59,7 @@ from twisted.internet import reactor from nova import utils from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps @@ -91,7 +92,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-12.org.openstack', + 'iqn.2010-10.org.openstack', 'IQN Prefix') @@ -156,8 +157,11 @@ class XenAPISession(object): def __init__(self, url, user, pw): # This is loaded late so that there's no need to install this # library when not using XenAPI. - self.XenAPI = __import__('XenAPI') - self._session = self.XenAPI.Session(url) + self.XenAPI = load_sdk(FLAGS) + if FLAGS.xenapi_use_fake_session: + self._session = self.XenAPI.FakeSession(url) + else: + self._session = self.XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): -- cgit From bfe019e0de486eea09e4702262cd228791a4694c Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 15:33:18 +0100 Subject: Now that we have a templating engine, let's use it. Consolidate all the libvirt templates into one, extending the unit tests to make sure I didn't mess up. 
--- nova/tests/virt_unittest.py | 135 +++++++++++++++++++++++------ nova/virt/libvirt.qemu.xml.template | 37 -------- nova/virt/libvirt.rescue.qemu.xml.template | 37 -------- nova/virt/libvirt.rescue.uml.xml.template | 26 ------ nova/virt/libvirt.rescue.xen.xml.template | 34 -------- nova/virt/libvirt.uml.xml.template | 26 ------ nova/virt/libvirt.xen.xml.template | 35 -------- nova/virt/libvirt.xml.template | 76 ++++++++++++++++ nova/virt/libvirt_conn.py | 60 +++---------- 9 files changed, 199 insertions(+), 267 deletions(-) delete mode 100644 nova/virt/libvirt.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template delete mode 100644 nova/virt/libvirt.rescue.xen.xml.template delete mode 100644 nova/virt/libvirt.uml.xml.template delete mode 100644 nova/virt/libvirt.xen.xml.template create mode 100644 nova/virt/libvirt.xml.template diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d49383fb7..bcc995a5f 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -40,19 +40,53 @@ class LibvirtConnTestCase(test.TrialTestCase): self.network = utils.import_object(FLAGS.network_manager) FLAGS.instances_path = '' - def test_get_uri_and_template(self): - ip = '10.11.12.13' - - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} - + test_ip = '10.11.12.13' + test_instance = { + 'memory_kb' : '1024000', + 'basepath' : '/some/path', + 'bridge_name' : 'br100', + 'mac_address' : '02:12:34:46:56:67', + 'vcpus' : 2, + 'project_id' : 'fake', + 'bridge' : 'br101', + 'instance_type' : 'm1.small'} + + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True, + rescue=True) + + + def do_test_xml_and_uri(self, instance, + expect_ramdisk, expect_kernel, + rescue=False): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) @@ -60,13 +94,14 @@ class LibvirtConnTestCase(test.TrialTestCase): self.network.set_network_host(context.get_admin_context(), network_ref['id']) - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} + fixed_ip = { 'address' : self.test_ip, + 'network_id' : network_ref['id'] } ctxt = context.get_admin_context() fixed_ip_ref = 
db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + db.fixed_ip_update(ctxt, self.test_ip, + { 'allocated': True, + 'instance_id': instance_ref['id'] }) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -78,23 +113,71 @@ class LibvirtConnTestCase(test.TrialTestCase): (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'rescue-kernel') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'rescue-ramdisk') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'kernel') + else: + check = (lambda t: t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'ramdisk') + else: + check = (lambda t: t.find('./os/initrd'), None) + check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] + (lambda t: t.find('./devices/interface/filterref/parameter' + ).get('name'), 'IP'), + (lambda t: t.find('./devices/interface/filterref/parameter' + ).get('value'), '10.11.12.13'), + (lambda t: t.findall('./devices/interface/filterref/parameter' + )[1].get('name'), 'DHCPSERVER'), + (lambda t: t.findall('./devices/interface/filterref/parameter' + )[1].get('value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get('path' + ).split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + + if rescue: + common_checks += [(lambda t: t.findall('./devices/disk/source' + )[0].get('file').split('/')[1], + 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source' + )[1].get('file').split('/')[1], + 'disk')] + else: + common_checks += [(lambda t: t.findall('./devices/disk/source' + )[0].get('file').split('/')[1], + 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref) + xml = conn.to_xml(instance_ref, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), @@ -106,6 +189,8 @@ class LibvirtConnTestCase(test.TrialTestCase): expected_result, '%s failed common check %d' % (xml, i)) + # This test is supposed to make sure we don't override a specifically set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. 
@@ -114,7 +199,7 @@ class LibvirtConnTestCase(test.TrialTestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, testuri) def tearDown(self): diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template deleted file mode 100644 index 739eceaaa..000000000 --- a/nova/virt/libvirt.qemu.xml.template +++ /dev/null @@ -1,37 +0,0 @@ - - ${name} - - hvm -#if $getVar('kernel', None) - ${kernel} - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - root=/dev/vda1 console=ttyS0 -#end if - - - - - ${memory_kb} - ${vcpus} - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template deleted file mode 100644 index c0ffbdcee..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template +++ /dev/null @@ -1,37 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template deleted file mode 100644 index 836f47532..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template +++ /dev/null @@ -1,26 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template deleted file mode 100644 index 3b8d27237..000000000 --- a/nova/virt/libvirt.rescue.xen.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template deleted file mode 100644 index da9588049..000000000 --- a/nova/virt/libvirt.uml.xml.template +++ /dev/null @@ -1,26 +0,0 @@ - - ${name} - ${memory_kb} - - ${type} - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template deleted file mode 100644 index 8f650e512..000000000 --- a/nova/virt/libvirt.xen.xml.template +++ /dev/null @@ -1,35 +0,0 @@ - - ${name} - - linux -#if $getVar('kernel', None) - ${kernel} - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - root=/dev/vda1 console=ttyS0 -#end if - /dev/xvda1 - ro - - - - - ${memory_kb} - ${vcpus} - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..13d087330 --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,76 @@ + + ${name} + ${memory_kb} + +#if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda1 +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda1 + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/rescue-kernel + ${basepath}/rescue-ramdisk + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda1 console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + 
+#if $getVar('rescue', False) + + + + + + + + +#else + + + + +#end if + + + + + + + + + + + + + + + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 81dbbaad5..2865c18ac 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN. :libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template (QEmu/KVM). -:libvirt_xen_xml_template: Libvirt XML Template (Xen). -:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux). -:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU). -:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN). -:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML). +:libvirt_xml_template: Libvirt XML Template. :rescue_image_id: Rescue ami image (default: ami-rescue). :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). @@ -70,31 +65,13 @@ libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_rescue_xml_template', - utils.abspath('virt/libvirt.rescue.qemu.xml.template'), - 'Libvirt RESCUE XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_rescue_xen_xml_template', - utils.abspath('virt/libvirt.rescue.xen.xml.template'), - 'Libvirt RESCUE XML Template for xen') -flags.DEFINE_string('libvirt_rescue_uml_xml_template', - utils.abspath('virt/libvirt.rescue.uml.xml.template'), - 'Libvirt RESCUE XML Template for user-mode-linux') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.qemu.xml.template'), - 'Libvirt XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_xen_xml_template', - utils.abspath('virt/libvirt.xen.xml.template'), - 'Libvirt XML Template for Xen') -flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('virt/libvirt.uml.xml.template'), - 'Libvirt XML Template for user-mode-linux') -flags.DEFINE_string('injected_network_template', - utils.abspath('virt/interfaces.template'), - 'Template file for injected network') + utils.abspath('virt/libvirt.xml.template'), + 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -122,12 +99,9 @@ def get_connection(read_only): class LibvirtConnection(object): def __init__(self, read_only): - (self.libvirt_uri, - template_file, - rescue_file) = self.get_uri_and_templates() + self.libvirt_uri = self.get_uri() - self.libvirt_xml = open(template_file).read() - self.rescue_xml = open(rescue_file).read() + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -150,20 +124,14 @@ class LibvirtConnection(object): return False raise - def get_uri_and_templates(self): + def get_uri(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' - template_file = FLAGS.libvirt_uml_xml_template - rescue_file = FLAGS.libvirt_rescue_uml_xml_template elif FLAGS.libvirt_type == 'xen': uri = FLAGS.libvirt_uri or 'xen:///' - template_file = FLAGS.libvirt_xen_xml_template - rescue_file = FLAGS.libvirt_rescue_xen_xml_template else: uri 
= FLAGS.libvirt_uri or 'qemu:///system' - template_file = FLAGS.libvirt_xml_template - rescue_file = FLAGS.libvirt_rescue_xml_template - return uri, template_file, rescue_file + return uri def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], @@ -543,18 +511,16 @@ class LibvirtConnection(object): 'bridge_name': network['bridge'], 'mac_address': instance['mac_address'], 'ip_address': ip_address, - 'dhcp_server': dhcp_server} - if rescue: - xml = self.rescue_xml % xml_info - else: - if xml_info['kernel_id']: + 'dhcp_server': dhcp_server, + 'rescue': rescue} + if not rescue: + if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" - if xml_info['ramdisk_id']: + if instance['ramdisk_id']: xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" - if xml_info['ramdisk_id'] or xml_info['kernel_id']: - xml_info['disk'] = xml_info['basepath'] + "/disk" + xml_info['disk'] = xml_info['basepath'] + "/disk" xml = str(Template(self.libvirt_xml, searchList=[xml_info])) logging.debug('instance %s: finished toXML method', instance['name']) -- cgit From fa7d288e6af3d997d6275d9e6778e932be9f1c3f Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 14 Dec 2010 17:56:42 -0400 Subject: pep8 --- nova/adminclient.py | 1 + nova/api/ec2/admin.py | 1 + nova/api/openstack/backup_schedules.py | 1 + nova/api/openstack/servers.py | 2 -- nova/compute/api.py | 1 - nova/compute/manager.py | 18 ++++++++---------- nova/db/sqlalchemy/api.py | 4 ++++ nova/exception.py | 3 +++ nova/process.py | 2 ++ nova/server.py | 2 +- nova/tests/api/openstack/test_servers.py | 4 ++-- nova/twistd.py | 2 +- nova/virt/fake.py | 1 + nova/virt/libvirt_conn.py | 5 +++-- nova/virt/xenapi/network_utils.py | 1 + nova/virt/xenapi/vm_utils.py | 1 + nova/virt/xenapi/vmops.py | 1 + nova/virt/xenapi/volumeops.py | 1 + nova/virt/xenapi_conn.py | 2 ++ 19 files changed, 34 insertions(+), 19 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 5a62cce7d..6ae9f0c0f 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -194,6 +194,7 @@ class HostInfo(object): class NovaAdminClient(object): + def __init__( self, clc_url=DEFAULT_CLC_URL, diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 1c6ab688d..fac01369e 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -168,6 +168,7 @@ class AdminController(object): # FIXME(vish): these host commands don't work yet, perhaps some of the # required data can be retrieved from service objects? + def describe_hosts(self, _context, **_kwargs): """Returns status info for all nodes. 
Includes: * Disk Space diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 3ed691d7b..fc70b5c6c 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -24,6 +24,7 @@ import nova.image.service class Controller(wsgi.Controller): + def __init__(self): pass diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index dcd959ae7..5c3322f7c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -195,5 +195,3 @@ class Controller(wsgi.Controller): logging.error("Compute.api::unpause %s", readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - - diff --git a/nova/compute/api.py b/nova/compute/api.py index 79da79cd1..7420c40d2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -293,7 +293,6 @@ class ComputeAPI(base.Base): {"method": "unpause_instance", "args": {"instance_id": instance['id']}}) - def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ae33fe5b9..0c0ba7450 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -196,9 +196,7 @@ class ComputeManager(manager.Manager): @staticmethod def _update_state_callback(self, context, instance_id, result): - """Update instance state when Deferred task completes. - This staticmethod must be wrappered in a - lambda to pass in self, context & instance_id.""" + """Update instance state when Deferred task completes.""" self._update_state(context, instance_id) @defer.inlineCallbacks @@ -214,10 +212,10 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'pausing') - yield self.driver.pause(instance_ref, - lambda result : self._update_state_callback(self, - context, - instance_id, + yield self.driver.pause(instance_ref, + lambda result: self._update_state_callback(self, + context, + instance_id, result)) @defer.inlineCallbacks @@ -234,9 +232,9 @@ class ComputeManager(manager.Manager): power_state.NOSTATE, 'unpausing') yield self.driver.unpause(instance_ref, - lambda result : self._update_state_callback(self, - context, - instance_id, + lambda result: self._update_state_callback(self, + context, + instance_id, result)) @exception.wrap_exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..935063609 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -528,6 +528,8 @@ def fixed_ip_update(context, address, values): #TODO(gundlach): instance_create and volume_create are nearly identical #and should be refactored. I expect there are other copy-and-paste #functions between the two of them as well. + + @require_context def instance_create(context, values): """Create a new Instance record in the database. 
@@ -913,6 +915,8 @@ def network_get(context, network_id, session=None): # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 + + @require_admin_context def network_get_associated_fixed_ips(context, network_id): session = get_session() diff --git a/nova/exception.py b/nova/exception.py index 6d6c37338..9af4017ba 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -27,6 +27,7 @@ import traceback class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: @@ -39,11 +40,13 @@ class ProcessExecutionError(IOError): class Error(Exception): + def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code diff --git a/nova/process.py b/nova/process.py index b33df048b..39fddef6f 100644 --- a/nova/process.py +++ b/nova/process.py @@ -40,6 +40,8 @@ flags.DEFINE_integer('process_pool_size', 4, # This is based on _BackRelay from twister.internal.utils, but modified to # capture both stdout and stderr, without odd stderr handling, and also to # handle stdin + + class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output diff --git a/nova/server.py b/nova/server.py index a0ee54681..3b9086177 100644 --- a/nova/server.py +++ b/nova/server.py @@ -42,7 +42,7 @@ flags.DEFINE_bool('daemonize', False, 'daemonize this process') # clutter. flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') flags.DEFINE_string('logfile', None, 'log file to output to') -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') flags.DEFINE_string('pidfile', None, 'pid file to output to') flags.DEFINE_string('working_directory', './', 'working directory...') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8e48017d0..ba432f6c3 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -84,9 +84,9 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) - self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', + self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', + self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', fake_compute_api) self.allow_admin = FLAGS.allow_admin_api diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce6..e6c3101f1 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,7 +43,7 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index c56907175..59acabc21 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -259,5 +259,6 @@ class FakeConnection(object): class FakeInstance(object): + def __init__(self): self._state = power_state.NOSTATE diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 97ff49a10..5939f0afe 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -119,6 +119,7 @@ def 
get_connection(read_only): class LibvirtConnection(object): + def __init__(self, read_only): (self.libvirt_uri, template_file, @@ -298,12 +299,12 @@ class LibvirtConnection(object): @exception.wrap_exception def pause(self, instance, callback): raise exception.APIError("pause not supported for libvirt.") - + @defer.inlineCallbacks @exception.wrap_exception def unpause(self, instance, callback): raise exception.APIError("unpause not supported for libvirt.") - + @defer.inlineCallbacks @exception.wrap_exception def rescue(self, instance): diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cffaf7f23 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -27,6 +27,7 @@ class NetworkHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 77edb576e..c17dc0bed 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -49,6 +49,7 @@ class VMHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7c5db0b73..405a8518e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -36,6 +36,7 @@ class VMOps(object): """ Management class for VM-related tasks """ + def __init__(self, session): global XenAPI if XenAPI is None: diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index a4c7a3861..1943ccab0 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc). class VolumeOps(object): + def __init__(self, session): self._session = session diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index ebd572258..fa87bb779 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,6 +101,7 @@ def get_connection(_): class XenAPIConnection(object): """ A connection to XenServer or Xen Cloud Platform """ + def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) @@ -155,6 +156,7 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ + def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) -- cgit From aab6a89ba1e9ace73dcb4fa68a67957e29c47f84 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 23:34:08 +0100 Subject: Don't attempt to fiddle with partitions for whole-disk-images. 
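One caveat worth noting about the disk.extend() helper added below: the truncate command interpolates the literal word "size" rather than the size argument, so the call should be rejected by truncate and the raw image is not actually grown. A corrected invocation would presumably be the following one-liner (a hypothetical fix, not part of this patch):

    # Hypothetical correction: pass the numeric size value through to truncate.
    yield execute('truncate -s %s %s' % (size, image))
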
--- nova/compute/disk.py | 7 +++++++ nova/virt/libvirt_conn.py | 21 +++++++++++++-------- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 9ba827519..a77c30a19 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -108,6 +108,13 @@ def partition(infile, outfile, local_bytes=0, resize=True, yield execute('parted --script %s mkpartfs primary %s %ds %ds' % (outfile, local_type, local_first, local_last)) +@defer.inlineCallbacks +def extend(image, size, execute): + file_size = os.path.getsize(image) + if file_size >= size: + return + yield execute('truncate -s size %s' % (image,)) + @defer.inlineCallbacks def inject_data(image, key=None, net=None, partition=None, execute=None): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5dcb05b1f..3529be333 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -421,13 +421,13 @@ class LibvirtConnection(object): yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) - if inst.kernel_id: + if inst['kernel_id']: if not os.path.exists(basepath('kernel')): - yield images.fetch(inst.kernel_id, basepath('kernel'), + yield images.fetch(inst['kernel_id'], basepath('kernel'), user, project) - if inst.ramdisk_id: + if inst['ramdisk_id']: if not os.path.exists(basepath('ramdisk')): - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), + yield images.fetch(inst['ramdisk_id'], basepath('ramdisk'), user, project) execute = lambda cmd, process_input = None, check_exit_code = True: \ @@ -439,7 +439,7 @@ class LibvirtConnection(object): # partitioned disk image where the target partition is the first # partition target_partition = None - if not inst.kernel_id: + if not inst['kernel_id']: target_partition = "1" key = str(inst['key_data']) @@ -472,7 +472,7 @@ class LibvirtConnection(object): ' into image %s (%s)', inst['name'], inst.image_id, e) - if inst.kernel_id: + if inst['kernel_id']: if os.path.exists(basepath('disk')): yield process.simple_execute('rm -f %s' % basepath('disk')) @@ -483,8 +483,13 @@ class LibvirtConnection(object): resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False - yield disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + + if inst['kernel_id']: + yield disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) + else: + os.rename(basepath('disk-raw'), basepath('disk')) + yield disk.extend(basepath('disk'), local_bytes, execute=execute) if FLAGS.libvirt_type == 'uml': yield process.simple_execute('sudo chown root %s' % -- cgit From 9c2fa95298000c0c9d3953f0ef87823ed604949a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 23:35:54 +0100 Subject: Remove default_{kernel,ramdisk} flags. They are not used anymore. 
--- nova/flags.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 45f5d7469..5c265f4ea 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -228,10 +228,6 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -DEFINE_string('default_kernel', 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', 'ari-11111', - 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'nokernel', -- cgit From a2a8406b5d793545c8ecb359e18b80bba618c509 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 14 Dec 2010 16:05:39 -0800 Subject: updates per review --- bin/nova-api | 9 ++--- bin/nova-combined | 3 -- bin/nova-scheduler | 2 +- nova/compute/manager.py | 4 +-- nova/manager.py | 2 +- nova/rpc.py | 1 - nova/service.py | 69 +-------------------------------------- nova/tests/service_unittest.py | 13 -------- nova/utils.py | 2 +- nova/virt/fake.py | 4 +-- nova/virt/libvirt_conn.py | 14 ++++---- nova/virt/xenapi/network_utils.py | 5 ++- nova/virt/xenapi/vmops.py | 3 +- nova/virt/xenapi_conn.py | 6 ++-- nova/volume/driver.py | 25 +++++++------- run_tests.py | 5 --- 16 files changed, 35 insertions(+), 132 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 210486666..3f433ea6d 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -36,6 +36,7 @@ from nova import flags from nova import utils from nova import wsgi + FLAGS = flags.FLAGS flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host') @@ -43,14 +44,10 @@ flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port') flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') -def main(): +if __name__ == '__main__': + utils.default_flagfile() FLAGS(sys.argv) server = wsgi.Server() server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host) server.wait() - - -if __name__ == '__main__': - utils.default_flagfile() - main() diff --git a/bin/nova-combined b/bin/nova-combined index e82b9c9b6..5f635b3a3 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -40,9 +40,6 @@ from nova import utils from nova import wsgi -FLAGS = flags.FLAGS -flags.DEFINE_integer('api_port', 8773, 'API port') - FLAGS = flags.FLAGS flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host') diff --git a/bin/nova-scheduler b/bin/nova-scheduler index a46dd8dda..59cb060d4 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -17,7 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" Starter script for Nova Scheduler.""" +"""Starter script for Nova Scheduler.""" import eventlet eventlet.monkey_patch() diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 051ce579d..f90f28b78 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -203,7 +203,7 @@ class ComputeManager(manager.Manager): volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, - volume_id) + volume_id) try: self.driver.attach_volume(instance_ref['name'], dev_path, @@ -238,7 +238,7 @@ class ComputeManager(manager.Manager): instance_ref['name']) else: self.driver.detach_volume(instance_ref['name'], - volume_ref['mountpoint']) + volume_ref['mountpoint']) self.volume_manager.remove_compute_volume(context, volume_id) self.db.volume_detached(context, volume_id) return True diff --git a/nova/manager.py b/nova/manager.py index a343d7fc6..3d38504bd 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -68,7 +68,7 @@ class Manager(base.Base): def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval""" - return + pass def init_host(self): """Do any initialization that needs to be run if this is a standalone diff --git a/nova/rpc.py b/nova/rpc.py index b5df4904b..6a3f552db 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -188,7 +188,6 @@ class AdapterConsumer(TopicConsumer): node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! - # pylint: disable-msg=W0142 try: rval = node_func(context=ctxt, **node_args) if msg_id: diff --git a/nova/service.py b/nova/service.py index 5c171b2ae..3d40e83a6 100644 --- a/nova/service.py +++ b/nova/service.py @@ -205,39 +205,6 @@ class Service(object): logging.exception("model server went away") -def stop(pidfile): - """ - Stop the daemon - """ - # Get the pid from the pidfile - try: - pf = file(pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if not pid: - message = "pidfile %s does not exist. 
Daemon not running?\n" - sys.stderr.write(message % pidfile) - # Not an error in a restart - return - - # Try killing the daemon process - try: - while 1: - os.kill(pid, signal.SIGKILL) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find("No such process") > 0: - if os.path.exists(pidfile): - os.remove(pidfile) - else: - print str(err) - sys.exit(1) - - def serve(*services): argv = FLAGS(sys.argv) @@ -247,38 +214,7 @@ def serve(*services): name = '_'.join(x.binary for x in services) logging.debug("Serving %s" % name) - logging.getLogger('amqplib').setLevel(logging.DEBUG) - - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - #if FLAGS.nodaemon and not FLAGS.logfile: - # FLAGS.logfile = "-" - #if not FLAGS.logfile: - # FLAGS.logfile = '%s.log' % name - #if not FLAGS.prefix: - # FLAGS.prefix = name - - action = 'start' - if len(argv) > 1: - action = argv.pop() - - if action == 'stop': - stop(FLAGS.pidfile) - sys.exit() - elif action == 'restart': - stop(FLAGS.pidfile) - elif action == 'start': - pass - else: - print 'usage: %s [options] [start|stop|restart]' % argv[0] - sys.exit(1) - - #formatter = logging.Formatter( - # '(%(name)s): %(levelname)s %(message)s') - #handler = logging.StreamHandler() - #handler.setFormatter(formatter) - #logging.getLogger().addHandler(handler) + logging.getLogger('amqplib').setLevel(logging.WARN) if FLAGS.verbose: logging.getLogger().setLevel(logging.DEBUG) @@ -292,9 +228,6 @@ def serve(*services): for x in services: x.start() - #while True: - # greenthread.sleep(5) - def wait(): while True: diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index c94af4a23..6bdc7071c 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -91,8 +91,6 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'AdapterConsumer', use_mock_anything=True) - #self.mox.StubOutWithMock( - # service.task, 'LoopingCall', use_mock_anything=True) rpc.AdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -106,17 +104,6 @@ class ServiceTestCase(test.TestCase): rpc.AdapterConsumer.attach_to_eventlet() rpc.AdapterConsumer.attach_to_eventlet() - # Stub out looping call a bit needlessly since we don't have an easy - # way to cancel it (yet) when the tests finishes - #service.task.LoopingCall(mox.IgnoreArg()).AndReturn( - # service.task.LoopingCall) - #service.task.LoopingCall.start(interval=mox.IgnoreArg(), - # now=mox.IgnoreArg()) - #service.task.LoopingCall(mox.IgnoreArg()).AndReturn( - # service.task.LoopingCall) - #service.task.LoopingCall.start(interval=mox.IgnoreArg(), - # now=mox.IgnoreArg()) - service_create = {'host': host, 'binary': binary, 'topic': topic, diff --git a/nova/utils.py b/nova/utils.py index 22bf5d8cf..5f2d47202 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -241,7 +241,7 @@ class LoopingCall(object): self.f(*self.args, **self.kw) greenthread.sleep(interval) except Exception: - logging.exception('hhmm') + logging.exception('in looping call') done.send_exception(*sys.exc_info()) return diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 91dc8173b..77bc926c2 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -122,13 +122,13 @@ class FakeConnection(object): """ Rescue the specified instance. """ - return + pass def unrescue(self, instance): """ Unrescue the specified instance. 
""" - return + pass def destroy(self, instance): """ diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 715e7234c..ba51f8f69 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -215,7 +215,7 @@ class LibvirtConnection(object): self._cleanup(instance) done.send() - greenthread.spawn(_wait_for_time) + greenthread.spawn(_wait_for_timer) return done def _cleanup(self, instance): @@ -365,9 +365,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = utils.execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) - return r[0] + out, err = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) + return out else: return '' @@ -388,8 +388,7 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - utils.execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special @@ -476,7 +475,6 @@ class LibvirtConnection(object): ['local_gb'] * 1024 * 1024 * 1024) - resize = inst['instance_type'] != 'm1.tiny' resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False @@ -743,7 +741,7 @@ class NWFilterFirewall(object): if callable(xml): xml = xml() - # execute in a native thread and block until done + # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index d8632f393..012954394 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -30,10 +30,9 @@ class NetworkHelper(): @classmethod def find_network_with_bridge(cls, session, bridge): - """ Return the network on which the bridge is attached, if found """ + """ Return the network on which the bridge is attached, if found.""" expr = 'field "bridge" = "%s"' % bridge - networks = session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: return networks.keys()[0] elif len(networks) > 1: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6b92b926..3034df9e1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -107,8 +107,7 @@ class VMOps(object): if vdis: for vdi in vdis: try: - task = self._session.call_xenapi('Async.VDI.destroy', - vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index df8e42d34..424311133 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -175,9 +175,11 @@ class XenAPISession(object): The task is polled until it completes.""" done = event.Event() - loop = utis.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingTask(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) - return done.wait() + rv = done.wait() + loop.stop() + return rv def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we diff --git a/nova/volume/driver.py b/nova/volume/driver.py index f675c9132..1cd4c1fd4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -22,10 +22,10 @@ Drivers for volumes. 
import logging import os +import time from nova import exception from nova import flags -from nova import process from nova import utils @@ -75,7 +75,7 @@ class VolumeDriver(object): raise logging.exception("Recovering from a failed execute." "Try number %s", tries) - self._execute("sleep %s" % tries ** 2) + time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" @@ -91,21 +91,20 @@ class VolumeDriver(object): sizestr = '%sG' % volume['size'] self._try_execute("sudo lvcreate -L %s -n %s %s" % (sizestr, - volume['name'], - FLAGS.volume_group)) + volume['name'], + FLAGS.volume_group)) def delete_volume(self, volume): """Deletes a logical volume.""" self._try_execute("sudo lvremove -f %s/%s" % (FLAGS.volume_group, - volume['name'])) + volume['name'])) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') escaped_name = volume['name'].replace('-', '--') - return "/dev/mapper/%s-%s" % (escaped_group, - escaped_name) + return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -165,7 +164,7 @@ class AOEDriver(VolumeDriver): # still works for the other volumes, so we # just wait a bit for the current volume to # be ready and ignore any errors. - self._execute("sleep 2") + time.sleep(2) self._execute("sudo vblade-persist auto all", check_exit_code=False) self._execute("sudo vblade-persist start all", @@ -275,9 +274,8 @@ class ISCSIDriver(VolumeDriver): def discover_volume(self, volume): """Discover volume on a remote host.""" - (iscsi_name, - iscsi_portal) = self._get_name_and_portal(volume['name'], - volume['host']) + iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], + volume['host']) self._execute("sudo iscsiadm -m node -T %s -p %s --login" % (iscsi_name, iscsi_portal)) self._execute("sudo iscsiadm -m node -T %s -p %s --op update " @@ -287,9 +285,8 @@ class ISCSIDriver(VolumeDriver): def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" - (iscsi_name, - iscsi_portal) = self._get_name_and_portal(volume['name'], - volume['host']) + iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], + volume['host']) self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v manual" % (iscsi_name, iscsi_portal)) diff --git a/run_tests.py b/run_tests.py index 6d7830a29..9f3708412 100644 --- a/run_tests.py +++ b/run_tests.py @@ -46,7 +46,6 @@ import __main__ import os import sys - from twisted.scripts import trial as trial_script from nova import flags @@ -66,8 +65,6 @@ from nova.tests.rpc_unittest import * from nova.tests.scheduler_unittest import * from nova.tests.service_unittest import * from nova.tests.twistd_unittest import * -from nova.tests.validator_unittest import * -from nova.tests.virt_unittest import * from nova.tests.virt_unittest import * from nova.tests.volume_unittest import * @@ -85,8 +82,6 @@ if __name__ == '__main__': config = OptionsClass() argv = config.parseOptions() - argv = FLAGS(sys.argv) - FLAGS.verbose = True # TODO(termie): these should make a call instead of doing work on import -- cgit From 99347717ed2c7e92b3dc3bd33c12a3a05e8e349d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 15 Dec 2010 00:25:04 +0000 Subject: Lockout middleware for ec2 api --- nova/api/ec2/__init__.py | 70 ++++++++++++++++++++++++++++++++- nova/fakememcache.py | 50 ++++++++++++++++++++++++ 
nova/tests/middleware_unittest.py | 82 +++++++++++++++++++++++++++++++++++++++ run_tests.py | 1 + 4 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 nova/fakememcache.py create mode 100644 nova/tests/middleware_unittest.py diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..19eb666cd 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -22,12 +22,13 @@ Starting point for routing EC2 requests. import logging import routes +import time import webob import webob.dec import webob.exc -from nova import exception from nova import context +from nova import exception from nova import flags from nova import wsgi from nova.api.ec2 import apirequest @@ -37,6 +38,16 @@ from nova.auth import manager FLAGS = flags.FLAGS +flags.DEFINE_boolean('use_lockout', False, + 'Whether or not to use lockout middleware.') +flags.DEFINE_integer('lockout_attempts', 5, + 'Number of failed auths before lockout.') +flags.DEFINE_integer('lockout_minutes', 15, + 'Number of minutes to lockout if triggered.') +flags.DEFINE_list('lockout_memcached_servers', None, + 'Memcached servers or None for in process cache.') + + _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -47,6 +58,63 @@ class API(wsgi.Middleware): def __init__(self): self.application = Authenticate(Router(Authorizer(Executor()))) + if FLAGS.use_lockout: + self.application = Lockout(self.application) + + +class Lockout(wsgi.Middleware): + """Only allow x failed auths in a y minute period. + + x = lockout_attempts flag + y = lockout_timeout flag + + Uses memcached if lockout_memcached_servers flag is set, otherwise it + uses a very simple in-proccess cache. Due to the simplicity of + the implementation, the timeout window is reset with every failed + request, so it actually blocks if there are x failed logins with no + more than y minutes between any two failures. + + There is a possible race condition where simultaneous requests could + sneak in before the lockout hits, but this is extremely rare and would + only result in a couple of extra failed attempts.""" + + def __init__(self, application, time_fn=time.time): + """The middleware can use a custom time function for testing.""" + self.time_fn = time_fn + if FLAGS.lockout_memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + self.mc = memcache.Client(FLAGS.lockout_memcached_servers, debug=0) + super(Lockout, self).__init__(application) + + @webob.dec.wsgify + def __call__(self, req): + access_key = req.params['AWSAccessKeyId'] + failures_key = "%s-failures" % access_key + last_key = "%s-last" % access_key + now = self.time_fn() + timeout = now - FLAGS.lockout_minutes * 60 + # NOTE(vish): To use incr, failures has to be a string. + failures = int(self.mc.get(failures_key) or 0) + last = self.mc.get(last_key) + if (failures and failures >= FLAGS.lockout_attempts + and last > timeout): + self.mc.set(last_key, now) + detail = "Too many failed authentications." + raise webob.exc.HTTPForbidden(detail=detail) + res = req.get_response(self.application) + if res.status_int == 403: + if last > timeout: + failures = int(self.mc.incr(failures_key)) + if failures >= FLAGS.lockout_attempts: + _log.warn('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.' 
% + (access_key, failures, FLAGS.lockout_minutes)) + else: + self.mc.set(failures_key, '1') + self.mc.set(last_key, now) + return res class Authenticate(wsgi.Middleware): diff --git a/nova/fakememcache.py b/nova/fakememcache.py new file mode 100644 index 000000000..0b2e3b6c1 --- /dev/null +++ b/nova/fakememcache.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Super simple fake memcache client.""" + + +class Client(object): + """Replicates a tiny subset of memcached client interface.""" + __cache = {} + + def __init__(self, *args, **kwargs): + """Ignores all constructor params.""" + pass + + def get(self, key): + """Retrieves the value for a key or None.""" + return self.__cache.get(key, None) + + def set(self, key, value): + """Sets the value for a key.""" + self.__cache[key] = value + return True + + def add(self, key, value): + """Sets the value for a key if it doesn't exist.""" + if key in self.__cache: + return False + return self.set(key, value) + + def incr(self, key, delta=1): + """Increments the value for a key.""" + if not key in self.__cache: + return 0 + self.__cache[key] = str(int(self.__cache[key]) + 1) + return self.__cache[key] diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py new file mode 100644 index 000000000..bbbd4a5a7 --- /dev/null +++ b/nova/tests/middleware_unittest.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import webob +import webob.dec +import webob.exc + +from nova.api import ec2 +from nova import flags +from nova import test + + +FLAGS = flags.FLAGS + + +@webob.dec.wsgify +def conditional_forbid(req): + """Helper wsgi app returns 403 if param 'die' is 1.""" + if 'die' in req.params and req.params['die'] == '1': + raise webob.exc.HTTPForbidden() + return 'OK' + + +class LockoutTestCase(test.TrialTestCase): + """Test case for the Lockout middleware.""" + def setUp(self): # pylint: disable-msg=C0103 + self.local_time = 0 + self.lockout = ec2.Lockout(conditional_forbid, + time_fn=self._constant_time) + super(LockoutTestCase, self).setUp() + + def _constant_time(self): + """Helper method to force timeouts.""" + return self.local_time + + def _trigger_lockout(self, access_key): + """Send x failed requests where x = lockout_attempts.""" + for i in xrange(FLAGS.lockout_attempts): + req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) + self.assertEqual(req.get_response(self.lockout).status_int, 403) + + def _is_locked_out(self, access_key): + """Sends a test request to see if key is locked out.""" + req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) + return (req.get_response(self.lockout).status_int == 403) + + def _timeout(self): + """Increment time to 1 second past the lockout.""" + self.local_time = 1 + self.local_time + FLAGS.lockout_minutes * 60 + + def test_lockout(self): + self._trigger_lockout('test') + self.assertTrue(self._is_locked_out('test')) + + def test_timeout(self): + self._trigger_lockout('test') + self.assertTrue(self._is_locked_out('test')) + self._timeout() + self.assertFalse(self._is_locked_out('test')) + + def test_multiple_keys(self): + self._trigger_lockout('test1') + self.assertTrue(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + self._timeout() + self.assertFalse(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) diff --git a/run_tests.py b/run_tests.py index 37a548e4c..a0ef3fd99 100644 --- a/run_tests.py +++ b/run_tests.py @@ -57,6 +57,7 @@ from nova.tests.auth_unittest import * from nova.tests.cloud_unittest import * from nova.tests.compute_unittest import * from nova.tests.flags_unittest import * +from nova.tests.middleware_unittest import * from nova.tests.misc_unittest import * from nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * -- cgit From e84f46e739c56b7ae186866f33c713a0ac98e770 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 13:15:19 +0100 Subject: Make sure the new, consolidated template gets included. 
--- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 982b727aa..199ce30b6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template include nova/virt/interfaces.template -include nova/virt/libvirt.*.xml.template +include nova/virt/libvirt*.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ -- cgit From b0279030127b7fe8df21db12a8727ea623ca46e2 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 15 Dec 2010 09:38:38 -0800 Subject: clean up code to use timeout instead of two keys --- nova/api/ec2/__init__.py | 58 ++++++++++++++++++--------------------- nova/fakememcache.py | 38 +++++++++++++++---------- nova/tests/middleware_unittest.py | 27 +++++++++++------- 3 files changed, 68 insertions(+), 55 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 19eb666cd..381b0e871 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -22,7 +22,6 @@ Starting point for routing EC2 requests. import logging import routes -import time import webob import webob.dec import webob.exc @@ -44,6 +43,8 @@ flags.DEFINE_integer('lockout_attempts', 5, 'Number of failed auths before lockout.') flags.DEFINE_integer('lockout_minutes', 15, 'Number of minutes to lockout if triggered.') +flags.DEFINE_integer('lockout_window', 15, + 'Number of minutes for lockout window.') flags.DEFINE_list('lockout_memcached_servers', None, 'Memcached servers or None for in process cache.') @@ -53,7 +54,6 @@ _log.setLevel(logging.DEBUG) class API(wsgi.Middleware): - """Routing for all EC2 API requests.""" def __init__(self): @@ -63,57 +63,53 @@ class API(wsgi.Middleware): class Lockout(wsgi.Middleware): - """Only allow x failed auths in a y minute period. + """Lockout for x minutes on y failed auths in a z minute period. - x = lockout_attempts flag - y = lockout_timeout flag + x = lockout_timeout flag + y = lockout_window flag + z = lockout_attempts flag Uses memcached if lockout_memcached_servers flag is set, otherwise it uses a very simple in-proccess cache. Due to the simplicity of - the implementation, the timeout window is reset with every failed - request, so it actually blocks if there are x failed logins with no - more than y minutes between any two failures. + the implementation, the timeout window is started with the first + failed request, so it will block if there are x failed logins within + that period. 
There is a possible race condition where simultaneous requests could sneak in before the lockout hits, but this is extremely rare and would only result in a couple of extra failed attempts.""" - def __init__(self, application, time_fn=time.time): - """The middleware can use a custom time function for testing.""" - self.time_fn = time_fn + def __init__(self, application, time_fn=None): + """middleware can pass a custom time function to fake for testing.""" if FLAGS.lockout_memcached_servers: import memcache + self.mc = memcache.Client(FLAGS.lockout_memcached_servers, + debug=0) else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.lockout_memcached_servers, debug=0) + from nova import fakememcache + self.mc = fakememcache.Client(time_fn=time_fn) super(Lockout, self).__init__(application) @webob.dec.wsgify def __call__(self, req): access_key = req.params['AWSAccessKeyId'] - failures_key = "%s-failures" % access_key - last_key = "%s-last" % access_key - now = self.time_fn() - timeout = now - FLAGS.lockout_minutes * 60 - # NOTE(vish): To use incr, failures has to be a string. + failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) - last = self.mc.get(last_key) - if (failures and failures >= FLAGS.lockout_attempts - and last > timeout): - self.mc.set(last_key, now) + if failures >= FLAGS.lockout_attempts: detail = "Too many failed authentications." raise webob.exc.HTTPForbidden(detail=detail) res = req.get_response(self.application) if res.status_int == 403: - if last > timeout: - failures = int(self.mc.incr(failures_key)) - if failures >= FLAGS.lockout_attempts: - _log.warn('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.' % - (access_key, failures, FLAGS.lockout_minutes)) - else: - self.mc.set(failures_key, '1') - self.mc.set(last_key, now) + failures = self.mc.incr(failures_key) + if failures is None: + # NOTE(vish): To use incr, failures has to be a string. + self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) + elif failures >= FLAGS.lockout_attempts: + _log.warn('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.' 
% + (access_key, failures, FLAGS.lockout_minutes)) + self.mc.set(failures_key, str(failures), + time=FLAGS.lockout_minutes * 60) return res diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 0b2e3b6c1..0b4037ef6 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,33 +18,43 @@ """Super simple fake memcache client.""" +import time + class Client(object): """Replicates a tiny subset of memcached client interface.""" - __cache = {} - def __init__(self, *args, **kwargs): - """Ignores all constructor params.""" - pass + def __init__(self, time_fn=time.time, *args, **kwargs): + """Time fn is to allow testing through a custom function""" + self.time_fn = time_fn + self.cache = {} def get(self, key): """Retrieves the value for a key or None.""" - return self.__cache.get(key, None) + (timeout, value) = self.cache.get(key, (0, None)) + if timeout == 0 or self.time_fn() < timeout: + return value + return None - def set(self, key, value): + def set(self, key, value, time=0, min_compress_len=0): """Sets the value for a key.""" - self.__cache[key] = value + timeout = 0 + if time != 0: + timeout = self.time_fn() + time + self.cache[key] = (timeout, value) return True - def add(self, key, value): + def add(self, key, value, time=0, min_compress_len=0): """Sets the value for a key if it doesn't exist.""" - if key in self.__cache: + if not self.get(key) is None: return False - return self.set(key, value) + return self.set(key, value, time, min_compress_len) def incr(self, key, delta=1): """Increments the value for a key.""" - if not key in self.__cache: - return 0 - self.__cache[key] = str(int(self.__cache[key]) + 1) - return self.__cache[key] + value = self.get(key) + if value is None: + return None + new_value = int(value) + delta + self.cache[key] = (self.cache[key][0], str(new_value)) + return new_value diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py index bbbd4a5a7..61a790c1f 100644 --- a/nova/tests/middleware_unittest.py +++ b/nova/tests/middleware_unittest.py @@ -48,9 +48,9 @@ class LockoutTestCase(test.TrialTestCase): """Helper method to force timeouts.""" return self.local_time - def _trigger_lockout(self, access_key): - """Send x failed requests where x = lockout_attempts.""" - for i in xrange(FLAGS.lockout_attempts): + def _send_bad_attempts(self, access_key, num_attempts=1): + """Fail x.""" + for i in xrange(num_attempts): req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) self.assertEqual(req.get_response(self.lockout).status_int, 403) @@ -59,24 +59,31 @@ class LockoutTestCase(test.TrialTestCase): req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) return (req.get_response(self.lockout).status_int == 403) - def _timeout(self): + def _advance_time(self, time): """Increment time to 1 second past the lockout.""" - self.local_time = 1 + self.local_time + FLAGS.lockout_minutes * 60 + self.local_time = self.local_time + time def test_lockout(self): - self._trigger_lockout('test') + self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) def test_timeout(self): - self._trigger_lockout('test') + self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) - self._timeout() + self._advance_time(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test')) def test_multiple_keys(self): - self._trigger_lockout('test1') + self._send_bad_attempts('test1', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test1')) 
self.assertFalse(self._is_locked_out('test2')) - self._timeout() + self._advance_time(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) + + def test_window_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + self._advance_time(FLAGS.lockout_window * 60) + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) -- cgit From dada56794679b213b2d80e4e1f907a212b73f54e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Dec 2010 17:50:05 +0000 Subject: * code cleanup * revised unittest approach * added stubout and a number of tests --- nova/tests/xenapi_unittest.py | 136 ++++++++++++++++++++++++++++++++++++++---- nova/virt/xenapi/__init__.py | 22 +------ nova/virt/xenapi/fake.py | 40 +------------ nova/virt/xenapi/vmops.py | 6 +- nova/virt/xenapi/volumeops.py | 7 +-- nova/virt/xenapi_conn.py | 24 ++++---- 6 files changed, 146 insertions(+), 89 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c9be17327..b9955a946 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -31,7 +31,7 @@ # under the License. -import mox +import stubout import uuid from twisted.internet import defer @@ -51,20 +51,34 @@ from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volumeops +from boto.ec2.volume import Volume FLAGS = flags.FLAGS +def stubout_session(stubs, cls): + def fake_import(self): + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + class XenAPIVolumeTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fake.reset() def _create_volume(self, size='0'): """Create a volume object.""" @@ -78,11 +92,12 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vol['attach_status'] = "detached" return db.volume_create(context.get_admin_context(), vol) - def test_create_iscsi_storage_raise_no_exception(self): - fake.reset() + def test_create_iscsi_storage(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper - helper.late_import(FLAGS) + helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] @@ -93,8 +108,25 @@ class XenAPIVolumeTestCase(test.TrialTestCase): description) db.volume_destroy(context.get_admin_context(), vol['id']) + def test_parse_volume_info_raise_exception(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') + helper = volume_utils.VolumeHelper + helper.XenAPI = 
session.get_imported_xenapi() + vol = self._create_volume() + # oops, wrong mount point! + info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') + + def check(exc): + self.assertIsInstance(exc.value, volume_utils.StorageError) + + info.addErrback(check) + db.volume_destroy(context.get_admin_context(), vol['id']) + def test_attach_volume(self): - fake.reset() + """ This shows how to test Ops classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, @@ -116,13 +148,34 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result.addCallback(check) return result + def test_attach_volume_raise_exception(self): + """ This shows how to test when exceptions are raised """ + stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + conn = xenapi_conn.get_connection(False) + volume = self._create_volume() + instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, + 'm1.large', 'aa:bb:cc:dd:ee:ff') + fake.create_vm(instance.name, 'Running') + result = conn.attach_volume(instance.name, volume['ec2_id'], + '/dev/sdc') + + def check(exc): + if exc: + pass + else: + self.fail('Oops, no exception has been raised!') + + result.addErrback(check) + return result + def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() + self.stubs.UnsetAll() class XenAPIVMTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVMTestCase, self).setUp() @@ -131,19 +184,20 @@ class XenAPIVMTestCase(test.TrialTestCase): admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) - #test_list_instances_0.skip = "E" def test_spawn(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, 'm1.large', 'aa:bb:cc:dd:ee:ff') @@ -186,6 +240,7 @@ class XenAPIVMTestCase(test.TrialTestCase): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.stubs.UnsetAll() class FakeInstance(): @@ -199,3 +254,64 @@ class FakeInstance(): self.ramdisk_id = ramdisk_id self.instance_type = instance_type self.mac_address = mac_address + + +class FakeSessionForVMTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug 
+ pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # test failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # test failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 1a2903b98..c7038deae 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,31 +20,11 @@ """ -def load_sdk(flags): - """ - This method is used for loading the XenAPI SDK (fake or real) - """ - xenapi_module = \ - flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - flags.xenapi_use_fake_session and ['fake'] or [] - - return __import__(xenapi_module, globals(), locals(), from_list, -1) - - class HelperBase(): """ - The class that wraps the helper methods together. + The base for helper classes. This adds the XenAPI class attribute """ XenAPI = None def __init__(self): return - - @classmethod - def late_import(cls, FLAGS): - """ - Load XenAPI module in for helper class - """ - if cls.XenAPI is None: - cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index f5fea3cc2..038064e8e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -154,7 +154,7 @@ class Failure(Exception): def __str__(self): try: return str(self.details) - except Exception, exn: + except Exception, exc: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): @@ -324,8 +324,8 @@ class SessionBase(object): try: task['result'] = self.xenapi_request(func, params[1:]) task['status'] = 'success' - except Failure, exn: - task['error_info'] = exn.details + except Failure, exc: + task['error_info'] = exc.details task['status'] = 'failed' task['finished'] = datetime.datetime.now() return task_ref @@ -372,37 +372,3 @@ class _Dispatcher: def __call__(self, *args): return self.__send(self.__name, args) - - -class FakeSession(SessionBase): - def __init__(self, uri): - super(FakeSession, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = get_all('VDI') - for ref in refs: - rec = get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise Failure([['INVALID_VDI', 'session', 
self._session]]) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 36b8fecc2..abaa1f5f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -28,7 +28,6 @@ from nova import flags from nova import exception from nova.auth.manager import AuthManager -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -38,10 +37,9 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session - # Load XenAPI module in the helper class - VMHelper.late_import(flags.FLAGS) + VMHelper.XenAPI = self.XenAPI def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 1b337a6ed..68806c4c2 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -22,7 +22,6 @@ import logging from twisted.internet import defer from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -33,11 +32,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import(flags.FLAGS) - VMHelper.late_import(flags.FLAGS) + VolumeHelper.XenAPI = self.XenAPI + VMHelper.XenAPI = self.XenAPI @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 5f2b9c7c6..a7e3a6723 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -36,7 +36,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. **Related Flags** -:xenapi_use_fake_session: To be set for unit testing :xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. :xenapi_connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). @@ -59,15 +58,11 @@ from twisted.internet import reactor from nova import utils from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS -flags.DEFINE_boolean('xenapi_use_fake_session', - False, - 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -159,15 +154,14 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self, url, user, pw): - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - self.XenAPI = load_sdk(FLAGS) - if FLAGS.xenapi_use_fake_session: - self._session = self.XenAPI.FakeSession(url) - else: - self._session = self.XenAPI.Session(url) + self.XenAPI = self.get_imported_xenapi() + self._session = self._create_session(url) self._session.login_with_password(user, pw) + def get_imported_xenapi(self): + """Stubout point. 
This can be replaced with a mock xenapi module.""" + return __import__('XenAPI') + def get_xenapi(self): """ Return the xenapi object """ return self._session.xenapi @@ -200,6 +194,10 @@ class XenAPISession(object): reactor.callLater(0, self._poll_task, task, d) return d + def _create_session(self, url): + """Stubout point. This can be replaced with a mock session.""" + return self.XenAPI.Session(url) + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -220,7 +218,7 @@ class XenAPISession(object): error_info) deferred.errback(self.XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) - except self.XenAPI.Failure, exc: + except Exception, exc: logging.warn(exc) deferred.errback(exc) -- cgit From e893be0a8d32cf1eb2c91187b81a6febf90e5b7c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 15 Dec 2010 18:28:00 +0000 Subject: Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again. --- nova/auth/opendj.sh | 1 + nova/auth/openssh-lpk_openldap.schema | 19 +++++++++++++++++++ nova/auth/openssh-lpk_sun.schema | 10 ++++++++++ nova/auth/slap.sh | 1 + 4 files changed, 31 insertions(+) create mode 100644 nova/auth/openssh-lpk_openldap.schema create mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 9a9600342..1a280e5a8 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,6 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 000000000..93351da6d --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 000000000..5f52db3b6 --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index 36c4ba37b..95c61dafd 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -21,6 +21,7 @@ apt-get install -y slapd ldap-utils python-ldap abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig -- cgit From fdf067037981c2b4b4501258919af0f9e1d0ec26 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 10:38:30 -0800 Subject: add missing import --- nova/virt/xenapi_conn.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 424311133..a88101ad0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -51,6 +51,8 @@ import logging import sys import xmlrpclib +from eventlet import event + from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps -- cgit From dd4ee43cc2042299ed7a56b4690999fa1df120a1 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 15 Dec 2010 11:23:33 -0800 Subject: clean up tests and add overriden time method to utils --- nova/api/ec2/__init__.py | 11 +++++----- nova/fakememcache.py | 11 +++++----- nova/tests/middleware_unittest.py | 23 ++++++++++----------- nova/utils.py | 42 +++++++++++++++++++++++++++++++++++++-- 4 files changed, 60 insertions(+), 27 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 381b0e871..5ae15f2ae 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -79,15 +79,14 @@ class Lockout(wsgi.Middleware): sneak in before the lockout hits, but this is extremely rare and would only result in a couple of extra failed attempts.""" - def __init__(self, application, time_fn=None): - """middleware can pass a custom time function to fake for testing.""" + def __init__(self, application): + """middleware can use fake for testing.""" if FLAGS.lockout_memcached_servers: import memcache - self.mc = memcache.Client(FLAGS.lockout_memcached_servers, - debug=0) else: - from nova import fakememcache - self.mc = fakememcache.Client(time_fn=time_fn) + from nova import fakememcache as memcache + self.mc = memcache.Client(FLAGS.lockout_memcached_servers, + debug=0) super(Lockout, self).__init__(application) @webob.dec.wsgify diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 0b4037ef6..67f46dbdc 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,21 +18,20 @@ """Super simple fake memcache client.""" -import time +import utils class Client(object): """Replicates a tiny subset of memcached client interface.""" - def __init__(self, time_fn=time.time, *args, **kwargs): - """Time fn is to allow testing through a custom function""" - self.time_fn = time_fn + def __init__(self, *args, **kwargs): + """Ignores the passed in args""" self.cache = {} def get(self, key): """Retrieves the value for a key or None.""" (timeout, value) = self.cache.get(key, (0, None)) - if timeout == 0 or self.time_fn() < timeout: + if timeout == 0 or utils.utcnow_ts() < timeout: return value return None @@ -40,7 
+39,7 @@ class Client(object): """Sets the value for a key.""" timeout = 0 if time != 0: - timeout = self.time_fn() + time + timeout = utils.utcnow_ts() + time self.cache[key] = (timeout, value) return True diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py index 61a790c1f..0febf52d6 100644 --- a/nova/tests/middleware_unittest.py +++ b/nova/tests/middleware_unittest.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. +import datetime import webob import webob.dec import webob.exc @@ -23,6 +24,7 @@ import webob.exc from nova.api import ec2 from nova import flags from nova import test +from nova import utils FLAGS = flags.FLAGS @@ -39,14 +41,13 @@ def conditional_forbid(req): class LockoutTestCase(test.TrialTestCase): """Test case for the Lockout middleware.""" def setUp(self): # pylint: disable-msg=C0103 - self.local_time = 0 - self.lockout = ec2.Lockout(conditional_forbid, - time_fn=self._constant_time) super(LockoutTestCase, self).setUp() + utils.set_time_override() + self.lockout = ec2.Lockout(conditional_forbid) - def _constant_time(self): - """Helper method to force timeouts.""" - return self.local_time + def tearDown(self): # pylint: disable-msg=C0103 + utils.clear_time_override() + super(LockoutTestCase, self).tearDown() def _send_bad_attempts(self, access_key, num_attempts=1): """Fail x.""" @@ -59,10 +60,6 @@ class LockoutTestCase(test.TrialTestCase): req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) return (req.get_response(self.lockout).status_int == 403) - def _advance_time(self, time): - """Increment time to 1 second past the lockout.""" - self.local_time = self.local_time + time - def test_lockout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) @@ -70,20 +67,20 @@ class LockoutTestCase(test.TrialTestCase): def test_timeout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) - self._advance_time(FLAGS.lockout_minutes * 60) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test')) def test_multiple_keys(self): self._send_bad_attempts('test1', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) - self._advance_time(FLAGS.lockout_minutes * 60) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) def test_window_timeout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) self.assertFalse(self._is_locked_out('test')) - self._advance_time(FLAGS.lockout_window * 60) + utils.advance_time_seconds(FLAGS.lockout_window * 60) self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) self.assertFalse(self._is_locked_out('test')) diff --git a/nova/utils.py b/nova/utils.py index 142584df8..048a9d974 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -21,7 +21,6 @@ System-level utilities and helper functions. 
""" import datetime -import functools import inspect import logging import os @@ -29,6 +28,7 @@ import random import subprocess import socket import sys +import time from xml.sax import saxutils from twisted.internet.threads import deferToThread @@ -165,13 +165,51 @@ def get_my_ip(): return "127.0.0.1" +def utcnow(): + """Overridable version of datetime.datetime.utcnow.""" + if utcnow.override_time: + return utcnow.override_time + return datetime.datetime.utcnow() + + +utcnow.override_time = None + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return time.mktime(utcnow().timetuple()) + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """Override utils.utcnow to return a constant time.""" + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overriden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overriden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + def isotime(at=None): + """Returns iso formatted utcnow.""" if not at: - at = datetime.datetime.utcnow() + at = utcnow() return at.strftime(TIME_FORMAT) def parse_isotime(timestr): + """Turn an iso formatted time back into a datetime""" return datetime.datetime.strptime(timestr, TIME_FORMAT) -- cgit From 9a8113584edc9a8dbf42e7039b373429c11a7760 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 11:53:18 -0800 Subject: fixes for xenapi (thanks sandywalsh) --- nova/virt/xenapi_conn.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a88101ad0..09d399da4 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -52,6 +52,7 @@ import sys import xmlrpclib from eventlet import event +from eventlet import tpool from nova import utils from nova import flags @@ -164,20 +165,20 @@ class XenAPISession(object): f = self._session.xenapi for m in method.split('.'): f = f.__getattr__(m) - return f(*args) + return tpool.execute(f, *args) def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" - return _unwrap_plugin_exceptions( - self._session.xenapi.Async.host.call_plugin, - self.get_xenapi_host(), plugin, fn, args) + return tpool.execute(_unwrap_plugin_exceptions, + self._session.xenapi.Async.host.call_plugin, + self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() -- cgit From e1da5d66b2e33a043e7e9ee357d9769276d6e302 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 15 Dec 2010 13:14:28 -0800 Subject: memcached requires strings not unicode --- nova/api/ec2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5ae15f2ae..def0ee207 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -91,7 +91,7 @@ class Lockout(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): - access_key = req.params['AWSAccessKeyId'] + access_key = str(req.params['AWSAccessKeyId']) failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) if failures >= FLAGS.lockout_attempts: -- cgit From a87b4081c6617ba193836ad12008204d62814549 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 15 Dec 2010 18:23:51 -0400 Subject: fixup after merge with trunk --- nova/virt/libvirt_conn.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d7abc874a..d4d616b31 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -295,12 +295,10 @@ class LibvirtConnection(object): def pause(self, instance, callback): raise exception.APIError("pause not supported for libvirt.") - @defer.inlineCallbacks @exception.wrap_exception def unpause(self, instance, callback): raise exception.APIError("unpause not supported for libvirt.") - @defer.inlineCallbacks @exception.wrap_exception def rescue(self, instance): self.destroy(instance, False) -- cgit From a4db44b94e611798d57ad59f4d4dbb5fb00516db Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 16:33:38 +0000 Subject: Removed FakeInstance and introduced stubout for DB. Code clean-up --- nova/tests/db/__init__.py | 33 +++++++++ nova/tests/db/fakes.py | 61 ++++++++++++++++ nova/tests/xenapi/__init__.py | 33 +++++++++ nova/tests/xenapi/stubs.py | 98 +++++++++++++++++++++++++ nova/tests/xenapi_unittest.py | 165 +++++++++++------------------------------- 5 files changed, 267 insertions(+), 123 deletions(-) create mode 100644 nova/tests/db/__init__.py create mode 100644 nova/tests/db/fakes.py create mode 100644 nova/tests/xenapi/__init__.py create mode 100644 nova/tests/xenapi/stubs.py diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/db/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py new file mode 100644 index 000000000..b3fb56c69 --- /dev/null +++ b/nova/tests/db/fakes.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +import time + +from nova import db +from nova import utils +from nova.compute import instance_types + + +def stub_out_db_instance_api(stubs): + """ Stubs out the db API for creating Instances """ + + class FakeInstance(object): + """ Stubs out the Instance model """ + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def fake_create(values): + """ Stubs out the db.instance_create method """ + + type_data = instance_types.INSTANCE_TYPES[values['instance_type']] + + base_options = { + 'name': values['name'], + 'reservation_id': utils.generate_uid('r'), + 'image_id': values['image_id'], + 'kernel_id': values['kernel_id'], + 'ramdisk_id': values['ramdisk_id'], + 'state_description': 'scheduling', + 'user_id': values['user_id'], + 'project_id': values['project_id'], + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': values['instance_type'], + 'memory_mb': type_data['memory_mb'], + 'mac_address': values['mac_address'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + } + return FakeInstance(base_options) + + stubs.Set(db, 'instance_create', fake_create) diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/xenapi/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py new file mode 100644 index 000000000..525189388 --- /dev/null +++ b/nova/tests/xenapi/stubs.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +from nova.virt import xenapi_conn +from nova.virt.xenapi import fake + + +def stubout_session(stubs, cls): + """ Stubs out two methods from XenAPISession """ + def fake_import(self): + """ Stubs out get_imported_xenapi of XenAPISession """ + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + +class FakeSessionForVMTests(fake.SessionBase): + """ Stubs out a XenAPISession for VM tests """ + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + """ Stubs out a XenAPISession for Volume tests """ + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + """ Stubs 
out a XenAPISession for Volume tests: it injects failures """ + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # This is for testing failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # This is for testing failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index b9955a946..c2612a4c5 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -30,16 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +""" +Test suite for XenAPI +""" import stubout -import uuid - -from twisted.internet import defer -from twisted.internet import threads from nova import db from nova import context -from nova import exception from nova import flags from nova import test from nova import utils @@ -49,28 +31,15 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volumeops -from boto.ec2.volume import Volume +from nova.tests.db import fakes +from nova.tests.xenapi import stubs FLAGS = flags.FLAGS -def stubout_session(stubs, cls): - def fake_import(self): - fake_module = 'nova.virt.xenapi.fake' - from_list = ['fake'] - return __import__(fake_module, globals(), locals(), from_list, -1) - - stubs.Set(xenapi_conn.XenAPISession, '_create_session', - lambda s, url: cls(url)) - stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', - fake_import) - - class XenAPIVolumeTestCase(test.TrialTestCase): """ - Unit tests for VM operations + Unit tests for Volume operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() @@ -78,7 +47,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fakes.stub_out_db_instance_api(self.stubs) fake.reset() + self.values = {'name': 1, + 'project_id': 'fake', + 'user_id': 'fake', + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } def _create_volume(self, size='0'): """Create a volume object.""" @@ -94,7 +73,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_create_iscsi_storage(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, 
stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -106,11 +85,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info, label, description) + srs = fake.get_all('SR') + self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) def test_parse_volume_info_raise_exception(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -119,6 +100,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') def check(exc): + """ handler """ self.assertIsInstance(exc.value, volume_utils.StorageError) info.addErrback(check) @@ -126,16 +108,16 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume(self): """ This shows how to test Ops classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(_): + """ handler """ # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -150,16 +132,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ - stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + stubs.stubout_session(self.stubs, + stubs.FakeSessionForVolumeFailedTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(exc): + """ handler """ if exc: pass else: @@ -188,22 +171,32 @@ class XenAPIVMTestCase(test.TrialTestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() + fakes.stub_out_db_instance_api(self.stubs) fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) def test_spawn(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } conn = xenapi_conn.get_connection(False) - instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(values) result = conn.spawn(instance) def check(_): + """ 
handler """ instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -241,77 +234,3 @@ class XenAPIVMTestCase(test.TrialTestCase): self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.stubs.UnsetAll() - - -class FakeInstance(): - def __init__(self, name, project_id, user_id, image_id, kernel_id, - ramdisk_id, instance_type, mac_address): - self.name = name - self.project_id = project_id - self.user_id = user_id - self.image_id = image_id - self.kernel_id = kernel_id - self.ramdisk_id = ramdisk_id - self.instance_type = instance_type - self.mac_address = mac_address - - -class FakeSessionForVMTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVMTests, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - -class FakeSessionForVolumeTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVolumeTests, self).__init__(uri) - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - def __init__(self, uri): - super(FakeSessionForVolumeFailedTests, self).__init__(uri) - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # test failure - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - def VBD_plug(self, _1, _2): - # test failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) -- cgit From 8152acf7c3df83a04591fdafb21201965da7bfad Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 17:47:48 +0000 Subject: fake session clean-up --- nova/tests/db/__init__.py | 16 ---------------- nova/tests/xenapi/__init__.py | 16 ---------------- nova/virt/xenapi/fake.py | 34 ++++++++++++++++++++++------------ 3 files changed, 22 insertions(+), 44 deletions(-) diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/db/__init__.py +++ b/nova/tests/db/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/xenapi/__init__.py +++ b/nova/tests/xenapi/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 038064e8e..a46f3fd80 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -105,9 +105,10 @@ def create_vdi(name_label, read_only, sr_ref, sharable): }) -def create_pbd(config, attached): +def create_pbd(config, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'SR': sr_ref, 'currently-attached': attached, }) @@ -126,6 +127,21 @@ def _create_object(table, obj): return ref +def _create_sr(table, obj): + sr_type = obj[6] + # Forces fake to support iscsi only + if sr_type != 'iscsi': + raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + sr_ref = _create_object(table, obj[2]) + vdi_ref = create_vdi('', False, sr_ref, False) + pbd_ref = create_pbd('', sr_ref, True) + _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + _db_content['VDI'][vdi_ref]['SR'] = sr_ref + _db_content['PBD'][pbd_ref]['SR'] = sr_ref + return sr_ref + + def get_all(table): return _db_content[table].keys() @@ -296,19 +312,13 @@ class SessionBase(object): def _create(self, name, params): self._check_session(params) - expected = 2 - if name == 'SR.create': - expected = 10 + is_sr_create = name == 'SR.create' + # Storage Repositories have a different API + expected = is_sr_create and 10 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') - if name == 'SR.create': - vdi_ref = create_vdi('', False, '', False) - pbd_ref = create_pbd('', True) - params[2]['VDIs'] = [vdi_ref] - params[2]['PBDs'] = [pbd_ref] - ref = _create_object(cls, params[2]) - else: - ref = _create_object(cls, params[1]) + ref = is_sr_create and \ + _create_sr(cls, params) or _create_object(cls, params[1]) obj = get_record(cls, ref) # Add RO fields -- cgit From e01e6d7976adfd99addf31f4f914c7625a394fda Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 16 Dec 2010 12:09:38 -0600 Subject: Moved implementation specific stuff from the middleware into their respective modules --- nova/api/openstack/__init__.py | 83 ++++------------------------- nova/api/openstack/auth.py | 20 ++++--- nova/api/openstack/common.py | 17 ++++++ nova/api/openstack/flavors.py | 3 +- nova/api/openstack/images.py | 6 ++- nova/api/openstack/ratelimiting/__init__.py | 60 +++++++++++++++++++++ nova/api/openstack/servers.py | 3 +- 7 files changed, 106 insertions(+), 86 
 deletions(-)
 create mode 100644 nova/api/openstack/common.py

diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index b9ecbd9b8..e941694d9 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -49,6 +49,10 @@ flags.DEFINE_string('nova_api_auth',
     'nova.api.openstack.auth.BasicApiAuthManager',
     'The auth mechanism to use for the OpenStack API implemenation')
 
+flags.DEFINE_string('os_api_ratelimiting',
+    'nova.api.openstack.ratelimiting.BasicRateLimiting',
+    'Default ratelimiting implementation for the Openstack API')
+
 flags.DEFINE_bool('allow_admin_api',
     False,
     'When True, this API service will accept admin operations.')
@@ -81,10 +85,10 @@ class AuthMiddleware(wsgi.Middleware):
 
     @webob.dec.wsgify
     def __call__(self, req):
-        if 'X-Auth-Token' not in req.headers:
+        if not self.auth_driver.has_authentication(req):
             return self.auth_driver.authenticate(req)
 
-        user = self.auth_driver.authorize_token(req.headers["X-Auth-Token"])
+        user = self.auth_driver.get_user_by_authentication(req)
         if not user:
             return faults.Fault(webob.exc.HTTPUnauthorized())
 
@@ -104,62 +108,12 @@ class RateLimitingMiddleware(wsgi.Middleware):
         at the given host+port to keep rate counters.
         """
         super(RateLimitingMiddleware, self).__init__(application)
-        if not service_host:
-            #TODO(gundlach): These limits were based on limitations of Cloud
-            #Servers. We should revisit them in Nova.
-            self.limiter = ratelimiting.Limiter(limits={
-                    'DELETE': (100, ratelimiting.PER_MINUTE),
-                    'PUT': (10, ratelimiting.PER_MINUTE),
-                    'POST': (10, ratelimiting.PER_MINUTE),
-                    'POST servers': (50, ratelimiting.PER_DAY),
-                    'GET changes-since': (3, ratelimiting.PER_MINUTE),
-                })
-        else:
-            self.limiter = ratelimiting.WSGIAppProxy(service_host)
+        self._limiting_driver = \
+            utils.import_class(FLAGS.os_api_ratelimiting)(service_host)
 
     @webob.dec.wsgify
     def __call__(self, req):
-        """Rate limit the request.
-
-        If the request should be rate limited, return a 413 status with a
-        Retry-After header giving the time when the request would succeed.
-        """
-        action_name = self.get_action_name(req)
-        if not action_name:
-            # Not rate limited
-            return self.application
-        delay = self.get_delay(action_name,
-                               req.environ['nova.context'].user_id)
-        if delay:
-            # TODO(gundlach): Get the retry-after format correct.
-            exc = webob.exc.HTTPRequestEntityTooLarge(
-                    explanation='Too many requests.',
-                    headers={'Retry-After': time.time() + delay})
-            raise faults.Fault(exc)
-        return self.application
-
-    def get_delay(self, action_name, username):
-        """Return the delay for the given action and username, or None if
-        the action would not be rate limited.
-        """
-        if action_name == 'POST servers':
-            # "POST servers" is a POST, so it counts against "POST" too.
-            # Attempt the "POST" first, lest we are rate limited by "POST" but
-            # use up a precious "POST servers" call.
-            delay = self.limiter.perform("POST", username=username)
-            if delay:
-                return delay
-        return self.limiter.perform(action_name, username=username)
-
-    def get_action_name(self, req):
-        """Return the action name for this request."""
-        if req.method == 'GET' and 'changes-since' in req.GET:
-            return 'GET changes-since'
-        if req.method == 'POST' and req.path_info.startswith('/servers'):
-            return 'POST servers'
-        if req.method in ['PUT', 'POST', 'DELETE']:
-            return req.method
-        return None
+        return self._limiting_driver.limited_request(req)
 
 
 class APIRouter(wsgi.Router):
@@ -191,22 +145,3 @@ class APIRouter(wsgi.Router):
         # TODO: Place routes for admin operations here.
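# Illustrative sketch of the flag-driven driver loading used by the middleware
# above; the DummyAuthDriver class is hypothetical, only utils.import_class and
# the nova_api_auth / os_api_ratelimiting flags come from the code itself.
from nova import flags
from nova import utils

FLAGS = flags.FLAGS


class DummyAuthDriver(object):
    """Hypothetical driver exposing the interface AuthMiddleware calls."""

    def has_authentication(self, req):
        return 'X-Auth-Token' in req.headers

    def authenticate(self, req):
        return None  # a real driver would issue a token or a 401 response

    def get_user_by_authentication(self, req):
        return None  # a real driver would map the token back to a user


# Swapping implementations is then a one-flag change, e.g.
# --nova_api_auth=nova.api.openstack.auth.BasicApiAuthManager
auth_driver = utils.import_class(FLAGS.nova_api_auth)()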
 
         super(APIRouter, self).__init__(mapper)
-
-
-def limited(items, req):
-    """Return a slice of items according to requested offset and limit.
-
-    items - a sliceable
-    req - wobob.Request possibly containing offset and limit GET variables.
-    offset is where to start in the list, and limit is the maximum number
-    of items to return.
-
-    If limit is not specified, 0, or > 1000, defaults to 1000.
-    """
-    offset = int(req.GET.get('offset', 0))
-    limit = int(req.GET.get('limit', 0))
-    if not limit:
-        limit = 1000
-    limit = min(1000, limit)
-    range_end = offset + limit
-    return items[offset:range_end]

diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index fcda97ab1..da8ebcfcd 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -7,6 +7,7 @@ import webob.exc
 import webob.dec
 
 from nova import auth
+from nova import context
 from nova import db
 from nova import flags
 from nova import manager
@@ -16,10 +17,6 @@ from nova.api.openstack import faults
 FLAGS = flags.FLAGS
 
 
-class Context(object):
-    pass
-
-
 class BasicApiAuthManager(object):
     """ Implements a somewhat rudimentary version of OpenStack Auth"""
 
@@ -28,9 +25,14 @@ class BasicApiAuthManager(object):
         db_driver = FLAGS.db_driver
         self.db = utils.import_object(db_driver)
         self.auth = auth.manager.AuthManager()
-        self.context = Context()
         super(BasicApiAuthManager, self).__init__()
 
+    def has_authentication(self, req):
+        return 'X-Auth-Token' in req.headers
+
+    def get_user_by_authentication(self, req):
+        return self.authorize_token(req.headers["X-Auth-Token"])
+
     def authenticate(self, req):
         # Unless the request is explicitly made against // don't
         # honor it
@@ -68,11 +70,12 @@ class BasicApiAuthManager(object):
         This method will also remove the token if the timestamp is older than
         2 days ago.
         """
-        token = self.db.auth_get_token(self.context, token_hash)
+        ctxt = context.get_admin_context()
+        token = self.db.auth_get_token(ctxt, token_hash)
         if token:
             delta = datetime.datetime.now() - token.created_at
             if delta.days >= 2:
-                self.db.auth_destroy_token(self.context, token)
+                self.db.auth_destroy_token(ctxt, token)
             else:
                 return self.auth.get_user(token.user_id)
         return None
@@ -84,6 +87,7 @@ class BasicApiAuthManager(object):
         key - string API key
         req - webob.Request object
         """
+        ctxt = context.get_admin_context()
         user = self.auth.get_user_from_access_key(key)
         if user and user.name == username:
             token_hash = hashlib.sha1('%s%s%f' % (username, key,
@@ -95,6 +99,6 @@ class BasicApiAuthManager(object):
             token_dict['server_management_url'] = req.url
             token_dict['storage_url'] = ''
             token_dict['user_id'] = user.id
-            token = self.db.auth_create_token(self.context, token_dict)
+            token = self.db.auth_create_token(ctxt, token_dict)
             return token, user
         return None, None

diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
new file mode 100644
index 000000000..29e9a8623
--- /dev/null
+++ b/nova/api/openstack/common.py
@@ -0,0 +1,17 @@
+def limited(items, req):
+    """Return a slice of items according to requested offset and limit.
+
+    items - a sliceable
+    req - webob.Request possibly containing offset and limit GET variables.
+    offset is where to start in the list, and limit is the maximum number
+    of items to return.
+
+    If limit is not specified, 0, or > 1000, defaults to 1000.
+ """ + offset = int(req.GET.get('offset', 0)) + limit = int(req.GET.get('limit', 0)) + if not limit: + limit = 1000 + limit = min(1000, limit) + range_end = offset + limit + return items[offset:range_end] diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index f23f74fd1..f620d4107 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -18,6 +18,7 @@ from webob import exc from nova.api.openstack import faults +from nova.api.openstack import common from nova.compute import instance_types from nova import wsgi import nova.api.openstack @@ -39,7 +40,7 @@ class Controller(wsgi.Controller): def detail(self, req): """Return all flavors in detail.""" items = [self.show(req, id)['flavor'] for id in self._all_ids()] - items = nova.api.openstack.limited(items, req) + items = common.limited(items, req) return dict(flavors=items) def show(self, req, id): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 4a0a8e6f1..fe8d9d75f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -22,6 +22,8 @@ from nova import utils from nova import wsgi import nova.api.openstack import nova.image.service + +from nova.api.openstack import common from nova.api.openstack import faults @@ -48,11 +50,11 @@ class Controller(wsgi.Controller): """Return all public images in detail.""" try: images = self._service.detail(req.environ['nova.context']) - images = nova.api.openstack.limited(images, req) + images = common.limited(images, req) except NotImplementedError: # Emulate detail() using repeated calls to show() images = self._service.index(ctxt) - images = nova.api.openstack.limited(images, req) + images = common.limited(images, req) images = [self._service.show(ctxt, i['id']) for i in images] return dict(images=images) diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 918caf055..d1da9afa7 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -14,6 +14,66 @@ PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 +class BasicRateLimiting(object): + """ Implements Rate limits as per the Rackspace CloudServers API spec. """ + + def __init__(self, service_host): + if not service_host: + #TODO(gundlach): These limits were based on limitations of Cloud + #Servers. We should revisit them in Nova. + self.limiter = ratelimiting.Limiter(limits={ + 'DELETE': (100, ratelimiting.PER_MINUTE), + 'PUT': (10, ratelimiting.PER_MINUTE), + 'POST': (10, ratelimiting.PER_MINUTE), + 'POST servers': (50, ratelimiting.PER_DAY), + 'GET changes-since': (3, ratelimiting.PER_MINUTE), + }) + else: + self.limiter = ratelimiting.WSGIAppProxy(service_host) + + def limited_request(self, req): + """Rate limit the request. + + If the request should be rate limited, return a 413 status with a + Retry-After header giving the time when the request would succeed. + """ + action_name = self.get_action_name(req) + if not action_name: + # Not rate limited + return self.application + delay = self.get_delay(action_name, + req.environ['nova.context'].user_id) + if delay: + # TODO(gundlach): Get the retry-after format correct. + exc = webob.exc.HTTPRequestEntityTooLarge( + explanation='Too many requests.', + headers={'Retry-After': time.time() + delay}) + raise faults.Fault(exc) + return self.application + + def get_delay(self, action_name, username): + """Return the delay for the given action and username, or None if + the action would not be rate limited. 
+ """ + if action_name == 'POST servers': + # "POST servers" is a POST, so it counts against "POST" too. + # Attempt the "POST" first, lest we are rate limited by "POST" but + # use up a precious "POST servers" call. + delay = self.limiter.perform("POST", username=username) + if delay: + return delay + return self.limiter.perform(action_name, username=username) + + def get_action_name(self, req): + """Return the action name for this request.""" + if req.method == 'GET' and 'changes-since' in req.GET: + return 'GET changes-since' + if req.method == 'POST' and req.path_info.startswith('/servers'): + return 'POST servers' + if req.method in ['PUT', 'POST', 'DELETE']: + return req.method + return None + class Limiter(object): """Class providing rate limiting of arbitrary actions.""" diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7704f48f1..9e6047805 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -19,6 +19,7 @@ from webob import exc from nova import exception from nova import wsgi +from nova.api.openstack import common from nova.api.openstack import faults from nova.auth import manager as auth_manager from nova.compute import api as compute_api @@ -91,7 +92,7 @@ class Controller(wsgi.Controller): """ instance_list = self.compute_api.get_instances( req.environ['nova.context']) - limited_list = nova.api.openstack.limited(instance_list, req) + limited_list = common.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) -- cgit From db96fd559d28bcfdf8cc29d79b9afca6dea1cfb7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 18:44:42 +0000 Subject: reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! 
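# Rough usage sketch for the rate-limiting rules moved above, assuming the
# Limiter API shown there: perform() returns None while an action is still
# within its limit and a delay in seconds once the limit is exhausted.
from nova.api.openstack import ratelimiting

limiter = ratelimiting.Limiter(limits={
        'POST': (10, ratelimiting.PER_MINUTE),
        'POST servers': (50, ratelimiting.PER_DAY)})

# "POST servers" is itself a POST, which is why get_delay() above spends a
# plain "POST" credit first rather than wasting a scarcer "POST servers" one.
delay = None
for _ in range(11):
    delay = limiter.perform('POST', username='demo')
assert delay  # the 11th POST within a minute should come back with a wait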
--- nova/tests/virt_unittest.py | 2 -- nova/tests/xenapi/stubs.py | 24 +++++++++--------------- nova/tests/xenapi_unittest.py | 5 ++--- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/volume_utils.py | 29 +++++++++++++++++------------ 5 files changed, 29 insertions(+), 32 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 52843b703..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,8 +25,6 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 525189388..11dd535d4 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -58,21 +58,12 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass + def VBD_plug(self, _1, ref): + rec = fake.get_record('VBD', ref) + rec['currently-attached'] = True def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce valid_vdi = False refs = fake.get_all('VDI') for ref in refs: @@ -93,6 +84,9 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): # This is for testing failure raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - def VBD_plug(self, _1, _2): - # This is for testing failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) + def PBD_unplug(self, _1, ref): + rec = fake.get_record('PBD', ref) + rec['currently-attached'] = False + + def SR_forget(self, _1, ref): + pass diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c2612a4c5..839d6aa44 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -141,14 +141,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(exc): + def check_exception(exc): """ handler """ if exc: pass else: self.fail('Oops, no exception has been raised!') - - result.addErrback(check) + result.addErrback(check_exception) return result def tearDown(self): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index a46f3fd80..7877b5905 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -102,6 +102,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'VBDs': {}, }) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 4482e465c..8d1d6fb81 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -152,18 +152,23 @@ class VolumeHelper(HelperBase): logging.warn(exc) raise StorageError('Unable to get record of VDI %s on' % vdis[0]) else: - return session.get_xenapi().VDI.introduce( - vdi_rec['uuid'], - vdi_rec['name_label'], - vdi_rec['name_description'], - vdi_rec['SR'], - vdi_rec['type'], - vdi_rec['sharable'], - vdi_rec['read_only'], - vdi_rec['other_config'], - vdi_rec['location'], - vdi_rec['xenstore_data'], - vdi_rec['sm_config']) + try: + vdi_ref = 
session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + except cls.XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to introduce VDI for SR %s' + % sr_ref) @classmethod @defer.inlineCallbacks -- cgit From 9b049acc27d477a1ab9e13c9e064e59d8bd0a3ae Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 16 Dec 2010 10:52:30 -0800 Subject: pep8 fixes --- nova/compute/manager.py | 2 +- nova/service.py | 10 +++++----- nova/test.py | 1 + nova/tests/rpc_unittest.py | 2 +- nova/tests/service_unittest.py | 2 +- nova/utils.py | 7 ++++--- nova/virt/libvirt_conn.py | 12 ++++++------ nova/virt/xenapi_conn.py | 2 +- 8 files changed, 20 insertions(+), 18 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f90f28b78..7eb60e262 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -64,7 +64,7 @@ class ComputeManager(manager.Manager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(*args, **kwargs) - + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" # FIXME(ja): include other fields from state? diff --git a/nova/service.py b/nova/service.py index 3d40e83a6..ac30aaceb 100644 --- a/nova/service.py +++ b/nova/service.py @@ -99,7 +99,7 @@ class Service(object): self.timers.append(consumer_all.attach_to_eventlet()) self.timers.append(consumer_node.attach_to_eventlet()) - + pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) @@ -122,7 +122,7 @@ class Service(object): return getattr(manager, key) @classmethod - def create(cls, + def create(cls, host=None, binary=None, topic=None, @@ -192,7 +192,7 @@ class Service(object): db.service_update(ctxt, self.service_id, {'report_count': service_ref['report_count'] + 1}) - + # TODO(termie): make this pattern be more elegant. 
if getattr(self, "model_disconnected", False): self.model_disconnected = False @@ -207,7 +207,7 @@ class Service(object): def serve(*services): argv = FLAGS(sys.argv) - + if not services: services = [Service.create()] @@ -227,7 +227,7 @@ def serve(*services): for x in services: x.start() - + def wait(): while True: diff --git a/nova/test.py b/nova/test.py index ecc97aa4d..7076f1bf4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -55,6 +55,7 @@ def skip_if_fake(func): return func(*args, **kw) return _skipper + class TestCase(unittest.TestCase): """Test case base class for all unit tests""" def setUp(self): diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 4128c35b8..a2495e65a 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -67,7 +67,7 @@ class RpcTestCase(test.TestCase): self.assertRaises(rpc.RemoteError, rpc.call, self.context, - 'test', + 'test', {"method": "fail", "args": {"value": value}}) try: diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 6bdc7071c..47c092f8e 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -119,7 +119,7 @@ class ServiceTestCase(test.TestCase): service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) self.mox.ReplayAll() - + app.start() app.stop() self.assert_(app) diff --git a/nova/utils.py b/nova/utils.py index 5f2d47202..ea1f04ca7 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -233,6 +233,7 @@ class LoopingCall(object): def start(self, interval, now=True): self._running = True done = event.Event() + def _inner(): if not now: greenthread.sleep(interval) @@ -244,14 +245,14 @@ class LoopingCall(object): logging.exception('in looping call') done.send_exception(*sys.exc_info()) return - + done.send(True) self.done = done - + greenthread.spawn(_inner) return self.done - + def stop(self): self._running = False diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ba51f8f69..5a8c71850 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -205,7 +205,7 @@ class LibvirtConnection(object): timer.f = _wait_for_shutdown timer_done = timer.start(interval=0.5, now=True) - + # NOTE(termie): this is strictly superfluous (we could put the # cleanup code in the timer), but this emulates the # previous model so I am keeping it around until @@ -387,7 +387,7 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': @@ -439,11 +439,11 @@ class LibvirtConnection(object): if not os.path.exists(basepath('ramdisk')): images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) - + def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 09d399da4..6beb08f5e 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -200,7 +200,7 @@ class XenAPISession(object): error_info = self._session.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. 
%s', task, status, error_info) - done.send_exception(XenAPI.Failure(error_info)) + done.send_exception(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From dc29400d104d34c6383132a43e018f7724e85ec3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 16 Dec 2010 19:13:37 +0000 Subject: use getent, update docstring --- CA/genvpn.sh | 36 ++++++++++++++++++++++++++++++++++++ nova/auth/manager.py | 3 ++- 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100755 CA/genvpn.sh diff --git a/CA/genvpn.sh b/CA/genvpn.sh new file mode 100755 index 000000000..7e7db185d --- /dev/null +++ b/CA/genvpn.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This gets zipped and run on the cloudpipe-managed OpenVPN server +NAME=$1 +SUBJ=$2 + +mkdir -p projects/$NAME +cd projects/$NAME + +# generate a server priv key +openssl genrsa -out server.key 2048 + +# generate a server CSR +openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ" + +novauid=`getent passwd nova | awk -F: '{print $3}'` +if [ ! -z "${novauid}" ] && [ "`id -u`" != "${novauid}" ]; then + sudo chown -R nova:nogroup . 
+fi diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 783ef51af..735473027 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -65,7 +65,8 @@ flags.DEFINE_string('credential_key_file', 'pk.pem', flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') flags.DEFINE_string('credential_rc_file', '%src', - 'Filename of rc in credentials zip') + 'Filename of rc in credentials zip, %s will be ' + 'replaced by name of the region (nova by default)') flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') -- cgit From d283922defdda6ede5fa2e09656cd8d411a90096 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Thu, 16 Dec 2010 14:47:42 -0500 Subject: PEP8 cleanups --- nova/api/ec2/cloud.py | 3 ++- nova/objectstore/handler.py | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4b8b85b4c..d638582a7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -360,7 +360,8 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError(_("No rule for the specified parameters.")) + raise exception.ApiError(_("No rule for the specified " + "parameters.")) for rule in security_group.rules: match = True diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 0c71c3705..52257f69f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -261,7 +261,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) + logging.debug(_("Getting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -279,7 +281,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" - logging.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) + logging.debug(_("Putting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() -- cgit From e306d236e3cf91c00a36940e277ad2d105b055a1 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 16 Dec 2010 12:45:06 -0800 Subject: pep8 fixes for bin --- bin/nova-combined | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nova-combined b/bin/nova-combined index 5f635b3a3..c6a04f7e9 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -50,7 +50,7 @@ flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) - + compute = service.Service.create(binary='nova-compute') network = service.Service.create(binary='nova-network') volume = service.Service.create(binary='nova-volume') @@ -63,4 +63,3 @@ if __name__ == '__main__': server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host) server.wait() - -- cgit From 611935aa3e3a66e9638b0c127041a6fca4788b9c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:03:37 -0600 Subject: Put flags back in vm_utils --- nova/virt/xenapi/vm_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index dde138404..b83ae9475 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,15 +21,20 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging import urllib + from xml.dom import minidom +from nova import flags from nova import utils + from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +FLAGS = flags.FLAGS + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -37,7 +42,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} - XenAPI = None -- cgit From e5a3d993cb13c8dc5e984a67521f77ce8fdf8e4c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:19:35 -0600 Subject: Removed unnecessary blank lines --- nova/virt/xenapi/vm_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b83ae9475..2f5d78e75 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,12 +21,10 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging import urllib - from xml.dom import minidom from nova import flags from nova import utils - from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state -- cgit From 7954862c8133bacd5e612864a26e7d0ae9b0d663 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:54:38 -0600 Subject: Added Instance Diagnostics DB model --- nova/db/sqlalchemy/models.py | 23 ++++++++++++++++++++--- nova/virt/xenapi/vm_utils.py | 6 +----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a921..61764ee8d 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,7 +22,7 @@ SQLAlchemy models for nova data. 
import datetime from sqlalchemy.orm import relationship, backref, object_mapper -from sqlalchemy import Column, Integer, String, schema +from sqlalchemy import Column, Integer, Float, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base @@ -226,6 +226,23 @@ class Instance(BASE, NovaBase): # 'shutdown', 'shutoff', 'crashed']) +class InstanceDiagnostics(BASE, NovaBase): + """Represents a guest VM's diagnostics""" + __tablename__ = "instance_diagnostics" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + vbd_xvda_read = Column(Float) + vbd_xvda_write = Column(Float) + vbd_xvdb_read = Column(Float) + vbd_xvdb_write = Column(Float) + memory = Column(Float) + memory_internal_free = Column(Float) + cpu0 = Column(Float) + vif_0_tx = Column(Float) + vif_0_rx = Column(Float) + + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -526,8 +543,8 @@ def register_models(): it will never need to be called explicitly elsewhere. """ from sqlalchemy import create_engine - models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, - FloatingIp, Network, SecurityGroup, + models = (Service, Instance, InstanceDiagnostics, Volume, ExportDevice, + IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..659559c31 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -228,11 +228,7 @@ class VMHelper(): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] - metrics = session.get_xenapi().VM_guest_metrics.get_record( - record["guest_metrics"]) - diags = { - "Kernel": metrics["os_version"]["uname"], - "Distro": metrics["os_version"]["name"]} + diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) -- cgit From a6f90bacda223add276698958b2e7479bb6841e9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 16 Dec 2010 23:25:21 +0000 Subject: make sure all network data is recreated when nova-network is rebooted --- nova/network/manager.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 6a30f30b7..815fbfdc2 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -112,6 +112,16 @@ class NetworkManager(manager.Manager): ctxt = context.get_admin_context() for network in self.db.host_get_networks(ctxt, self.host): self._on_set_network_host(ctxt, network['id']) + floating_ips = self.db.floating_ip_get_all_by_host(ctxt, + self.host) + for floating_ip in floating_ips: + if floating_ip.get('fixed_ip', None): + fixed_address = floating_ip['fixed_ip']['address'] + # NOTE(vish): The False here is because we ignore the case + # that the ip is already bound. 
+ self.driver.bind_floating_ip(floating_ip['address'], False) + self.driver.ensure_floating_forward(floating_ip['address'], + fixed_address) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" @@ -444,12 +454,7 @@ class VlanManager(NetworkManager): def setup_fixed_ip(self, context, address): """Sets forwarding rules and dhcp for fixed ip.""" - fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) network_ref = self.db.fixed_ip_get_network(context, address) - if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']): - self.driver.ensure_vlan_forward(network_ref['vpn_public_address'], - network_ref['vpn_public_port'], - network_ref['vpn_private_address']) self.driver.update_dhcp(context, network_ref['id']) def setup_compute_network(self, context, instance_id): @@ -497,13 +502,24 @@ class VlanManager(NetworkManager): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" network_ref = self.db.network_get(context, network_id) - net = {} - net['vpn_public_address'] = FLAGS.vpn_ip - db.network_update(context, network_id, net) + if not network_ref['vpn_public_address']: + net = {} + address = FLAGS.vpn_ip + net['vpn_public_address'] = address + db.network_update(context, network_id, net) + else: + address = network_ref['vpn_public_address'] self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref) - self.driver.update_dhcp(context, network_id) + # NOTE(vish): only ensure this forward if the address hasn't been set + # manually. + if address == FLAGS.vpn_ip: + self.driver.ensure_vlan_forward(FLAGS.vpn_ip, + network_ref['vpn_public_port'], + network_ref['vpn_private_address']) + if not FLAGS.fake_network: + self.driver.update_dhcp(context, network_id) @property def _bottom_reserved_ips(self): -- cgit From 0d705117a0d0c04d845c5d146455cd11ba9af88c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 16 Dec 2010 23:47:14 +0000 Subject: add conditional bind to linux net --- nova/network/linux_net.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0fefd9415..40fe7619b 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -77,10 +77,11 @@ def init_host(): {'range': FLAGS.fixed_range}) -def bind_floating_ip(floating_ip): +def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface)) + FLAGS.public_interface), + check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): -- cgit From 86f71493fa5a02762bc7c56308c85b9182913efb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 17 Dec 2010 00:43:18 +0000 Subject: move some flags around --- nova/api/__init__.py | 2 +- nova/api/ec2/cloud.py | 16 ++++++++++------ nova/flags.py | 12 +++++++----- nova/network/linux_net.py | 5 ++--- nova/utils.py | 5 ----- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 95fc6b145..803470570 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -29,7 +29,6 @@ import routes import webob.dec from nova import flags -from nova import utils from nova import wsgi from nova.api import ec2 from nova.api import openstack @@ -40,6 +39,7 @@ flags.DEFINE_string('osapi_subdomain', 'api', 'subdomain running the OpenStack API') flags.DEFINE_string('ec2api_subdomain', 'ec2', 'subdomain running 
the EC2 API') + FLAGS = flags.FLAGS diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ebb13aedc..684e29ee1 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -196,15 +196,19 @@ class CloudController(object): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: - name, _sep, url = region.partition('=') + name, _sep, host = region.partition('=') + endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix, + host, + FLAGS.cc_port, + FLAGS.ec2_suffix) regions.append({'regionName': name, - 'regionEndpoint': url}) + 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', - 'regionEndpoint': FLAGS.ec2_url}] - if region_name: - regions = [r for r in regions if r['regionName'] in region_name] - return {'regionInfo': regions} + 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + FLAGS.cc_host, + FLAGS.cc_port, + FLAGS.ec2_suffix)}] def describe_snapshots(self, context, diff --git a/nova/flags.py b/nova/flags.py index 9e99ffb5e..95bfd3773 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,6 +29,7 @@ import sys import gflags +from nova import utils class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. @@ -211,8 +212,8 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') +DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -231,10 +232,11 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_dmz', '127.0.0.1', 'ip of api server (for instances)') +DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') +DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') +DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') DEFINE_integer('cc_port', 8773, 'cloud controller port') -DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') +DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index cb9f01a36..8c7c528b5 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -46,8 +46,7 @@ flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') -flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -flags.DEFINE_string('routing_source_ip', '127.0.0.1', +flags.DEFINE_string('routing_source_ip', utils.get_my_ip(), 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') @@ -59,7 +58,7 @@ def metadata_forward(): """Create forwarding rule for 
metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) + "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port)) def init_host(): diff --git a/nova/utils.py b/nova/utils.py index 11160c118..082a42acf 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -21,7 +21,6 @@ System-level utilities and helper functions. """ import datetime -import functools import inspect import logging import os @@ -36,11 +35,9 @@ from eventlet import event from eventlet import greenthread from nova import exception -from nova import flags from nova.exception import ProcessExecutionError -FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -197,8 +194,6 @@ def last_octet(address): def get_my_ip(): """Returns the actual ip of the local machine.""" - if getattr(FLAGS, 'fake_tests', None): - return '127.0.0.1' try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) -- cgit From 9e28957c45c69bf11a414faeb16a068f10a6a73d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 17 Dec 2010 00:44:08 +0000 Subject: clean up use of iptables chains --- nova/network/linux_net.py | 104 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 26 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0fefd9415..8aca1c80a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -19,7 +19,6 @@ Implements vlans, bridges, and iptables rules using linux utilities. import logging import os -import signal # TODO(ja): does the definition of network_path belong here? @@ -48,39 +47,88 @@ flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') -flags.DEFINE_string('routing_source_ip', '127.0.0.1', +flags.DEFINE_string('routing_source_ip', utils.get_my_ip(), 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') - -DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] +flags.DEFINE_string('dns_server', None, + 'if set, uses specific dns server for dnsmasq') def metadata_forward(): """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) + "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port)) def init_host(): """Basic networking setup goes here""" + + if FLAGS.use_nova_chains: + _execute("sudo iptables -N nova_input", check_exit_code=False) + _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain, + check_exit_code=False) + _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain) + + _execute("sudo iptables -N nova_forward", check_exit_code=False) + _execute("sudo iptables -D FORWARD -j nova_forward", + check_exit_code=False) + _execute("sudo iptables -A FORWARD -j nova_forward") + + _execute("sudo iptables -N nova_output", check_exit_code=False) + _execute("sudo iptables -D OUTPUT -j nova_output", + check_exit_code=False) + _execute("sudo iptables -A OUTPUT -j nova_output") + + _execute("sudo iptables -t nat -N nova_prerouting", + check_exit_code=False) + _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting", + check_exit_code=False) + _execute("sudo iptables 
-t nat -A PREROUTING -j nova_prerouting") + + _execute("sudo iptables -t nat -N nova_postrouting", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting") + + _execute("sudo iptables -t nat -N nova_snatting", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting") + + _execute("sudo iptables -t nat -N nova_output", check_exit_code=False) + _execute("sudo iptables -t nat -D OUTPUT -j nova_output", + check_exit_code=False) + _execute("sudo iptables -t nat -A OUTPUT -j nova_output") + else: + # NOTE(vish): This makes it easy to ensure snatting rules always + # come after the accept rules in the postrouting chain + _execute("sudo iptables -t nat -N SNATTING", + check_exit_code=False) + _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING", + check_exit_code=False) + _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING") + # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. - _confirm_rule("POSTROUTING", "-t nat -s %s " + _confirm_rule("SNATTING", "-t nat -s %s " "-j SNAT --to-source %s" - % (FLAGS.fixed_range, FLAGS.routing_source_ip)) + % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True) - _confirm_rule("POSTROUTING", "-t nat -s %s -j MASQUERADE" % - FLAGS.fixed_range) + _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" % + (FLAGS.fixed_range, FLAGS.dmz_cidr)) _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % {'range': FLAGS.fixed_range}) -def bind_floating_ip(floating_ip): +def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface)) + FLAGS.public_interface), + check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): @@ -102,27 +150,16 @@ def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) - _confirm_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s" + _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) - # TODO(joshua): Get these from the secgroup datastore entries - _confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT" - % (fixed_ip)) - for (protocol, port) in DEFAULT_PORTS: - _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" - % (fixed_ip, protocol, port)) def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) - _remove_rule("POSTROUTING", "-t nat -s %s -j SNAT --to %s" + _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) - _remove_rule("FORWARD", "-d %s -p icmp -j ACCEPT" - % (fixed_ip)) - for (protocol, port) in DEFAULT_PORTS: - _remove_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT" - % (fixed_ip, protocol, port)) def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): @@ -160,6 +197,15 @@ def ensure_bridge(bridge, interface, net_attrs=None): net_attrs['netmask'])) else: _execute("sudo ifconfig %s up" % bridge) + if FLAGS.use_nova_chains: + (out, err) = _execute("sudo iptables -N nova_forward", + check_exit_code=False) + if err != 'iptables: Chain already exists.\n': + # NOTE(vish): chain 
didn't exist link chain + _execute("sudo iptables -D FORWARD -j nova_forward", + check_exit_code=False) + _execute("sudo iptables -A FORWARD -j nova_forward") + _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge) _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge) @@ -236,13 +282,17 @@ def _device_exists(device): return not err -def _confirm_rule(chain, cmd): +def _confirm_rule(chain, cmd, append=False): """Delete and re-add iptables rule""" if FLAGS.use_nova_chains: chain = "nova_%s" % chain.lower() + if append: + loc = "-A" + else: + loc = "-I" _execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False) - _execute("sudo iptables -I %s %s" % (chain, cmd)) + _execute("sudo iptables %s %s %s" % (loc, chain, cmd)) def _remove_rule(chain, cmd): @@ -265,6 +315,8 @@ def _dnsmasq_cmd(net): ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), ' --dhcp-script=%s' % FLAGS.dhcpbridge, ' --leasefile-ro'] + if FLAGS.dns_server: + cmd.append(' -h -R --server=%s' % FLAGS.dns_server) return ''.join(cmd) -- cgit From baf0b1db2d4997f0e47277763e8ab393c131b8c8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 17 Dec 2010 00:52:17 +0000 Subject: pep8 --- nova/flags.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/flags.py b/nova/flags.py index 95bfd3773..74badf6fd 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -31,6 +31,7 @@ import gflags from nova import utils + class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. -- cgit From 8c343e1b4b92aa7b1062acebe8eaea402bc6ab4a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 16 Dec 2010 17:05:54 -0800 Subject: First pass at converting run_tests.py to nosetests. The network and objctstore tests don't yet work. Also, we need to manually remove the sqlite file between runs. 
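As the subject above notes, the converted suite still leaves its sqlite database behind, so it has to be removed by hand between runs while the bulk of the commit simply renames the *_unittest.py modules to test_*.py so that nose's default collector (which picks up names beginning with "test") can find them. The sketch below illustrates that manual workflow under stated assumptions: the database filename (tests.sqlite) and the wrapper itself are illustrative only and are not part of this commit.

    # Hedged sketch of the manual workflow the commit message describes:
    # delete the leftover sqlite database, then let nose discover the
    # renamed test_*.py modules.  The filename "tests.sqlite" and this
    # wrapper are assumptions for illustration, not part of the commit.

    import os
    import subprocess


    def run_test_suite(db_file='tests.sqlite'):
        # Stale state from a previous run has to be cleared by hand for now.
        if os.path.exists(db_file):
            os.unlink(db_file)
        # nose collects modules, classes and functions whose names start
        # with "test", which is why the *_unittest.py files are renamed.
        return subprocess.call(['nosetests', 'nova/tests'])


    if __name__ == '__main__':
        raise SystemExit(run_test_suite())

If the cleanup is later folded into the test fixtures themselves, the manual delete step can be dropped.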
--- nova/test.py | 3 + nova/tests/access_unittest.py | 127 -------------- nova/tests/api/__init__.py | 81 --------- nova/tests/api/test.py | 81 +++++++++ nova/tests/api_integration.py | 54 ------ nova/tests/api_unittest.py | 338 ------------------------------------- nova/tests/auth_unittest.py | 352 --------------------------------------- nova/tests/cloud_unittest.py | 332 ------------------------------------ nova/tests/compute_unittest.py | 155 ----------------- nova/tests/flags_unittest.py | 102 ------------ nova/tests/misc_unittest.py | 55 ------ nova/tests/quota_unittest.py | 153 ----------------- nova/tests/rpc_unittest.py | 103 ------------ nova/tests/scheduler_unittest.py | 246 --------------------------- nova/tests/service_unittest.py | 227 ------------------------- nova/tests/test_access.py | 127 ++++++++++++++ nova/tests/test_api.py | 338 +++++++++++++++++++++++++++++++++++++ nova/tests/test_auth.py | 352 +++++++++++++++++++++++++++++++++++++++ nova/tests/test_cloud.py | 332 ++++++++++++++++++++++++++++++++++++ nova/tests/test_compute.py | 155 +++++++++++++++++ nova/tests/test_flags.py | 102 ++++++++++++ nova/tests/test_misc.py | 55 ++++++ nova/tests/test_quota.py | 153 +++++++++++++++++ nova/tests/test_rpc.py | 103 ++++++++++++ nova/tests/test_scheduler.py | 246 +++++++++++++++++++++++++++ nova/tests/test_service.py | 227 +++++++++++++++++++++++++ nova/tests/test_twistd.py | 53 ++++++ nova/tests/test_virt.py | 258 ++++++++++++++++++++++++++++ nova/tests/test_volume.py | 175 +++++++++++++++++++ nova/tests/twistd_unittest.py | 53 ------ nova/tests/virt_unittest.py | 258 ---------------------------- nova/tests/volume_unittest.py | 175 ------------------- run_tests.py | 125 -------------- 33 files changed, 2760 insertions(+), 2936 deletions(-) delete mode 100644 nova/tests/access_unittest.py create mode 100644 nova/tests/api/test.py delete mode 100644 nova/tests/api_integration.py delete mode 100644 nova/tests/api_unittest.py delete mode 100644 nova/tests/auth_unittest.py delete mode 100644 nova/tests/cloud_unittest.py delete mode 100644 nova/tests/compute_unittest.py delete mode 100644 nova/tests/flags_unittest.py delete mode 100644 nova/tests/misc_unittest.py delete mode 100644 nova/tests/quota_unittest.py delete mode 100644 nova/tests/rpc_unittest.py delete mode 100644 nova/tests/scheduler_unittest.py delete mode 100644 nova/tests/service_unittest.py create mode 100644 nova/tests/test_access.py create mode 100644 nova/tests/test_api.py create mode 100644 nova/tests/test_auth.py create mode 100644 nova/tests/test_cloud.py create mode 100644 nova/tests/test_compute.py create mode 100644 nova/tests/test_flags.py create mode 100644 nova/tests/test_misc.py create mode 100644 nova/tests/test_quota.py create mode 100644 nova/tests/test_rpc.py create mode 100644 nova/tests/test_scheduler.py create mode 100644 nova/tests/test_service.py create mode 100644 nova/tests/test_twistd.py create mode 100644 nova/tests/test_virt.py create mode 100644 nova/tests/test_volume.py delete mode 100644 nova/tests/twistd_unittest.py delete mode 100644 nova/tests/virt_unittest.py delete mode 100644 nova/tests/volume_unittest.py delete mode 100644 run_tests.py diff --git a/nova/test.py b/nova/test.py index 7076f1bf4..db5826c04 100644 --- a/nova/test.py +++ b/nova/test.py @@ -38,9 +38,12 @@ from nova import fakerabbit from nova import flags from nova import rpc from nova.network import manager as network_manager +from nova.tests import fake_flags FLAGS = flags.FLAGS +flags.DEFINE_bool('flush_db', True, + 
'Flush the database before running fake tests') flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py deleted file mode 100644 index 58fdea3b5..000000000 --- a/nova/tests/access_unittest.py +++ /dev/null @@ -1,127 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest -import logging -import webob - -from nova import context -from nova import exception -from nova import flags -from nova import test -from nova.api import ec2 -from nova.auth import manager - - -FLAGS = flags.FLAGS - - -class Context(object): - pass - - -class AccessTestCase(test.TestCase): - def setUp(self): - super(AccessTestCase, self).setUp() - um = manager.AuthManager() - self.context = context.get_admin_context() - # Make test users - self.testadmin = um.create_user('testadmin') - self.testpmsys = um.create_user('testpmsys') - self.testnet = um.create_user('testnet') - self.testsys = um.create_user('testsys') - # Assign some rules - um.add_role('testadmin', 'cloudadmin') - um.add_role('testpmsys', 'sysadmin') - um.add_role('testnet', 'netadmin') - um.add_role('testsys', 'sysadmin') - - # Make a test project - self.project = um.create_project('testproj', - 'testpmsys', - 'a test project', - ['testpmsys', 'testnet', 'testsys']) - self.project.add_role(self.testnet, 'netadmin') - self.project.add_role(self.testsys, 'sysadmin') - #user is set in each test - - def noopWSGIApp(environ, start_response): - start_response('200 OK', []) - return [''] - - self.mw = ec2.Authorizer(noopWSGIApp) - self.mw.action_roles = {'str': { - '_allow_all': ['all'], - '_allow_none': [], - '_allow_project_manager': ['projectmanager'], - '_allow_sys_and_net': ['sysadmin', 'netadmin'], - '_allow_sysadmin': ['sysadmin']}} - - def tearDown(self): - um = manager.AuthManager() - # Delete the test project - um.delete_project('testproj') - # Delete the test user - um.delete_user('testadmin') - um.delete_user('testpmsys') - um.delete_user('testnet') - um.delete_user('testsys') - super(AccessTestCase, self).tearDown() - - def response_status(self, user, methodName): - ctxt = context.RequestContext(user, self.project) - environ = {'ec2.context': ctxt, - 'ec2.controller': 'some string', - 'ec2.action': methodName} - req = webob.Request.blank('/', environ) - resp = req.get_response(self.mw) - return resp.status_int - - def shouldAllow(self, user, methodName): - self.assertEqual(200, self.response_status(user, methodName)) - - def shouldDeny(self, user, methodName): - self.assertEqual(401, self.response_status(user, methodName)) - - def test_001_allow_all(self): - users = [self.testadmin, self.testpmsys, self.testnet, self.testsys] - for user in users: - self.shouldAllow(user, '_allow_all') - - def test_002_allow_none(self): - 
self.shouldAllow(self.testadmin, '_allow_none') - users = [self.testpmsys, self.testnet, self.testsys] - for user in users: - self.shouldDeny(user, '_allow_none') - - def test_003_allow_project_manager(self): - for user in [self.testadmin, self.testpmsys]: - self.shouldAllow(user, '_allow_project_manager') - for user in [self.testnet, self.testsys]: - self.shouldDeny(user, '_allow_project_manager') - - def test_004_allow_sys_and_net(self): - for user in [self.testadmin, self.testnet, self.testsys]: - self.shouldAllow(user, '_allow_sys_and_net') - # denied because it doesn't have the per project sysadmin - for user in [self.testpmsys]: - self.shouldDeny(user, '_allow_sys_and_net') - -if __name__ == "__main__": - # TODO: Implement use_fake as an option - unittest.main() diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index 9caa8c9d0..e69de29bb 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test for the root WSGI middleware for all API controllers. -""" - -import unittest - -import stubout -import webob -import webob.dec - -import nova.exception -from nova import api -from nova.tests.api.fakes import APIStub - - -class Test(unittest.TestCase): - - def setUp(self): - self.stubs = stubout.StubOutForTesting() - - def tearDown(self): - self.stubs.UnsetAll() - - def _request(self, url, subdomain, **kwargs): - environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain} - environ_keys.update(kwargs) - req = webob.Request.blank(url, environ_keys) - return req.get_response(api.API('ec2')) - - def test_openstack(self): - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/v1.0/cloud', 'api') - self.assertEqual(result.body, "/cloud") - - def test_ec2(self): - self.stubs.Set(api.ec2, 'API', APIStub) - result = self._request('/services/cloud', 'ec2') - self.assertEqual(result.body, "/cloud") - - def test_not_found(self): - self.stubs.Set(api.ec2, 'API', APIStub) - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/test/cloud', 'ec2') - self.assertNotEqual(result.body, "/cloud") - - def test_query_api_versions(self): - result = self._request('/', 'api') - self.assertTrue('CURRENT' in result.body) - - def test_metadata(self): - def go(url): - result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2') - # Each should get to the ORM layer and fail to find the IP - self.assertRaises(nova.exception.NotFound, go, '/latest/') - self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/') - self.assertRaises(nova.exception.NotFound, go, '/1.0/') - - def test_ec2_root(self): - result = self._request('/', 'ec2') - self.assertTrue('2007-12-15\n' in result.body) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/test.py b/nova/tests/api/test.py new file mode 100644 index 000000000..9caa8c9d0 --- /dev/null +++ 
b/nova/tests/api/test.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test for the root WSGI middleware for all API controllers. +""" + +import unittest + +import stubout +import webob +import webob.dec + +import nova.exception +from nova import api +from nova.tests.api.fakes import APIStub + + +class Test(unittest.TestCase): + + def setUp(self): + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + + def _request(self, url, subdomain, **kwargs): + environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain} + environ_keys.update(kwargs) + req = webob.Request.blank(url, environ_keys) + return req.get_response(api.API('ec2')) + + def test_openstack(self): + self.stubs.Set(api.openstack, 'API', APIStub) + result = self._request('/v1.0/cloud', 'api') + self.assertEqual(result.body, "/cloud") + + def test_ec2(self): + self.stubs.Set(api.ec2, 'API', APIStub) + result = self._request('/services/cloud', 'ec2') + self.assertEqual(result.body, "/cloud") + + def test_not_found(self): + self.stubs.Set(api.ec2, 'API', APIStub) + self.stubs.Set(api.openstack, 'API', APIStub) + result = self._request('/test/cloud', 'ec2') + self.assertNotEqual(result.body, "/cloud") + + def test_query_api_versions(self): + result = self._request('/', 'api') + self.assertTrue('CURRENT' in result.body) + + def test_metadata(self): + def go(url): + result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2') + # Each should get to the ORM layer and fail to find the IP + self.assertRaises(nova.exception.NotFound, go, '/latest/') + self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/') + self.assertRaises(nova.exception.NotFound, go, '/1.0/') + + def test_ec2_root(self): + result = self._request('/', 'ec2') + self.assertTrue('2007-12-15\n' in result.body) + + +if __name__ == '__main__': + unittest.main() diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py deleted file mode 100644 index 54403c655..000000000 --- a/nova/tests/api_integration.py +++ /dev/null @@ -1,54 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import boto -from boto.ec2.regioninfo import RegionInfo -import unittest - - -ACCESS_KEY = 'fake' -SECRET_KEY = 'fake' -CLC_IP = '127.0.0.1' -CLC_PORT = 8773 -REGION = 'test' - - -def get_connection(): - return boto.connect_ec2( - aws_access_key_id=ACCESS_KEY, - aws_secret_access_key=SECRET_KEY, - is_secure=False, - region=RegionInfo(None, REGION, CLC_IP), - port=CLC_PORT, - path='/services/Cloud', - debug=99) - - -class APIIntegrationTests(unittest.TestCase): - def test_001_get_all_images(self): - conn = get_connection() - res = conn.get_all_images() - - -if __name__ == '__main__': - unittest.main() - -#print conn.get_all_key_pairs() -#print conn.create_key_pair -#print conn.create_security_group('name', 'description') diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py deleted file mode 100644 index 33d4cb294..000000000 --- a/nova/tests/api_unittest.py +++ /dev/null @@ -1,338 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the API endpoint""" - -import boto -from boto.ec2 import regioninfo -import httplib -import random -import StringIO -import webob - -from nova import context -from nova import flags -from nova import test -from nova import api -from nova.api.ec2 import cloud -from nova.api.ec2 import apirequest -from nova.auth import manager - - -class FakeHttplibSocket(object): - """a fake socket implementation for httplib.HTTPResponse, trivial""" - def __init__(self, response_string): - self._buffer = StringIO.StringIO(response_string) - - def makefile(self, _mode, _other): - """Returns the socket's internal buffer""" - return self._buffer - - -class FakeHttplibConnection(object): - """A fake httplib.HTTPConnection for boto to use - - requests made via this connection actually get translated and routed into - our WSGI app, we then wait for the response and turn it back into - the httplib.HTTPResponse that boto expects. - """ - def __init__(self, app, host, is_secure=False): - self.app = app - self.host = host - - def request(self, method, path, data, headers): - req = webob.Request.blank(path) - req.method = method - req.body = data - req.headers = headers - req.headers['Accept'] = 'text/html' - req.host = self.host - # Call the WSGI app, get the HTTP response - resp = str(req.get_response(self.app)) - # For some reason, the response doesn't have "HTTP/1.0 " prepended; I - # guess that's a function the web server usually provides. 
- resp = "HTTP/1.0 %s" % resp - sock = FakeHttplibSocket(resp) - self.http_response = httplib.HTTPResponse(sock) - self.http_response.begin() - - def getresponse(self): - return self.http_response - - def close(self): - """Required for compatibility with boto/tornado""" - pass - - -class XmlConversionTestCase(test.TrialTestCase): - """Unit test api xml conversion""" - def test_number_conversion(self): - conv = apirequest._try_convert - self.assertEqual(conv('None'), None) - self.assertEqual(conv('True'), True) - self.assertEqual(conv('False'), False) - self.assertEqual(conv('0'), 0) - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('3.14'), 3.14) - self.assertEqual(conv('-57.12'), -57.12) - self.assertEqual(conv('0x57'), 0x57) - self.assertEqual(conv('-0x57'), -0x57) - self.assertEqual(conv('-'), '-') - self.assertEqual(conv('-0'), 0) - - -class ApiEc2TestCase(test.TrialTestCase): - """Unit test for the cloud controller on an EC2 API""" - def setUp(self): - super(ApiEc2TestCase, self).setUp() - - self.manager = manager.AuthManager() - - self.host = '127.0.0.1' - - self.app = api.API('ec2') - - def expect_http(self, host=None, is_secure=False): - """Returns a new EC2 connection""" - self.ec2 = boto.connect_ec2( - aws_access_key_id='fake', - aws_secret_access_key='fake', - is_secure=False, - region=regioninfo.RegionInfo(None, 'test', self.host), - port=8773, - path='/services/Cloud') - - self.mox.StubOutWithMock(self.ec2, 'new_http_connection') - http = FakeHttplibConnection( - self.app, '%s:8773' % (self.host), False) - # pylint: disable-msg=E1103 - self.ec2.new_http_connection(host, is_secure).AndReturn(http) - return http - - def test_describe_instances(self): - """Test that, after creating a user and a project, the describe - instances call to the API works properly""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - self.assertEqual(self.ec2.get_all_instances(), []) - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_get_all_key_pairs(self): - """Test that, after creating a user and project and generating - a key pair, that the API call to list key pairs works properly""" - self.expect_http() - self.mox.ReplayAll() - keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ - for x in range(random.randint(4, 8))) - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - # NOTE(vish): create depends on pool, so call helper directly - cloud._gen_key(context.get_admin_context(), user.id, keyname) - - rv = self.ec2.get_all_key_pairs() - results = [k for k in rv if k.name == keyname] - self.assertEquals(len(results), 1) - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_get_all_security_groups(self): - """Test that we can retrieve security groups""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - rv = self.ec2.get_all_security_groups() - - self.assertEquals(len(rv), 1) - self.assertEquals(rv[0].name, 'default') - - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_create_delete_security_group(self): - """Test that we can create a security group""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project 
= self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - self.ec2.create_security_group(security_group_name, 'test group') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - self.assertEquals(len(rv), 2) - self.assertTrue(security_group_name in [group.name for group in rv]) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_authorize_revoke_security_group_cidr(self): - """ - Test that we can add and remove CIDR based rules - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize('tcp', 80, 81, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.revoke('tcp', 80, 81, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - rv = self.ec2.get_all_security_groups() - - self.assertEqual(len(rv), 1) - self.assertEqual(rv[0].name, 'default') - - self.manager.delete_project(project) - self.manager.delete_user(user) - - return - - def test_authorize_revoke_security_group_foreign_group(self): - """ - Test that we can grant and revoke another security group access - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - rand_string = 'sdiuisudfsdcnpaqwertasd' - security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - other_security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - - other_group = self.ec2.create_security_group(other_security_group_name, - 'some 
other group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' % - (other_security_group_name, 'fake')) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - for group in rv: - if group.name == security_group_name: - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - group.revoke(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.manager.delete_project(project) - self.manager.delete_user(user) - - return diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py deleted file mode 100644 index 4508d6721..000000000 --- a/nova/tests/auth_unittest.py +++ /dev/null @@ -1,352 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -from M2Crypto import X509 -import unittest - -from nova import crypto -from nova import flags -from nova import test -from nova.auth import manager -from nova.api.ec2 import cloud - -FLAGS = flags.FLAGS - - -class user_generator(object): - def __init__(self, manager, **user_state): - if 'name' not in user_state: - user_state['name'] = 'test1' - self.manager = manager - self.user = manager.create_user(**user_state) - - def __enter__(self): - return self.user - - def __exit__(self, value, type, trace): - self.manager.delete_user(self.user) - - -class project_generator(object): - def __init__(self, manager, **project_state): - if 'name' not in project_state: - project_state['name'] = 'testproj' - if 'manager_user' not in project_state: - project_state['manager_user'] = 'test1' - self.manager = manager - self.project = manager.create_project(**project_state) - - def __enter__(self): - return self.project - - def __exit__(self, value, type, trace): - self.manager.delete_project(self.project) - - -class user_and_project_generator(object): - def __init__(self, manager, user_state={}, project_state={}): - self.manager = manager - if 'name' not in user_state: - user_state['name'] = 'test1' - if 'name' not in project_state: - project_state['name'] = 'testproj' - if 'manager_user' not in project_state: - project_state['manager_user'] = 'test1' - self.user = manager.create_user(**user_state) - self.project = manager.create_project(**project_state) - - def __enter__(self): - return (self.user, self.project) - - def __exit__(self, value, type, trace): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - - -class AuthManagerTestCase(object): - def setUp(self): - FLAGS.auth_driver = self.auth_driver - super(AuthManagerTestCase, self).setUp() - self.flags(connection_type='fake') - self.manager = manager.AuthManager(new=True) - - def test_create_and_find_user(self): - with user_generator(self.manager): - self.assert_(self.manager.get_user('test1')) - - def test_create_and_find_with_properties(self): - with user_generator(self.manager, name="herbert", secret="classified", - access="private-party"): - u = self.manager.get_user('herbert') - self.assertEqual('herbert', u.id) - self.assertEqual('herbert', u.name) - self.assertEqual('classified', u.secret) - self.assertEqual('private-party', u.access) - - def test_004_signature_is_valid(self): - #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? 
)) - pass - #raise NotImplementedError - - def test_005_can_get_credentials(self): - return - credentials = self.manager.get_user('test1').get_credentials() - self.assertEqual(credentials, - 'export EC2_ACCESS_KEY="access"\n' + - 'export EC2_SECRET_KEY="secret"\n' + - 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + - 'export S3_URL="http://127.0.0.1:3333/"\n' + - 'export EC2_USER_ID="test1"\n') - - def test_can_list_users(self): - with user_generator(self.manager): - with user_generator(self.manager, name="test2"): - users = self.manager.get_users() - self.assert_(filter(lambda u: u.id == 'test1', users)) - self.assert_(filter(lambda u: u.id == 'test2', users)) - self.assert_(not filter(lambda u: u.id == 'test3', users)) - - def test_can_add_and_remove_user_role(self): - with user_generator(self.manager): - self.assertFalse(self.manager.has_role('test1', 'itsec')) - self.manager.add_role('test1', 'itsec') - self.assertTrue(self.manager.has_role('test1', 'itsec')) - self.manager.remove_role('test1', 'itsec') - self.assertFalse(self.manager.has_role('test1', 'itsec')) - - def test_can_create_and_get_project(self): - with user_and_project_generator(self.manager) as (u, p): - self.assert_(self.manager.get_user('test1')) - self.assert_(self.manager.get_user('test1')) - self.assert_(self.manager.get_project('testproj')) - - def test_can_list_projects(self): - with user_and_project_generator(self.manager): - with project_generator(self.manager, name="testproj2"): - projects = self.manager.get_projects() - self.assert_(filter(lambda p: p.name == 'testproj', projects)) - self.assert_(filter(lambda p: p.name == 'testproj2', projects)) - self.assert_(not filter(lambda p: p.name == 'testproj3', - projects)) - - def test_can_create_and_get_project_with_attributes(self): - with user_generator(self.manager): - with project_generator(self.manager, description='A test project'): - project = self.manager.get_project('testproj') - self.assertEqual('A test project', project.description) - - def test_can_create_project_with_manager(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertEqual('test1', project.project_manager_id) - self.assertTrue(self.manager.is_project_manager(user, project)) - - def test_create_project_assigns_manager_to_members(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertTrue(self.manager.is_project_member(user, project)) - - def test_no_extra_project_members(self): - with user_generator(self.manager, name='test2') as baduser: - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.is_project_member(baduser, - project)) - - def test_no_extra_project_managers(self): - with user_generator(self.manager, name='test2') as baduser: - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.is_project_manager(baduser, - project)) - - def test_can_add_user_to_project(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - self.assertTrue(self.manager.is_project_member(user, project)) - - def test_can_remove_user_from_project(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - 
self.assertTrue(self.manager.is_project_member(user, project)) - self.manager.remove_from_project(user, project) - project = self.manager.get_project('testproj') - self.assertFalse(self.manager.is_project_member(user, project)) - - def test_can_add_remove_user_with_role(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - # NOTE(todd): after modifying users you must reload project - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - self.manager.add_role(user, 'developer', project) - self.assertTrue(self.manager.is_project_member(user, project)) - self.manager.remove_from_project(user, project) - project = self.manager.get_project('testproj') - self.assertFalse(self.manager.has_role(user, 'developer', - project)) - self.assertFalse(self.manager.is_project_member(user, project)) - - def test_can_generate_x509(self): - # NOTE(todd): this doesn't assert against the auth manager - # so it probably belongs in crypto_unittest - # but I'm leaving it where I found it. - with user_and_project_generator(self.manager) as (user, project): - # NOTE(todd): Should mention why we must setup controller first - # (somebody please clue me in) - cloud_controller = cloud.CloudController() - cloud_controller.setup() - _key, cert_str = self.manager._generate_x509_cert('test1', - 'testproj') - logging.debug(cert_str) - - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) - cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) - signed_cert = X509.load_cert_string(cert_str) - chain_cert = X509.load_cert_string(full_chain) - int_cert = X509.load_cert_string(int_cert) - cloud_cert = X509.load_cert_string(cloud_cert) - self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) - self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - if not FLAGS.use_intermediate_ca: - self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) - else: - self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) - - def test_adding_role_to_project_is_ignored_unless_added_to_user(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin', project) - # NOTE(todd): it will still show up in get_user_roles(u, project) - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin') - self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) - - def test_add_user_role_doesnt_infect_project_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - - def test_can_list_user_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - roles = self.manager.get_user_roles(user) - self.assertTrue('sysadmin' in roles) - self.assertFalse('netadmin' in roles) - - def test_can_list_project_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.manager.add_role(user, 
'netadmin', project) - project_roles = self.manager.get_user_roles(user, project) - self.assertTrue('sysadmin' in project_roles) - self.assertTrue('netadmin' in project_roles) - # has role should be false user-level role is missing - self.assertFalse(self.manager.has_role(user, 'netadmin', project)) - - def test_can_remove_user_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - self.manager.remove_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin')) - - def test_removing_user_role_hides_it_from_project(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) - self.manager.remove_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - - def test_can_remove_project_role_but_keep_user_role(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - self.manager.remove_role(user, 'sysadmin', project) - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - - def test_can_retrieve_project_by_user(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertEqual(1, len(self.manager.get_projects('test1'))) - - def test_can_modify_project(self): - with user_and_project_generator(self.manager): - with user_generator(self.manager, name='test2'): - self.manager.modify_project('testproj', 'test2', 'new desc') - project = self.manager.get_project('testproj') - self.assertEqual('test2', project.project_manager_id) - self.assertEqual('new desc', project.description) - - def test_can_delete_project(self): - with user_generator(self.manager): - self.manager.create_project('testproj', 'test1') - self.assert_(self.manager.get_project('testproj')) - self.manager.delete_project('testproj') - projectlist = self.manager.get_projects() - self.assert_(not filter(lambda p: p.name == 'testproj', - projectlist)) - - def test_can_delete_user(self): - self.manager.create_user('test1') - self.assert_(self.manager.get_user('test1')) - self.manager.delete_user('test1') - userlist = self.manager.get_users() - self.assert_(not filter(lambda u: u.id == 'test1', userlist)) - - def test_can_modify_users(self): - with user_generator(self.manager): - self.manager.modify_user('test1', 'access', 'secret', True) - user = self.manager.get_user('test1') - self.assertEqual('access', user.access) - self.assertEqual('secret', user.secret) - self.assertTrue(user.is_admin()) - - -class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): - auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' - - def __init__(self, *args, **kwargs): - AuthManagerTestCase.__init__(self) - test.TestCase.__init__(self, *args, **kwargs) - import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 - if FLAGS.flush_db: - logging.info("Flushing redis datastore") - try: - r = fakeldap.Redis.instance() - r.flushdb() - except: - self.skip = True - - -class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): - auth_driver = 'nova.auth.dbdriver.DbDriver' - - -if __name__ == "__main__": - # TODO: Implement use_fake 
as an option - unittest.main() diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py deleted file mode 100644 index 53a762310..000000000 --- a/nova/tests/cloud_unittest.py +++ /dev/null @@ -1,332 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from base64 import b64decode -import json -import logging -from M2Crypto import BIO -from M2Crypto import RSA -import os -import StringIO -import tempfile -import time - -from eventlet import greenthread -from xml.etree import ElementTree - -from nova import context -from nova import crypto -from nova import db -from nova import flags -from nova import rpc -from nova import test -from nova import utils -from nova.auth import manager -from nova.compute import power_state -from nova.api.ec2 import cloud -from nova.objectstore import image - - -FLAGS = flags.FLAGS - -# Temp dirs for working with image attributes through the cloud controller -# (stole this from objectstore_unittest.py) -OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') -IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') -os.makedirs(IMAGES_PATH) - - -class CloudTestCase(test.TestCase): - def setUp(self): - super(CloudTestCase, self).setUp() - self.flags(connection_type='fake', images_path=IMAGES_PATH) - - self.conn = rpc.Connection.instance() - logging.getLogger().setLevel(logging.DEBUG) - - # set up our cloud - self.cloud = cloud.CloudController() - - # set up a service - self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.compute_topic, - proxy=self.compute) - self.compute_consumer.attach_to_eventlet() - self.network = utils.import_object(FLAGS.network_manager) - self.network_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.network_topic, - proxy=self.network) - self.network_consumer.attach_to_eventlet() - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.RequestContext(user=self.user, - project=self.project) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(CloudTestCase, self).tearDown() - - def _create_key(self, name): - # NOTE(vish): create depends on pool, so just call helper directly - return cloud._gen_key(self.context, self.context.user.id, name) - - def test_describe_addresses(self): - """Makes sure describe addresses runs without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, - {'address': address, - 'host': FLAGS.host}) - self.cloud.allocate_address(self.context) - self.cloud.describe_addresses(self.context) - self.cloud.release_address(self.context, - public_ip=address) - 
greenthread.sleep(0.3) - db.floating_ip_destroy(self.context, address) - - def test_associate_disassociate_address(self): - """Verifies associate runs cleanly without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, - {'address': address, - 'host': FLAGS.host}) - self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {}) - fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) - self.cloud.associate_address(self.context, - instance_id=ec2_id, - public_ip=address) - self.cloud.disassociate_address(self.context, - public_ip=address) - self.cloud.release_address(self.context, - public_ip=address) - greenthread.sleep(0.3) - self.network.deallocate_fixed_ip(self.context, fixed) - db.instance_destroy(self.context, inst['id']) - db.floating_ip_destroy(self.context, address) - - def test_describe_volumes(self): - """Makes sure describe_volumes works and filters results.""" - vol1 = db.volume_create(self.context, {}) - vol2 = db.volume_create(self.context, {}) - result = self.cloud.describe_volumes(self.context) - self.assertEqual(len(result['volumeSet']), 2) - result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['ec2_id']]) - self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) - db.volume_destroy(self.context, vol1['id']) - db.volume_destroy(self.context, vol2['id']) - - def test_console_output(self): - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) - instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') - # TODO(soren): We need this until we can stop polling in the rpc code - # for unit tests. 
- greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) - - def test_key_generation(self): - result = self._create_key('test') - private_key = result['private_key'] - key = RSA.load_key_string(private_key, callback=lambda: None) - bio = BIO.MemoryBuffer() - public_key = db.key_pair_get(self.context, - self.context.user.id, - 'test')['public_key'] - key.save_pub_key_bio(bio) - converted = crypto.ssl_pub_to_ssh_pub(bio.read()) - # assert key fields are equal - self.assertEqual(public_key.split(" ")[1].strip(), - converted.split(" ")[1].strip()) - - def test_describe_key_pairs(self): - self._create_key('test1') - self._create_key('test2') - result = self.cloud.describe_key_pairs(self.context) - keys = result["keypairsSet"] - self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) - self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) - - def test_delete_key_pair(self): - self._create_key('test') - self.cloud.delete_key_pair(self.context, 'test') - - def test_run_instances(self): - if FLAGS.connection_type == 'fake': - logging.debug("Can't test instances without a real virtual env.") - return - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) - # TODO: check for proper response - instance_id = rv['reservationSet'][0].keys()[0] - instance = rv['reservationSet'][0][instance_id][0] - logging.debug("Need to watch instance %s until it's running..." % - instance['instance_id']) - while True: - greenthread.sleep(1) - info = self.cloud._get_instance(instance['instance_id']) - logging.debug(info['state']) - if info['state'] == power_state.RUNNING: - break - self.assert_(rv) - - if connection_type != 'fake': - time.sleep(45) # Should use boto for polling here - for reservations in rv['reservationSet']: - # for res_id in reservations.keys(): - # logging.debug(reservations[res_id]) - # for instance in reservations[res_id]: - for instance in reservations[reservations.keys()[0]]: - instance_id = instance['instance_id'] - logging.debug("Terminating instance %s" % instance_id) - rv = yield self.compute.terminate_instance(instance_id) - - def test_instance_update_state(self): - def instance(num): - return { - 'reservation_id': 'r-1', - 'instance_id': 'i-%s' % num, - 'image_id': 'ami-%s' % num, - 'private_dns_name': '10.0.0.%s' % num, - 'dns_name': '10.0.0%s' % num, - 'ami_launch_index': str(num), - 'instance_type': 'fake', - 'availability_zone': 'fake', - 'key_name': None, - 'kernel_id': 'fake', - 'ramdisk_id': 'fake', - 'groups': ['default'], - 'product_codes': None, - 'state': 0x01, - 'user_data': ''} - rv = self.cloud._format_describe_instances(self.context) - self.assert_(len(rv['reservationSet']) == 0) - - # simulate launch of 5 instances - # self.cloud.instances['pending'] = {} - #for i in xrange(5): - # inst = instance(i) - # self.cloud.instances['pending'][inst['instance_id']] = inst - - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - # report 4 nodes each having 1 of the instances - #for i in xrange(4): - # self.cloud.update_state('instances', - # {('node-%s' % i): {('i-%s' % i): - # instance(i)}}) - - # one instance should be pending still - #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) - - # check that the 
reservations collapse - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - - # check that we can get metadata for each instance - #for i in xrange(4): - # data = self.cloud.get_metadata(instance(i)['private_dns_name']) - # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) - - @staticmethod - def _fake_set_image_description(ctxt, image_id, description): - from nova.objectstore import handler - - class req: - pass - - request = req() - request.context = ctxt - request.args = {'image_id': [image_id], - 'description': [description]} - - resource = handler.ImagesResource() - resource.render_POST(request) - - def test_user_editable_image_endpoint(self): - pathdir = os.path.join(FLAGS.images_path, 'ami-testing') - os.mkdir(pathdir) - info = {'isPublic': False} - with open(os.path.join(pathdir, 'info.json'), 'w') as f: - json.dump(info, f) - img = image.Image('ami-testing') - # self.cloud.set_image_description(self.context, 'ami-testing', - # 'Foo Img') - # NOTE(vish): Above won't work unless we start objectstore or create - # a fake version of api/ec2/images.py conn that can - # call methods directly instead of going through boto. - # for now, just cheat and call the method directly - self._fake_set_image_description(self.context, 'ami-testing', - 'Foo Img') - self.assertEqual('Foo Img', img.metadata['description']) - self._fake_set_image_description(self.context, 'ami-testing', '') - self.assertEqual('', img.metadata['description']) - - def test_update_of_instance_display_fields(self): - inst = db.instance_create(self.context, {}) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) - self.cloud.update_instance(self.context, ec2_id, - display_name='c00l 1m4g3') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual('c00l 1m4g3', inst['display_name']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_instance_wont_update_private_fields(self): - inst = db.instance_create(self.context, {}) - self.cloud.update_instance(self.context, inst['id'], - mac_address='DE:AD:BE:EF') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual(None, inst['mac_address']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_volume_display_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], - display_name='c00l v0lum3') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual('c00l v0lum3', vol['display_name']) - db.volume_destroy(self.context, vol['id']) - - def test_update_of_volume_wont_update_private_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], - mountpoint='/not/here') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual(None, vol['mountpoint']) - db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py deleted file mode 100644 index c6353d357..000000000 --- a/nova/tests/compute_unittest.py +++ /dev/null @@ -1,155 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Compute -""" - -import datetime -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.auth import manager -from nova.compute import api as compute_api - - -FLAGS = flags.FLAGS - - -class ComputeTestCase(test.TestCase): - """Test case for compute""" - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(ComputeTestCase, self).setUp() - self.flags(connection_type='fake', - network_manager='nova.network.manager.FlatManager') - self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_api = compute_api.ComputeAPI() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(ComputeTestCase, self).tearDown() - - def _create_instance(self): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - return db.instance_create(self.context, inst)['id'] - - def test_create_instance_defaults_display_name(self): - """Verify that an instance cannot be created without a display_name.""" - cases = [dict(), dict(display_name=None)] - for instance in cases: - ref = self.compute_api.create_instances(self.context, - FLAGS.default_instance_type, None, **instance) - try: - self.assertNotEqual(ref[0].display_name, None) - finally: - db.instance_destroy(self.context, ref[0]['id']) - - def test_create_instance_associates_security_groups(self): - """Make sure create_instances associates security groups""" - values = {'name': 'default', - 'description': 'default', - 'user_id': self.user.id, - 'project_id': self.project.id} - group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instances(self.context, - FLAGS.default_instance_type, None, security_group=['default']) - try: - self.assertEqual(len(ref[0]['security_groups']), 1) - finally: - db.security_group_destroy(self.context, group['id']) - db.instance_destroy(self.context, ref[0]['id']) - - def test_run_terminate(self): - """Make sure it is possible to run and terminate instance""" - instance_id = self._create_instance() - - self.compute.run_instance(self.context, instance_id) - - instances = db.instance_get_all(context.get_admin_context()) - logging.info("Running instances: %s", instances) - self.assertEqual(len(instances), 1) - - self.compute.terminate_instance(self.context, instance_id) - - instances = db.instance_get_all(context.get_admin_context()) - logging.info("After terminating instances: %s", instances) - self.assertEqual(len(instances), 0) - - def test_run_terminate_timestamps(self): - """Make sure timestamps are 
set for launched and destroyed""" - instance_id = self._create_instance() - instance_ref = db.instance_get(self.context, instance_id) - self.assertEqual(instance_ref['launched_at'], None) - self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() - self.compute.run_instance(self.context, instance_id) - instance_ref = db.instance_get(self.context, instance_id) - self.assert_(instance_ref['launched_at'] > launch) - self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() - self.compute.terminate_instance(self.context, instance_id) - self.context = self.context.elevated(True) - instance_ref = db.instance_get(self.context, instance_id) - self.assert_(instance_ref['launched_at'] < terminate) - self.assert_(instance_ref['deleted_at'] > terminate) - - def test_reboot(self): - """Ensure instance can be rebooted""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - self.compute.reboot_instance(self.context, instance_id) - self.compute.terminate_instance(self.context, instance_id) - - def test_console_output(self): - """Make sure we can get console output from instance""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - - console = self.compute.get_console_output(self.context, - instance_id) - self.assert_(console) - self.compute.terminate_instance(self.context, instance_id) - - def test_run_instance_existing(self): - """Ensure failure when running an instance that already exists""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - self.assertRaises(exception.Error, - self.compute.run_instance, - self.context, - instance_id) - self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py deleted file mode 100644 index 707300fcf..000000000 --- a/nova/tests/flags_unittest.py +++ /dev/null @@ -1,102 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import exception -from nova import flags -from nova import test - -FLAGS = flags.FLAGS -flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only') - - -class FlagsTestCase(test.TestCase): - - def setUp(self): - super(FlagsTestCase, self).setUp() - self.FLAGS = flags.FlagValues() - self.global_FLAGS = flags.FLAGS - - def test_define(self): - self.assert_('string' not in self.FLAGS) - self.assert_('int' not in self.FLAGS) - self.assert_('false' not in self.FLAGS) - self.assert_('true' not in self.FLAGS) - - flags.DEFINE_string('string', 'default', 'desc', - flag_values=self.FLAGS) - flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) - flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) - flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) - - self.assert_(self.FLAGS['string']) - self.assert_(self.FLAGS['int']) - self.assert_(self.FLAGS['false']) - self.assert_(self.FLAGS['true']) - self.assertEqual(self.FLAGS.string, 'default') - self.assertEqual(self.FLAGS.int, 1) - self.assertEqual(self.FLAGS.false, False) - self.assertEqual(self.FLAGS.true, True) - - argv = ['flags_test', - '--string', 'foo', - '--int', '2', - '--false', - '--notrue'] - - self.FLAGS(argv) - self.assertEqual(self.FLAGS.string, 'foo') - self.assertEqual(self.FLAGS.int, 2) - self.assertEqual(self.FLAGS.false, True) - self.assertEqual(self.FLAGS.true, False) - - def test_declare(self): - self.assert_('answer' not in self.global_FLAGS) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assert_('answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.answer, 42) - - # Make sure we don't overwrite anything - self.global_FLAGS.answer = 256 - self.assertEqual(self.global_FLAGS.answer, 256) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assertEqual(self.global_FLAGS.answer, 256) - - def test_runtime_and_unknown_flags(self): - self.assert_('runtime_answer' not in self.global_FLAGS) - - argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] - args = self.global_FLAGS(argv) - self.assertEqual(len(args), 2) - self.assertEqual(args[1], 'extra_arg') - - self.assert_('runtime_answer' not in self.global_FLAGS) - - import nova.tests.runtime_flags - - self.assert_('runtime_answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.runtime_answer, 60) - - def test_flag_leak_left(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - FLAGS.flags_unittest = 'bar' - self.assertEqual(FLAGS.flags_unittest, 'bar') - - def test_flag_leak_right(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - FLAGS.flags_unittest = 'bar' - self.assertEqual(FLAGS.flags_unittest, 'bar') diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py deleted file mode 100644 index 3d947427a..000000000 --- a/nova/tests/misc_unittest.py +++ /dev/null @@ -1,55 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from nova import test -from nova.utils import parse_mailmap, str_dict_replace - - -class ProjectTestCase(test.TestCase): - def test_authors_up_to_date(self): - if os.path.exists('../.bzr'): - contributors = set() - - mailmap = parse_mailmap('../.mailmap') - - import bzrlib.workingtree - tree = bzrlib.workingtree.WorkingTree.open('..') - tree.lock_read() - try: - parents = tree.get_parent_ids() - g = tree.branch.repository.get_graph() - for p in parents[1:]: - rev_ids = [r for r, _ in g.iter_ancestry(parents) - if r != "null:"] - revs = tree.branch.repository.get_revisions(rev_ids) - for r in revs: - for author in r.get_apparent_authors(): - email = author.split(' ')[-1] - contributors.add(str_dict_replace(email, mailmap)) - - authors_file = open('../Authors', 'r').read() - - missing = set() - for contributor in contributors: - if not contributor in authors_file: - missing.add(contributor) - - self.assertTrue(len(missing) == 0, - '%r not listed in Authors' % missing) - finally: - tree.unlock() diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py deleted file mode 100644 index 8cf2a5e54..000000000 --- a/nova/tests/quota_unittest.py +++ /dev/null @@ -1,153 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import test -from nova import utils -from nova.auth import manager -from nova.api.ec2 import cloud - - -FLAGS = flags.FLAGS - - -class QuotaTestCase(test.TestCase): - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(QuotaTestCase, self).setUp() - self.flags(connection_type='fake', - quota_instances=2, - quota_cores=4, - quota_volumes=2, - quota_gigabytes=20, - quota_floating_ips=1) - - self.cloud = cloud.CloudController() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('admin', 'admin', 'admin') - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=self.project, - user=self.user) - - def tearDown(self): - manager.AuthManager().delete_project(self.project) - manager.AuthManager().delete_user(self.user) - super(QuotaTestCase, self).tearDown() - - def _create_instance(self, cores=2): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.large' - inst['vcpus'] = cores - inst['mac_address'] = utils.generate_mac() - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self, size=10): - """Create a test volume""" - vol = {} - vol['user_id'] = self.user.id - vol['project_id'] = self.project.id - vol['size'] = size - return db.volume_create(self.context, vol)['id'] - - def test_quota_overrides(self): - """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 2) - db.quota_create(self.context, {'project_id': self.project.id, - 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 4) - db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 10) - db.quota_destroy(self.context, self.project.id) - - def test_too_many_instances(self): - instance_ids = [] - for i in range(FLAGS.quota_instances): - instance_id = self._create_instance() - instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, - self.context, - min_count=1, - max_count=1, - instance_type='m1.small', - image_id='fake') - for instance_id in instance_ids: - db.instance_destroy(self.context, instance_id) - - def test_too_many_cores(self): - instance_ids = [] - instance_id = self._create_instance(cores=4) - instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, - self.context, - min_count=1, - max_count=1, - instance_type='m1.small', - image_id='fake') - for instance_id in instance_ids: - db.instance_destroy(self.context, instance_id) - - def test_too_many_volumes(self): - volume_ids = [] - for i in range(FLAGS.quota_volumes): - volume_id = self._create_volume() - volume_ids.append(volume_id) - self.assertRaises(quota.QuotaError, self.cloud.create_volume, - self.context, - size=10) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_gigabytes(self): - volume_ids = [] - volume_id = self._create_volume(size=20) - 
volume_ids.append(volume_id) - self.assertRaises(quota.QuotaError, - self.cloud.create_volume, - self.context, - size=10) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_addresses(self): - address = '192.168.0.100' - db.floating_ip_create(context.get_admin_context(), - {'address': address, 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.project.id) - # NOTE(vish): This assert never fails. When cloud attempts to - # make an rpc.call, the test just finishes with OK. It - # appears to be something in the magic inline callbacks - # that is breaking. - self.assertRaises(quota.QuotaError, self.cloud.allocate_address, - self.context) - db.floating_ip_destroy(context.get_admin_context(), address) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py deleted file mode 100644 index a2495e65a..000000000 --- a/nova/tests/rpc_unittest.py +++ /dev/null @@ -1,103 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for remote procedure calls using queue -""" -import logging - -from nova import context -from nova import flags -from nova import rpc -from nova import test - - -FLAGS = flags.FLAGS - - -class RpcTestCase(test.TestCase): - """Test cases for rpc""" - def setUp(self): - super(RpcTestCase, self).setUp() - self.conn = rpc.Connection.instance() - self.receiver = TestReceiver() - self.consumer = rpc.AdapterConsumer(connection=self.conn, - topic='test', - proxy=self.receiver) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_call_succeed(self): - """Get a value through rpc call""" - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call - - Uses static methods because we aren't actually storing any state""" - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in""" - logging.debug("Received %s", value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context""" - logging.debug("Received %s", context) - return context.to_dict() - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in""" - raise Exception(value) diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py deleted file mode 100644 index d1756b8fb..000000000 --- a/nova/tests/scheduler_unittest.py +++ /dev/null @@ -1,246 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Tests For Scheduler -""" - -from nova import context -from nova import db -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import manager -from nova.scheduler import driver - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver') - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - - def _create_instance(self): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - inst['vcpus'] = 1 - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['image_id'] = 'ami-test' - vol['reservation_id'] = 'r-fakeres' - vol['size'] = 1 - return db.volume_create(self.context, vol)['id'] - - def test_hosts_are_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(len(hosts), 2) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = 
service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py deleted file mode 100644 index 47c092f8e..000000000 --- a/nova/tests/service_unittest.py +++ /dev/null @@ -1,227 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 
United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for remote procedure calls using queue -""" - -import mox - -from nova import exception -from nova import flags -from nova import rpc -from nova import test -from nova import service -from nova import manager - -FLAGS = flags.FLAGS -flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager", - "Manager for testing") - - -class FakeManager(manager.Manager): - """Fake manager for tests""" - def test_method(self): - return 'manager' - - -class ExtendedService(service.Service): - def test_method(self): - return 'service' - - -class ServiceManagerTestCase(test.TestCase): - """Test cases for Services""" - - def test_attribute_error_for_no_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - self.assertRaises(AttributeError, getattr, serv, 'test_method') - - def test_message_gets_to_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - serv.start() - self.assertEqual(serv.test_method(), 'manager') - - def test_override_manager_method(self): - serv = ExtendedService('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - serv.start() - self.assertEqual(serv.test_method(), 'service') - - -class ServiceTestCase(test.TestCase): - """Test cases for Services""" - - def setUp(self): - super(ServiceTestCase, self).setUp() - self.mox.StubOutWithMock(service, 'db') - - def test_create(self): - host = 'foo' - binary = 'nova-fake' - topic = 'fake' - - # NOTE(vish): Create was moved out of mox replay to make sure that - # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) - - self.mox.StubOutWithMock(rpc, - 'AdapterConsumer', - use_mock_anything=True) - rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) - - rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic='%s.%s' % (topic, host), - proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) - - rpc.AdapterConsumer.attach_to_eventlet() - rpc.AdapterConsumer.attach_to_eventlet() - - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - self.mox.ReplayAll() - - app.start() - app.stop() - self.assert_(app) - - # We're testing sort of weird behavior in how report_state decides - # whether it is disconnected, it looks for a variable on itself called - # 'model_disconnected' and report_state doesn't really do much so this - # these are mostly just for coverage - def test_report_state_no_service(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - service_ref['id']).AndReturn(service_ref) - service.db.service_update(mox.IgnoreArg(), service_ref['id'], - mox.ContainsKeyValue('report_count', 1)) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 'nova.tests.service_unittest.FakeManager') - serv.start() - serv.report_state() - - def test_report_state_newly_disconnected(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - mox.IgnoreArg()).AndRaise(Exception()) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 'nova.tests.service_unittest.FakeManager') - serv.start() - serv.report_state() - self.assert_(serv.model_disconnected) - - def test_report_state_newly_connected(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - service_ref['id']).AndReturn(service_ref) - service.db.service_update(mox.IgnoreArg(), service_ref['id'], - mox.ContainsKeyValue('report_count', 1)) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 
'nova.tests.service_unittest.FakeManager') - serv.start() - serv.model_disconnected = True - serv.report_state() - - self.assert_(not serv.model_disconnected) diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py new file mode 100644 index 000000000..58fdea3b5 --- /dev/null +++ b/nova/tests/test_access.py @@ -0,0 +1,127 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest +import logging +import webob + +from nova import context +from nova import exception +from nova import flags +from nova import test +from nova.api import ec2 +from nova.auth import manager + + +FLAGS = flags.FLAGS + + +class Context(object): + pass + + +class AccessTestCase(test.TestCase): + def setUp(self): + super(AccessTestCase, self).setUp() + um = manager.AuthManager() + self.context = context.get_admin_context() + # Make test users + self.testadmin = um.create_user('testadmin') + self.testpmsys = um.create_user('testpmsys') + self.testnet = um.create_user('testnet') + self.testsys = um.create_user('testsys') + # Assign some rules + um.add_role('testadmin', 'cloudadmin') + um.add_role('testpmsys', 'sysadmin') + um.add_role('testnet', 'netadmin') + um.add_role('testsys', 'sysadmin') + + # Make a test project + self.project = um.create_project('testproj', + 'testpmsys', + 'a test project', + ['testpmsys', 'testnet', 'testsys']) + self.project.add_role(self.testnet, 'netadmin') + self.project.add_role(self.testsys, 'sysadmin') + #user is set in each test + + def noopWSGIApp(environ, start_response): + start_response('200 OK', []) + return [''] + + self.mw = ec2.Authorizer(noopWSGIApp) + self.mw.action_roles = {'str': { + '_allow_all': ['all'], + '_allow_none': [], + '_allow_project_manager': ['projectmanager'], + '_allow_sys_and_net': ['sysadmin', 'netadmin'], + '_allow_sysadmin': ['sysadmin']}} + + def tearDown(self): + um = manager.AuthManager() + # Delete the test project + um.delete_project('testproj') + # Delete the test user + um.delete_user('testadmin') + um.delete_user('testpmsys') + um.delete_user('testnet') + um.delete_user('testsys') + super(AccessTestCase, self).tearDown() + + def response_status(self, user, methodName): + ctxt = context.RequestContext(user, self.project) + environ = {'ec2.context': ctxt, + 'ec2.controller': 'some string', + 'ec2.action': methodName} + req = webob.Request.blank('/', environ) + resp = req.get_response(self.mw) + return resp.status_int + + def shouldAllow(self, user, methodName): + self.assertEqual(200, self.response_status(user, methodName)) + + def shouldDeny(self, user, methodName): + self.assertEqual(401, self.response_status(user, methodName)) + + def test_001_allow_all(self): + users = [self.testadmin, self.testpmsys, self.testnet, self.testsys] + for user in users: + self.shouldAllow(user, '_allow_all') + + def test_002_allow_none(self): + 
self.shouldAllow(self.testadmin, '_allow_none') + users = [self.testpmsys, self.testnet, self.testsys] + for user in users: + self.shouldDeny(user, '_allow_none') + + def test_003_allow_project_manager(self): + for user in [self.testadmin, self.testpmsys]: + self.shouldAllow(user, '_allow_project_manager') + for user in [self.testnet, self.testsys]: + self.shouldDeny(user, '_allow_project_manager') + + def test_004_allow_sys_and_net(self): + for user in [self.testadmin, self.testnet, self.testsys]: + self.shouldAllow(user, '_allow_sys_and_net') + # denied because it doesn't have the per project sysadmin + for user in [self.testpmsys]: + self.shouldDeny(user, '_allow_sys_and_net') + +if __name__ == "__main__": + # TODO: Implement use_fake as an option + unittest.main() diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py new file mode 100644 index 000000000..33d4cb294 --- /dev/null +++ b/nova/tests/test_api.py @@ -0,0 +1,338 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for the API endpoint""" + +import boto +from boto.ec2 import regioninfo +import httplib +import random +import StringIO +import webob + +from nova import context +from nova import flags +from nova import test +from nova import api +from nova.api.ec2 import cloud +from nova.api.ec2 import apirequest +from nova.auth import manager + + +class FakeHttplibSocket(object): + """a fake socket implementation for httplib.HTTPResponse, trivial""" + def __init__(self, response_string): + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer""" + return self._buffer + + +class FakeHttplibConnection(object): + """A fake httplib.HTTPConnection for boto to use + + requests made via this connection actually get translated and routed into + our WSGI app, we then wait for the response and turn it back into + the httplib.HTTPResponse that boto expects. + """ + def __init__(self, app, host, is_secure=False): + self.app = app + self.host = host + + def request(self, method, path, data, headers): + req = webob.Request.blank(path) + req.method = method + req.body = data + req.headers = headers + req.headers['Accept'] = 'text/html' + req.host = self.host + # Call the WSGI app, get the HTTP response + resp = str(req.get_response(self.app)) + # For some reason, the response doesn't have "HTTP/1.0 " prepended; I + # guess that's a function the web server usually provides. 
+ resp = "HTTP/1.0 %s" % resp + sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(sock) + self.http_response.begin() + + def getresponse(self): + return self.http_response + + def close(self): + """Required for compatibility with boto/tornado""" + pass + + +class XmlConversionTestCase(test.TrialTestCase): + """Unit test api xml conversion""" + def test_number_conversion(self): + conv = apirequest._try_convert + self.assertEqual(conv('None'), None) + self.assertEqual(conv('True'), True) + self.assertEqual(conv('False'), False) + self.assertEqual(conv('0'), 0) + self.assertEqual(conv('42'), 42) + self.assertEqual(conv('3.14'), 3.14) + self.assertEqual(conv('-57.12'), -57.12) + self.assertEqual(conv('0x57'), 0x57) + self.assertEqual(conv('-0x57'), -0x57) + self.assertEqual(conv('-'), '-') + self.assertEqual(conv('-0'), 0) + + +class ApiEc2TestCase(test.TrialTestCase): + """Unit test for the cloud controller on an EC2 API""" + def setUp(self): + super(ApiEc2TestCase, self).setUp() + + self.manager = manager.AuthManager() + + self.host = '127.0.0.1' + + self.app = api.API('ec2') + + def expect_http(self, host=None, is_secure=False): + """Returns a new EC2 connection""" + self.ec2 = boto.connect_ec2( + aws_access_key_id='fake', + aws_secret_access_key='fake', + is_secure=False, + region=regioninfo.RegionInfo(None, 'test', self.host), + port=8773, + path='/services/Cloud') + + self.mox.StubOutWithMock(self.ec2, 'new_http_connection') + http = FakeHttplibConnection( + self.app, '%s:8773' % (self.host), False) + # pylint: disable-msg=E1103 + self.ec2.new_http_connection(host, is_secure).AndReturn(http) + return http + + def test_describe_instances(self): + """Test that, after creating a user and a project, the describe + instances call to the API works properly""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + self.assertEqual(self.ec2.get_all_instances(), []) + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_get_all_key_pairs(self): + """Test that, after creating a user and project and generating + a key pair, that the API call to list key pairs works properly""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + cloud._gen_key(context.get_admin_context(), user.id, keyname) + + rv = self.ec2.get_all_key_pairs() + results = [k for k in rv if k.name == keyname] + self.assertEquals(len(results), 1) + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_get_all_security_groups(self): + """Test that we can retrieve security groups""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake', admin=True) + project = self.manager.create_project('fake', 'fake', 'fake') + + rv = self.ec2.get_all_security_groups() + + self.assertEquals(len(rv), 1) + self.assertEquals(rv[0].name, 'default') + + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_create_delete_security_group(self): + """Test that we can create a security group""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake', admin=True) + project 
= self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEquals(len(rv), 2)
+ self.assertTrue(security_group_name in [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """
+ Test that we can add and remove CIDR based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(int(group.rules[0].from_port), 80)
+ self.assertEquals(int(group.rules[0].to_port), 81)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """
+ Test that we can grant and revoke another security group access
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ rand_string = 'sdiuisudfsdcnpaqwertasd'
+ security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some 
other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
+ (other_security_group_name, 'fake'))
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
new file mode 100644
index 000000000..4508d6721
--- /dev/null
+++ b/nova/tests/test_auth.py
@@ -0,0 +1,352 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License. 
+ +import logging +from M2Crypto import X509 +import unittest + +from nova import crypto +from nova import flags +from nova import test +from nova.auth import manager +from nova.api.ec2 import cloud + +FLAGS = flags.FLAGS + + +class user_generator(object): + def __init__(self, manager, **user_state): + if 'name' not in user_state: + user_state['name'] = 'test1' + self.manager = manager + self.user = manager.create_user(**user_state) + + def __enter__(self): + return self.user + + def __exit__(self, value, type, trace): + self.manager.delete_user(self.user) + + +class project_generator(object): + def __init__(self, manager, **project_state): + if 'name' not in project_state: + project_state['name'] = 'testproj' + if 'manager_user' not in project_state: + project_state['manager_user'] = 'test1' + self.manager = manager + self.project = manager.create_project(**project_state) + + def __enter__(self): + return self.project + + def __exit__(self, value, type, trace): + self.manager.delete_project(self.project) + + +class user_and_project_generator(object): + def __init__(self, manager, user_state={}, project_state={}): + self.manager = manager + if 'name' not in user_state: + user_state['name'] = 'test1' + if 'name' not in project_state: + project_state['name'] = 'testproj' + if 'manager_user' not in project_state: + project_state['manager_user'] = 'test1' + self.user = manager.create_user(**user_state) + self.project = manager.create_project(**project_state) + + def __enter__(self): + return (self.user, self.project) + + def __exit__(self, value, type, trace): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + + +class AuthManagerTestCase(object): + def setUp(self): + FLAGS.auth_driver = self.auth_driver + super(AuthManagerTestCase, self).setUp() + self.flags(connection_type='fake') + self.manager = manager.AuthManager(new=True) + + def test_create_and_find_user(self): + with user_generator(self.manager): + self.assert_(self.manager.get_user('test1')) + + def test_create_and_find_with_properties(self): + with user_generator(self.manager, name="herbert", secret="classified", + access="private-party"): + u = self.manager.get_user('herbert') + self.assertEqual('herbert', u.id) + self.assertEqual('herbert', u.name) + self.assertEqual('classified', u.secret) + self.assertEqual('private-party', u.access) + + def test_004_signature_is_valid(self): + #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? 
)) + pass + #raise NotImplementedError + + def test_005_can_get_credentials(self): + return + credentials = self.manager.get_user('test1').get_credentials() + self.assertEqual(credentials, + 'export EC2_ACCESS_KEY="access"\n' + + 'export EC2_SECRET_KEY="secret"\n' + + 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + + 'export S3_URL="http://127.0.0.1:3333/"\n' + + 'export EC2_USER_ID="test1"\n') + + def test_can_list_users(self): + with user_generator(self.manager): + with user_generator(self.manager, name="test2"): + users = self.manager.get_users() + self.assert_(filter(lambda u: u.id == 'test1', users)) + self.assert_(filter(lambda u: u.id == 'test2', users)) + self.assert_(not filter(lambda u: u.id == 'test3', users)) + + def test_can_add_and_remove_user_role(self): + with user_generator(self.manager): + self.assertFalse(self.manager.has_role('test1', 'itsec')) + self.manager.add_role('test1', 'itsec') + self.assertTrue(self.manager.has_role('test1', 'itsec')) + self.manager.remove_role('test1', 'itsec') + self.assertFalse(self.manager.has_role('test1', 'itsec')) + + def test_can_create_and_get_project(self): + with user_and_project_generator(self.manager) as (u, p): + self.assert_(self.manager.get_user('test1')) + self.assert_(self.manager.get_user('test1')) + self.assert_(self.manager.get_project('testproj')) + + def test_can_list_projects(self): + with user_and_project_generator(self.manager): + with project_generator(self.manager, name="testproj2"): + projects = self.manager.get_projects() + self.assert_(filter(lambda p: p.name == 'testproj', projects)) + self.assert_(filter(lambda p: p.name == 'testproj2', projects)) + self.assert_(not filter(lambda p: p.name == 'testproj3', + projects)) + + def test_can_create_and_get_project_with_attributes(self): + with user_generator(self.manager): + with project_generator(self.manager, description='A test project'): + project = self.manager.get_project('testproj') + self.assertEqual('A test project', project.description) + + def test_can_create_project_with_manager(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertEqual('test1', project.project_manager_id) + self.assertTrue(self.manager.is_project_manager(user, project)) + + def test_create_project_assigns_manager_to_members(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertTrue(self.manager.is_project_member(user, project)) + + def test_no_extra_project_members(self): + with user_generator(self.manager, name='test2') as baduser: + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.is_project_member(baduser, + project)) + + def test_no_extra_project_managers(self): + with user_generator(self.manager, name='test2') as baduser: + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.is_project_manager(baduser, + project)) + + def test_can_add_user_to_project(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + self.assertTrue(self.manager.is_project_member(user, project)) + + def test_can_remove_user_from_project(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + 
self.assertTrue(self.manager.is_project_member(user, project)) + self.manager.remove_from_project(user, project) + project = self.manager.get_project('testproj') + self.assertFalse(self.manager.is_project_member(user, project)) + + def test_can_add_remove_user_with_role(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + # NOTE(todd): after modifying users you must reload project + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + self.manager.add_role(user, 'developer', project) + self.assertTrue(self.manager.is_project_member(user, project)) + self.manager.remove_from_project(user, project) + project = self.manager.get_project('testproj') + self.assertFalse(self.manager.has_role(user, 'developer', + project)) + self.assertFalse(self.manager.is_project_member(user, project)) + + def test_can_generate_x509(self): + # NOTE(todd): this doesn't assert against the auth manager + # so it probably belongs in crypto_unittest + # but I'm leaving it where I found it. + with user_and_project_generator(self.manager) as (user, project): + # NOTE(todd): Should mention why we must setup controller first + # (somebody please clue me in) + cloud_controller = cloud.CloudController() + cloud_controller.setup() + _key, cert_str = self.manager._generate_x509_cert('test1', + 'testproj') + logging.debug(cert_str) + + # Need to verify that it's signed by the right intermediate CA + full_chain = crypto.fetch_ca(project_id='testproj', chain=True) + int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + cloud_cert = crypto.fetch_ca() + logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + signed_cert = X509.load_cert_string(cert_str) + chain_cert = X509.load_cert_string(full_chain) + int_cert = X509.load_cert_string(int_cert) + cloud_cert = X509.load_cert_string(cloud_cert) + self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) + self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) + if not FLAGS.use_intermediate_ca: + self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) + else: + self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) + + def test_adding_role_to_project_is_ignored_unless_added_to_user(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin', project) + # NOTE(todd): it will still show up in get_user_roles(u, project) + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin') + self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) + + def test_add_user_role_doesnt_infect_project_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + + def test_can_list_user_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + + def test_can_list_project_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.manager.add_role(user, 
'netadmin', project) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + # has role should be false user-level role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + def test_can_remove_user_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + self.manager.remove_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin')) + + def test_removing_user_role_hides_it_from_project(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) + self.manager.remove_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + + def test_can_remove_project_role_but_keep_user_role(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + self.manager.remove_role(user, 'sysadmin', project) + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + + def test_can_retrieve_project_by_user(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertEqual(1, len(self.manager.get_projects('test1'))) + + def test_can_modify_project(self): + with user_and_project_generator(self.manager): + with user_generator(self.manager, name='test2'): + self.manager.modify_project('testproj', 'test2', 'new desc') + project = self.manager.get_project('testproj') + self.assertEqual('test2', project.project_manager_id) + self.assertEqual('new desc', project.description) + + def test_can_delete_project(self): + with user_generator(self.manager): + self.manager.create_project('testproj', 'test1') + self.assert_(self.manager.get_project('testproj')) + self.manager.delete_project('testproj') + projectlist = self.manager.get_projects() + self.assert_(not filter(lambda p: p.name == 'testproj', + projectlist)) + + def test_can_delete_user(self): + self.manager.create_user('test1') + self.assert_(self.manager.get_user('test1')) + self.manager.delete_user('test1') + userlist = self.manager.get_users() + self.assert_(not filter(lambda u: u.id == 'test1', userlist)) + + def test_can_modify_users(self): + with user_generator(self.manager): + self.manager.modify_user('test1', 'access', 'secret', True) + user = self.manager.get_user('test1') + self.assertEqual('access', user.access) + self.assertEqual('secret', user.secret) + self.assertTrue(user.is_admin()) + + +class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): + auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' + + def __init__(self, *args, **kwargs): + AuthManagerTestCase.__init__(self) + test.TestCase.__init__(self, *args, **kwargs) + import nova.auth.fakeldap as fakeldap + FLAGS.redis_db = 8 + if FLAGS.flush_db: + logging.info("Flushing redis datastore") + try: + r = fakeldap.Redis.instance() + r.flushdb() + except: + self.skip = True + + +class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): + auth_driver = 'nova.auth.dbdriver.DbDriver' + + +if __name__ == "__main__": + # TODO: Implement use_fake 
as an option + unittest.main() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py new file mode 100644 index 000000000..53a762310 --- /dev/null +++ b/nova/tests/test_cloud.py @@ -0,0 +1,332 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from base64 import b64decode +import json +import logging +from M2Crypto import BIO +from M2Crypto import RSA +import os +import StringIO +import tempfile +import time + +from eventlet import greenthread +from xml.etree import ElementTree + +from nova import context +from nova import crypto +from nova import db +from nova import flags +from nova import rpc +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import power_state +from nova.api.ec2 import cloud +from nova.objectstore import image + + +FLAGS = flags.FLAGS + +# Temp dirs for working with image attributes through the cloud controller +# (stole this from objectstore_unittest.py) +OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') +IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') +os.makedirs(IMAGES_PATH) + + +class CloudTestCase(test.TestCase): + def setUp(self): + super(CloudTestCase, self).setUp() + self.flags(connection_type='fake', images_path=IMAGES_PATH) + + self.conn = rpc.Connection.instance() + logging.getLogger().setLevel(logging.DEBUG) + + # set up our cloud + self.cloud = cloud.CloudController() + + # set up a service + self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.compute_topic, + proxy=self.compute) + self.compute_consumer.attach_to_eventlet() + self.network = utils.import_object(FLAGS.network_manager) + self.network_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.network_topic, + proxy=self.network) + self.network_consumer.attach_to_eventlet() + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('admin', 'admin', 'admin', True) + self.project = self.manager.create_project('proj', 'admin', 'proj') + self.context = context.RequestContext(user=self.user, + project=self.project) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(CloudTestCase, self).tearDown() + + def _create_key(self, name): + # NOTE(vish): create depends on pool, so just call helper directly + return cloud._gen_key(self.context, self.context.user.id, name) + + def test_describe_addresses(self): + """Makes sure describe addresses runs without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, + {'address': address, + 'host': FLAGS.host}) + self.cloud.allocate_address(self.context) + self.cloud.describe_addresses(self.context) + self.cloud.release_address(self.context, + public_ip=address) + greenthread.sleep(0.3) + 
db.floating_ip_destroy(self.context, address) + + def test_associate_disassociate_address(self): + """Verifies associate runs cleanly without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, + {'address': address, + 'host': FLAGS.host}) + self.cloud.allocate_address(self.context) + inst = db.instance_create(self.context, {}) + fixed = self.network.allocate_fixed_ip(self.context, inst['id']) + ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + self.cloud.associate_address(self.context, + instance_id=ec2_id, + public_ip=address) + self.cloud.disassociate_address(self.context, + public_ip=address) + self.cloud.release_address(self.context, + public_ip=address) + greenthread.sleep(0.3) + self.network.deallocate_fixed_ip(self.context, fixed) + db.instance_destroy(self.context, inst['id']) + db.floating_ip_destroy(self.context, address) + + def test_describe_volumes(self): + """Makes sure describe_volumes works and filters results.""" + vol1 = db.volume_create(self.context, {}) + vol2 = db.volume_create(self.context, {}) + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + result = self.cloud.describe_volumes(self.context, + volume_id=[vol2['ec2_id']]) + self.assertEqual(len(result['volumeSet']), 1) + self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) + db.volume_destroy(self.context, vol1['id']) + db.volume_destroy(self.context, vol2['id']) + + def test_console_output(self): + image_id = FLAGS.default_image + instance_type = FLAGS.default_instance_type + max_count = 1 + kwargs = {'image_id': image_id, + 'instance_type': instance_type, + 'max_count': max_count} + rv = yield self.cloud.run_instances(self.context, **kwargs) + instance_id = rv['instancesSet'][0]['instanceId'] + output = yield self.cloud.get_console_output(context=self.context, + instance_id=[instance_id]) + self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. 
+ greenthread.sleep(0.3) + rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + + def test_key_generation(self): + result = self._create_key('test') + private_key = result['private_key'] + key = RSA.load_key_string(private_key, callback=lambda: None) + bio = BIO.MemoryBuffer() + public_key = db.key_pair_get(self.context, + self.context.user.id, + 'test')['public_key'] + key.save_pub_key_bio(bio) + converted = crypto.ssl_pub_to_ssh_pub(bio.read()) + # assert key fields are equal + self.assertEqual(public_key.split(" ")[1].strip(), + converted.split(" ")[1].strip()) + + def test_describe_key_pairs(self): + self._create_key('test1') + self._create_key('test2') + result = self.cloud.describe_key_pairs(self.context) + keys = result["keypairsSet"] + self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) + self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + + def test_delete_key_pair(self): + self._create_key('test') + self.cloud.delete_key_pair(self.context, 'test') + + def test_run_instances(self): + if FLAGS.connection_type == 'fake': + logging.debug("Can't test instances without a real virtual env.") + return + image_id = FLAGS.default_image + instance_type = FLAGS.default_instance_type + max_count = 1 + kwargs = {'image_id': image_id, + 'instance_type': instance_type, + 'max_count': max_count} + rv = yield self.cloud.run_instances(self.context, **kwargs) + # TODO: check for proper response + instance_id = rv['reservationSet'][0].keys()[0] + instance = rv['reservationSet'][0][instance_id][0] + logging.debug("Need to watch instance %s until it's running..." % + instance['instance_id']) + while True: + greenthread.sleep(1) + info = self.cloud._get_instance(instance['instance_id']) + logging.debug(info['state']) + if info['state'] == power_state.RUNNING: + break + self.assert_(rv) + + if connection_type != 'fake': + time.sleep(45) # Should use boto for polling here + for reservations in rv['reservationSet']: + # for res_id in reservations.keys(): + # logging.debug(reservations[res_id]) + # for instance in reservations[res_id]: + for instance in reservations[reservations.keys()[0]]: + instance_id = instance['instance_id'] + logging.debug("Terminating instance %s" % instance_id) + rv = yield self.compute.terminate_instance(instance_id) + + def test_instance_update_state(self): + def instance(num): + return { + 'reservation_id': 'r-1', + 'instance_id': 'i-%s' % num, + 'image_id': 'ami-%s' % num, + 'private_dns_name': '10.0.0.%s' % num, + 'dns_name': '10.0.0%s' % num, + 'ami_launch_index': str(num), + 'instance_type': 'fake', + 'availability_zone': 'fake', + 'key_name': None, + 'kernel_id': 'fake', + 'ramdisk_id': 'fake', + 'groups': ['default'], + 'product_codes': None, + 'state': 0x01, + 'user_data': ''} + rv = self.cloud._format_describe_instances(self.context) + self.assert_(len(rv['reservationSet']) == 0) + + # simulate launch of 5 instances + # self.cloud.instances['pending'] = {} + #for i in xrange(5): + # inst = instance(i) + # self.cloud.instances['pending'][inst['instance_id']] = inst + + #rv = self.cloud._format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + # report 4 nodes each having 1 of the instances + #for i in xrange(4): + # self.cloud.update_state('instances', + # {('node-%s' % i): {('i-%s' % i): + # instance(i)}}) + + # one instance should be pending still + #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) + + # check that the 
reservations collapse + #rv = self.cloud._format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + + # check that we can get metadata for each instance + #for i in xrange(4): + # data = self.cloud.get_metadata(instance(i)['private_dns_name']) + # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) + + @staticmethod + def _fake_set_image_description(ctxt, image_id, description): + from nova.objectstore import handler + + class req: + pass + + request = req() + request.context = ctxt + request.args = {'image_id': [image_id], + 'description': [description]} + + resource = handler.ImagesResource() + resource.render_POST(request) + + def test_user_editable_image_endpoint(self): + pathdir = os.path.join(FLAGS.images_path, 'ami-testing') + os.mkdir(pathdir) + info = {'isPublic': False} + with open(os.path.join(pathdir, 'info.json'), 'w') as f: + json.dump(info, f) + img = image.Image('ami-testing') + # self.cloud.set_image_description(self.context, 'ami-testing', + # 'Foo Img') + # NOTE(vish): Above won't work unless we start objectstore or create + # a fake version of api/ec2/images.py conn that can + # call methods directly instead of going through boto. + # for now, just cheat and call the method directly + self._fake_set_image_description(self.context, 'ami-testing', + 'Foo Img') + self.assertEqual('Foo Img', img.metadata['description']) + self._fake_set_image_description(self.context, 'ami-testing', '') + self.assertEqual('', img.metadata['description']) + + def test_update_of_instance_display_fields(self): + inst = db.instance_create(self.context, {}) + ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + self.cloud.update_instance(self.context, ec2_id, + display_name='c00l 1m4g3') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual('c00l 1m4g3', inst['display_name']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_instance_wont_update_private_fields(self): + inst = db.instance_create(self.context, {}) + self.cloud.update_instance(self.context, inst['id'], + mac_address='DE:AD:BE:EF') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual(None, inst['mac_address']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_volume_display_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, vol['id'], + display_name='c00l v0lum3') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual('c00l v0lum3', vol['display_name']) + db.volume_destroy(self.context, vol['id']) + + def test_update_of_volume_wont_update_private_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, vol['id'], + mountpoint='/not/here') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual(None, vol['mountpoint']) + db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py new file mode 100644 index 000000000..c6353d357 --- /dev/null +++ b/nova/tests/test_compute.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Compute +""" + +import datetime +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import api as compute_api + + +FLAGS = flags.FLAGS + + +class ComputeTestCase(test.TestCase): + """Test case for compute""" + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(ComputeTestCase, self).setUp() + self.flags(connection_type='fake', + network_manager='nova.network.manager.FlatManager') + self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(ComputeTestCase, self).tearDown() + + def _create_instance(self): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + return db.instance_create(self.context, inst)['id'] + + def test_create_instance_defaults_display_name(self): + """Verify that an instance cannot be created without a display_name.""" + cases = [dict(), dict(display_name=None)] + for instance in cases: + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, **instance) + try: + self.assertNotEqual(ref[0].display_name, None) + finally: + db.instance_destroy(self.context, ref[0]['id']) + + def test_create_instance_associates_security_groups(self): + """Make sure create_instances associates security groups""" + values = {'name': 'default', + 'description': 'default', + 'user_id': self.user.id, + 'project_id': self.project.id} + group = db.security_group_create(self.context, values) + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, security_group=['default']) + try: + self.assertEqual(len(ref[0]['security_groups']), 1) + finally: + db.security_group_destroy(self.context, group['id']) + db.instance_destroy(self.context, ref[0]['id']) + + def test_run_terminate(self): + """Make sure it is possible to run and terminate instance""" + instance_id = self._create_instance() + + self.compute.run_instance(self.context, instance_id) + + instances = db.instance_get_all(context.get_admin_context()) + logging.info("Running instances: %s", instances) + self.assertEqual(len(instances), 1) + + self.compute.terminate_instance(self.context, instance_id) + + instances = db.instance_get_all(context.get_admin_context()) + logging.info("After terminating instances: %s", instances) + self.assertEqual(len(instances), 0) + + def test_run_terminate_timestamps(self): + """Make sure timestamps are 
set for launched and destroyed""" + instance_id = self._create_instance() + instance_ref = db.instance_get(self.context, instance_id) + self.assertEqual(instance_ref['launched_at'], None) + self.assertEqual(instance_ref['deleted_at'], None) + launch = datetime.datetime.utcnow() + self.compute.run_instance(self.context, instance_id) + instance_ref = db.instance_get(self.context, instance_id) + self.assert_(instance_ref['launched_at'] > launch) + self.assertEqual(instance_ref['deleted_at'], None) + terminate = datetime.datetime.utcnow() + self.compute.terminate_instance(self.context, instance_id) + self.context = self.context.elevated(True) + instance_ref = db.instance_get(self.context, instance_id) + self.assert_(instance_ref['launched_at'] < terminate) + self.assert_(instance_ref['deleted_at'] > terminate) + + def test_reboot(self): + """Ensure instance can be rebooted""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.reboot_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + + def test_console_output(self): + """Make sure we can get console output from instance""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + + console = self.compute.get_console_output(self.context, + instance_id) + self.assert_(console) + self.compute.terminate_instance(self.context, instance_id) + + def test_run_instance_existing(self): + """Ensure failure when running an instance that already exists""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, + self.compute.run_instance, + self.context, + instance_id) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py new file mode 100644 index 000000000..707300fcf --- /dev/null +++ b/nova/tests/test_flags.py @@ -0,0 +1,102 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import exception +from nova import flags +from nova import test + +FLAGS = flags.FLAGS +flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only') + + +class FlagsTestCase(test.TestCase): + + def setUp(self): + super(FlagsTestCase, self).setUp() + self.FLAGS = flags.FlagValues() + self.global_FLAGS = flags.FLAGS + + def test_define(self): + self.assert_('string' not in self.FLAGS) + self.assert_('int' not in self.FLAGS) + self.assert_('false' not in self.FLAGS) + self.assert_('true' not in self.FLAGS) + + flags.DEFINE_string('string', 'default', 'desc', + flag_values=self.FLAGS) + flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) + + self.assert_(self.FLAGS['string']) + self.assert_(self.FLAGS['int']) + self.assert_(self.FLAGS['false']) + self.assert_(self.FLAGS['true']) + self.assertEqual(self.FLAGS.string, 'default') + self.assertEqual(self.FLAGS.int, 1) + self.assertEqual(self.FLAGS.false, False) + self.assertEqual(self.FLAGS.true, True) + + argv = ['flags_test', + '--string', 'foo', + '--int', '2', + '--false', + '--notrue'] + + self.FLAGS(argv) + self.assertEqual(self.FLAGS.string, 'foo') + self.assertEqual(self.FLAGS.int, 2) + self.assertEqual(self.FLAGS.false, True) + self.assertEqual(self.FLAGS.true, False) + + def test_declare(self): + self.assert_('answer' not in self.global_FLAGS) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assert_('answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.answer, 42) + + # Make sure we don't overwrite anything + self.global_FLAGS.answer = 256 + self.assertEqual(self.global_FLAGS.answer, 256) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assertEqual(self.global_FLAGS.answer, 256) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + + argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + self.assertEqual(len(args), 2) + self.assertEqual(args[1], 'extra_arg') + + self.assert_('runtime_answer' not in self.global_FLAGS) + + import nova.tests.runtime_flags + + self.assert_('runtime_answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.runtime_answer, 60) + + def test_flag_leak_left(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + FLAGS.flags_unittest = 'bar' + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_leak_right(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + FLAGS.flags_unittest = 'bar' + self.assertEqual(FLAGS.flags_unittest, 'bar') diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py new file mode 100644 index 000000000..33c1777d5 --- /dev/null +++ b/nova/tests/test_misc.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
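# The two identical test_flag_leak_* cases above encode an expectation about
# the test fixture rather than about gflags itself: whichever of the pair runs
# first mutates flags_unittest to 'bar', so the second can only see the
# default 'foo' again if the base TestCase restores FLAGS between tests. That
# restore logic is not part of this patch; a self-contained sketch of the
# idea, using an illustrative settings object rather than nova's FLAGS:

import unittest


class _Settings(object):
    flags_unittest = 'foo'


class SavedSettingsTestCase(unittest.TestCase):
    def setUp(self):
        self._saved = _Settings.flags_unittest

    def tearDown(self):
        # Without this restore, whichever test ran second would start
        # from 'bar' and fail its first assertion.
        _Settings.flags_unittest = self._saved

    def test_leak_left(self):
        self.assertEqual(_Settings.flags_unittest, 'foo')
        _Settings.flags_unittest = 'bar'

    def test_leak_right(self):
        self.assertEqual(_Settings.flags_unittest, 'foo')
        _Settings.flags_unittest = 'bar'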
+ +import os + +from nova import test +from nova.utils import parse_mailmap, str_dict_replace + + +class ProjectTestCase(test.TestCase): + def test_authors_up_to_date(self): + if os.path.exists('.bzr'): + contributors = set() + + mailmap = parse_mailmap('.mailmap') + + import bzrlib.workingtree + tree = bzrlib.workingtree.WorkingTree.open('.') + tree.lock_read() + try: + parents = tree.get_parent_ids() + g = tree.branch.repository.get_graph() + for p in parents[1:]: + rev_ids = [r for r, _ in g.iter_ancestry(parents) + if r != "null:"] + revs = tree.branch.repository.get_revisions(rev_ids) + for r in revs: + for author in r.get_apparent_authors(): + email = author.split(' ')[-1] + contributors.add(str_dict_replace(email, mailmap)) + + authors_file = open('Authors', 'r').read() + + missing = set() + for contributor in contributors: + if not contributor in authors_file: + missing.add(contributor) + + self.assertTrue(len(missing) == 0, + '%r not listed in Authors' % missing) + finally: + tree.unlock() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py new file mode 100644 index 000000000..8cf2a5e54 --- /dev/null +++ b/nova/tests/test_quota.py @@ -0,0 +1,153 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
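# test_authors_up_to_date above folds each committer address through the
# project's .mailmap before checking it against the Authors file, via
# parse_mailmap and str_dict_replace from nova.utils (neither helper is shown
# in this patch). The underlying canonicalisation idea, as a self-contained
# sketch with made-up addresses:

def canonical(email, aliases):
    """Map an alias address to its preferred form, or return it unchanged."""
    return aliases.get(email, email)

_aliases = {'<jdoe@old-host.example>': '<jdoe@example.com>'}
_seen = ['<jdoe@old-host.example>', '<ops@example.com>']
assert set(canonical(e, _aliases) for e in _seen) == \
    set(['<jdoe@example.com>', '<ops@example.com>'])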
+ +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import test +from nova import utils +from nova.auth import manager +from nova.api.ec2 import cloud + + +FLAGS = flags.FLAGS + + +class QuotaTestCase(test.TestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(QuotaTestCase, self).setUp() + self.flags(connection_type='fake', + quota_instances=2, + quota_cores=4, + quota_volumes=2, + quota_gigabytes=20, + quota_floating_ips=1) + + self.cloud = cloud.CloudController() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('admin', 'admin', 'admin', True) + self.project = self.manager.create_project('admin', 'admin', 'admin') + self.network = utils.import_object(FLAGS.network_manager) + self.context = context.RequestContext(project=self.project, + user=self.user) + + def tearDown(self): + manager.AuthManager().delete_project(self.project) + manager.AuthManager().delete_user(self.user) + super(QuotaTestCase, self).tearDown() + + def _create_instance(self, cores=2): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.large' + inst['vcpus'] = cores + inst['mac_address'] = utils.generate_mac() + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self, size=10): + """Create a test volume""" + vol = {} + vol['user_id'] = self.user.id + vol['project_id'] = self.project.id + vol['size'] = size + return db.volume_create(self.context, vol)['id'] + + def test_quota_overrides(self): + """Make sure overriding a projects quotas works""" + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 2) + db.quota_create(self.context, {'project_id': self.project.id, + 'instances': 10}) + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 4) + db.quota_update(self.context, self.project.id, {'cores': 100}) + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 10) + db.quota_destroy(self.context, self.project.id) + + def test_too_many_instances(self): + instance_ids = [] + for i in range(FLAGS.quota_instances): + instance_id = self._create_instance() + instance_ids.append(instance_id) + self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake') + for instance_id in instance_ids: + db.instance_destroy(self.context, instance_id) + + def test_too_many_cores(self): + instance_ids = [] + instance_id = self._create_instance(cores=4) + instance_ids.append(instance_id) + self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake') + for instance_id in instance_ids: + db.instance_destroy(self.context, instance_id) + + def test_too_many_volumes(self): + volume_ids = [] + for i in range(FLAGS.quota_volumes): + volume_id = self._create_volume() + volume_ids.append(volume_id) + self.assertRaises(quota.QuotaError, self.cloud.create_volume, + self.context, + size=10) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_gigabytes(self): + volume_ids = [] + volume_id = self._create_volume(size=20) + 
volume_ids.append(volume_id) + self.assertRaises(quota.QuotaError, + self.cloud.create_volume, + self.context, + size=10) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_addresses(self): + address = '192.168.0.100' + db.floating_ip_create(context.get_admin_context(), + {'address': address, 'host': FLAGS.host}) + float_addr = self.network.allocate_floating_ip(self.context, + self.project.id) + # NOTE(vish): This assert never fails. When cloud attempts to + # make an rpc.call, the test just finishes with OK. It + # appears to be something in the magic inline callbacks + # that is breaking. + self.assertRaises(quota.QuotaError, self.cloud.allocate_address, + self.context) + db.floating_ip_destroy(context.get_admin_context(), address) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py new file mode 100644 index 000000000..a2495e65a --- /dev/null +++ b/nova/tests/test_rpc.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" +import logging + +from nova import context +from nova import flags +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class RpcTestCase(test.TestCase): + """Test cases for rpc""" + def setUp(self): + super(RpcTestCase, self).setUp() + self.conn = rpc.Connection.instance() + self.receiver = TestReceiver() + self.consumer = rpc.AdapterConsumer(connection=self.conn, + topic='test', + proxy=self.receiver) + self.consumer.attach_to_eventlet() + self.context = context.get_admin_context() + + def test_call_succeed(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call + + Uses static methods because we aren't actually storing any state""" + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in""" + logging.debug("Received %s", value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context""" + logging.debug("Received %s", context) + return context.to_dict() + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in""" + raise Exception(value) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py new file mode 100644 index 000000000..c56f69698 --- /dev/null +++ b/nova/tests/test_scheduler.py @@ -0,0 +1,246 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Scheduler +""" + +from nova import context +from nova import db +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import manager +from nova.scheduler import driver + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.named_method(ctxt, 'topic', num=7) + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + + def _create_instance(self): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + inst['vcpus'] = 1 + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['image_id'] = 'ami-test' + vol['reservation_id'] = 'r-fakeres' + vol['size'] = 1 + return db.volume_create(self.context, vol)['id'] + + def test_hosts_are_up(self): + """Ensures driver can find the hosts that are up""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(len(hosts), 2) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = 
service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py new file mode 100644 index 000000000..b30838ad7 --- /dev/null +++ b/nova/tests/test_service.py @@ -0,0 +1,227 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States 
Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for remote procedure calls using queue +""" + +import mox + +from nova import exception +from nova import flags +from nova import rpc +from nova import test +from nova import service +from nova import manager + +FLAGS = flags.FLAGS +flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager", + "Manager for testing") + + +class FakeManager(manager.Manager): + """Fake manager for tests""" + def test_method(self): + return 'manager' + + +class ExtendedService(service.Service): + def test_method(self): + return 'service' + + +class ServiceManagerTestCase(test.TestCase): + """Test cases for Services""" + + def test_attribute_error_for_no_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + self.assertRaises(AttributeError, getattr, serv, 'test_method') + + def test_message_gets_to_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'manager') + + def test_override_manager_method(self): + serv = ExtendedService('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'service') + + +class ServiceTestCase(test.TestCase): + """Test cases for Services""" + + def setUp(self): + super(ServiceTestCase, self).setUp() + self.mox.StubOutWithMock(service, 'db') + + def test_create(self): + host = 'foo' + binary = 'nova-fake' + topic = 'fake' + + # NOTE(vish): Create was moved out of mox replay to make sure that + # the looping calls are created in StartService. 
+ app = service.Service.create(host=host, binary=binary) + + self.mox.StubOutWithMock(rpc, + 'AdapterConsumer', + use_mock_anything=True) + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) + + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) + + rpc.AdapterConsumer.attach_to_eventlet() + rpc.AdapterConsumer.attach_to_eventlet() + + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + self.mox.ReplayAll() + + app.start() + app.stop() + self.assert_(app) + + # We're testing sort of weird behavior in how report_state decides + # whether it is disconnected, it looks for a variable on itself called + # 'model_disconnected' and report_state doesn't really do much so this + # these are mostly just for coverage + def test_report_state_no_service(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'nova.tests.test_service.FakeManager') + serv.start() + serv.report_state() + + def test_report_state_newly_disconnected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(Exception()) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'nova.tests.test_service.FakeManager') + serv.start() + serv.report_state() + self.assert_(serv.model_disconnected) + + def test_report_state_newly_connected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 
'nova.tests.test_service.FakeManager') + serv.start() + serv.model_disconnected = True + serv.report_state() + + self.assert_(not serv.model_disconnected) diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py new file mode 100644 index 000000000..75007b9c8 --- /dev/null +++ b/nova/tests/test_twistd.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import StringIO +import sys + +from nova import twistd +from nova import exception +from nova import flags +from nova import test + + +FLAGS = flags.FLAGS + + +class TwistdTestCase(test.TrialTestCase): + def setUp(self): + super(TwistdTestCase, self).setUp() + self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) + sys.stdout = StringIO.StringIO() + + def tearDown(self): + super(TwistdTestCase, self).tearDown() + sys.stdout = sys.__stdout__ + + def test_basic(self): + options = self.Options() + argv = options.parseOptions() + + def test_logfile(self): + options = self.Options() + argv = options.parseOptions(['--logfile=foo']) + self.assertEqual(FLAGS.logfile, 'foo') + + def test_help(self): + options = self.Options() + self.assertRaises(SystemExit, options.parseOptions, ['--help']) + self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py new file mode 100644 index 000000000..85e569858 --- /dev/null +++ b/nova/tests/test_virt.py @@ -0,0 +1,258 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
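# ServiceTestCase above uses mox in its record/replay style: stub out a
# collaborator, record the calls you expect along with their canned results,
# switch to replay mode, run the code under test, and let any unexpected or
# unmet call fail the test. A self-contained sketch of that flow with a
# made-up collaborator (illustrative only):

import mox


class Backend(object):
    def lookup(self, key):
        raise NotImplementedError()

mocker = mox.Mox()
backend = Backend()
mocker.StubOutWithMock(backend, 'lookup')
backend.lookup('answer').AndReturn(42)  # record phase: expect this exact call
mocker.ReplayAll()
assert backend.lookup('answer') == 42   # replay phase: the stub answers
mocker.VerifyAll()                      # fails if a recorded call never came
mocker.UnsetStubs()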
+ +from xml.etree.ElementTree import fromstring as xml_to_tree +from xml.dom.minidom import parseString as xml_to_dom + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova import utils +from nova.api.ec2 import cloud +from nova.auth import manager +from nova.virt import libvirt_conn + +FLAGS = flags.FLAGS +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class LibvirtConnTestCase(test.TestCase): + def setUp(self): + super(LibvirtConnTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + FLAGS.instances_path = '' + + def test_get_uri_and_template(self): + ip = '10.11.12.13' + + instance = {'internal_id': 1, + 'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} + + user_context = context.RequestContext(project=self.project, + user=self.user) + instance_ref = db.instance_create(user_context, instance) + network_ref = self.network.get_network(user_context) + self.network.set_network_host(context.get_admin_context(), + network_ref['id']) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + ctxt = context.get_admin_context() + fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + type_uri_map = {'qemu': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'qemu'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'kvm': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'kvm'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'uml': ('uml:///system', + [(lambda t: t.find('.').get('type'), 'uml'), + (lambda t: t.find('./os/type').text, 'uml')])} + + common_checks = [ + (lambda t: t.find('.').tag, 'domain'), + (lambda t: t.find('./devices/interface/filterref/parameter').\ + get('name'), 'IP'), + (lambda t: t.find('./devices/interface/filterref/parameter').\ + get('value'), '10.11.12.13')] + + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + + uri, _template, _rescue = conn.get_uri_and_templates() + self.assertEquals(uri, expected_uri) + + xml = conn.to_xml(instance_ref) + tree = xml_to_tree(xml) + for i, (check, expected_result) in enumerate(checks): + self.assertEqual(check(tree), + expected_result, + '%s failed check %d' % (xml, i)) + + for i, (check, expected_result) in enumerate(common_checks): + self.assertEqual(check(tree), + expected_result, + '%s failed common check %d' % (xml, i)) + + # Deliberately not just assigning this string to FLAGS.libvirt_uri and + # checking against that later on. This way we make sure the + # implementation doesn't fiddle around with the FLAGS. 
+ testuri = 'something completely different' + FLAGS.libvirt_uri = testuri + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + uri, _template, _rescue = conn.get_uri_and_templates() + self.assertEquals(uri, testuri) + + def tearDown(self): + super(LibvirtConnTestCase, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + + +class NWFilterTestCase(test.TestCase): + + def setUp(self): + super(NWFilterTestCase, self).setUp() + + class Mock(object): + pass + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) + + self.fake_libvirt_connection = Mock() + + self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + + def test_cidr_rule_nwfilter_xml(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + security_group = db.security_group_get_by_name(self.context, + 'fake', + 'testgroup') + + xml = self.fw.security_group_to_nwfilter_xml(security_group.id) + + dom = xml_to_dom(xml) + self.assertEqual(dom.firstChild.tagName, 'filter') + + rules = dom.getElementsByTagName('rule') + self.assertEqual(len(rules), 1) + + # It's supposed to allow inbound traffic. 
+ self.assertEqual(rules[0].getAttribute('action'), 'accept') + self.assertEqual(rules[0].getAttribute('direction'), 'in') + + # Must be lower priority than the base filter (which blocks everything) + self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) + + ip_conditions = rules[0].getElementsByTagName('tcp') + self.assertEqual(len(ip_conditions), 1) + self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') + self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') + self.teardown_security_group() + + def teardown_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.delete_security_group(self.context, 'testgroup') + + def setup_and_return_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + return db.security_group_get_by_name(self.context, 'fake', 'testgroup') + + def test_creates_base_rule_first(self): + # These come pre-defined by libvirt + self.defined_filters = ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'] + + self.recursive_depends = {} + for f in self.defined_filters: + self.recursive_depends[f] = [] + + def _filterDefineXMLMock(xml): + dom = xml_to_dom(xml) + name = dom.firstChild.getAttribute('name') + self.recursive_depends[name] = [] + for f in dom.getElementsByTagName('filterref'): + ref = f.getAttribute('filter') + self.assertTrue(ref in self.defined_filters, + ('%s referenced filter that does ' + + 'not yet exist: %s') % (name, ref)) + dependencies = [ref] + self.recursive_depends[ref] + self.recursive_depends[name] += dependencies + + self.defined_filters.append(name) + return True + + self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock + + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake'}) + inst_id = instance_ref['id'] + + def _ensure_all_called(): + instance_filter = 'nova-instance-%s' % instance_ref['name'] + secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] + for required in [secgroup_filter, 'allow-dhcp-server', + 'no-arp-spoofing', 'no-ip-spoofing', + 'no-mac-spoofing']: + self.assertTrue(required in + self.recursive_depends[instance_filter], + "Instance's filter does not include %s" % + required) + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) + + d = self.fw.setup_nwfilters_for_instance(instance) + _ensure_all_called() + self.teardown_security_group() + return d diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py new file mode 100644 index 000000000..b13455fb0 --- /dev/null +++ b/nova/tests/test_volume.py @@ -0,0 +1,175 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Volume Code. + +""" +import logging + +from nova import context +from nova import exception +from nova import db +from nova import flags +from nova import test +from nova import utils + +FLAGS = flags.FLAGS + + +class VolumeTestCase(test.TestCase): + """Test Case for volumes.""" + + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(VolumeTestCase, self).setUp() + self.compute = utils.import_object(FLAGS.compute_manager) + self.flags(connection_type='fake') + self.volume = utils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + + @staticmethod + def _create_volume(size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol)['id'] + + def test_create_delete_volume(self): + """Test volume can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), + volume_id).id) + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.NotFound, + db.volume_get, + self.context, + volume_id) + + def test_too_big_volume(self): + """Ensure failure if a too large of a volume is requested.""" + # FIXME(vish): validation needs to move into the data layer in + # volume_create + return True + try: + volume_id = self._create_volume('1001') + self.volume.create_volume(self.context, volume_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + + def test_too_many_volumes(self): + """Ensure that NoMoreTargets is raised when we run out of volumes.""" + vols = [] + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + vols.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(db.NoMoreTargets, + self.volume.create_volume, + self.context, + volume_id) + db.volume_destroy(context.get_admin_context(), volume_id) + for volume_id in vols: + self.volume.delete_volume(self.context, volume_id) + + def test_run_attach_detach_volume(self): + """Make sure volume can be attached and detached from instance.""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = 'fake' + inst['project_id'] = 'fake' + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + instance_id = db.instance_create(self.context, inst)['id'] + mountpoint = "/dev/sdf" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + if FLAGS.fake_tests: + db.volume_attached(self.context, volume_id, instance_id, + mountpoint) + else: + self.compute.attach_volume(self.context, + instance_id, + volume_id, + mountpoint) + vol = db.volume_get(context.get_admin_context(), 
volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + instance_ref = db.volume_get_instance(self.context, volume_id) + self.assertEqual(instance_ref['id'], instance_id) + + self.assertRaises(exception.Error, + self.volume.delete_volume, + self.context, + volume_id) + if FLAGS.fake_tests: + db.volume_detached(self.context, volume_id) + else: + self.compute.detach_volume(self.context, + instance_id, + volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.Error, + db.volume_get, + self.context, + volume_id) + db.instance_destroy(self.context, instance_id) + + def test_concurrent_volumes_get_different_targets(self): + """Ensure multiple concurrent volumes get different targets.""" + volume_ids = [] + targets = [] + + def _check(volume_id): + """Make sure targets aren't duplicated.""" + volume_ids.append(volume_id) + admin_context = context.get_admin_context() + iscsi_target = db.volume_get_iscsi_target_num(admin_context, + volume_id) + self.assert_(iscsi_target not in targets) + targets.append(iscsi_target) + logging.debug("Target %s allocated", iscsi_target) + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume_id = self._create_volume() + d = self.volume.create_volume(self.context, volume_id) + _check(d) + for volume_id in volume_ids: + self.volume.delete_volume(self.context, volume_id) + + def test_multi_node(self): + # TODO(termie): Figure out how to test with two nodes, + # each of them having a different FLAG for storage_node + # This will allow us to test cross-node interactions + pass diff --git a/nova/tests/twistd_unittest.py b/nova/tests/twistd_unittest.py deleted file mode 100644 index 75007b9c8..000000000 --- a/nova/tests/twistd_unittest.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import StringIO -import sys - -from nova import twistd -from nova import exception -from nova import flags -from nova import test - - -FLAGS = flags.FLAGS - - -class TwistdTestCase(test.TrialTestCase): - def setUp(self): - super(TwistdTestCase, self).setUp() - self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) - sys.stdout = StringIO.StringIO() - - def tearDown(self): - super(TwistdTestCase, self).tearDown() - sys.stdout = sys.__stdout__ - - def test_basic(self): - options = self.Options() - argv = options.parseOptions() - - def test_logfile(self): - options = self.Options() - argv = options.parseOptions(['--logfile=foo']) - self.assertEqual(FLAGS.logfile, 'foo') - - def test_help(self): - options = self.Options() - self.assertRaises(SystemExit, options.parseOptions, ['--help']) - self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py deleted file mode 100644 index 85e569858..000000000 --- a/nova/tests/virt_unittest.py +++ /dev/null @@ -1,258 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from xml.etree.ElementTree import fromstring as xml_to_tree -from xml.dom.minidom import parseString as xml_to_dom - -from nova import context -from nova import db -from nova import flags -from nova import test -from nova import utils -from nova.api.ec2 import cloud -from nova.auth import manager -from nova.virt import libvirt_conn - -FLAGS = flags.FLAGS -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class LibvirtConnTestCase(test.TestCase): - def setUp(self): - super(LibvirtConnTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.network = utils.import_object(FLAGS.network_manager) - FLAGS.instances_path = '' - - def test_get_uri_and_template(self): - ip = '10.11.12.13' - - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} - - user_context = context.RequestContext(project=self.project, - user=self.user) - instance_ref = db.instance_create(user_context, instance) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) - - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) - - type_uri_map = {'qemu': ('qemu:///system', - [(lambda t: t.find('.').get('type'), 'qemu'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'kvm': ('qemu:///system', - [(lambda t: 
t.find('.').get('type'), 'kvm'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'uml': ('uml:///system', - [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} - - common_checks = [ - (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] - - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) - - uri, _template, _rescue = conn.get_uri_and_templates() - self.assertEquals(uri, expected_uri) - - xml = conn.to_xml(instance_ref) - tree = xml_to_tree(xml) - for i, (check, expected_result) in enumerate(checks): - self.assertEqual(check(tree), - expected_result, - '%s failed check %d' % (xml, i)) - - for i, (check, expected_result) in enumerate(common_checks): - self.assertEqual(check(tree), - expected_result, - '%s failed common check %d' % (xml, i)) - - # Deliberately not just assigning this string to FLAGS.libvirt_uri and - # checking against that later on. This way we make sure the - # implementation doesn't fiddle around with the FLAGS. - testuri = 'something completely different' - FLAGS.libvirt_uri = testuri - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() - self.assertEquals(uri, testuri) - - def tearDown(self): - super(LibvirtConnTestCase, self).tearDown() - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - - -class NWFilterTestCase(test.TestCase): - - def setUp(self): - super(NWFilterTestCase, self).setUp() - - class Mock(object): - pass - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext(self.user, self.project) - - self.fake_libvirt_connection = Mock() - - self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - - def test_cidr_rule_nwfilter_xml(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - security_group = db.security_group_get_by_name(self.context, - 'fake', - 'testgroup') - - xml = self.fw.security_group_to_nwfilter_xml(security_group.id) - - dom = xml_to_dom(xml) - self.assertEqual(dom.firstChild.tagName, 'filter') - - rules = dom.getElementsByTagName('rule') - self.assertEqual(len(rules), 1) - - # It's supposed to allow inbound traffic. 
- self.assertEqual(rules[0].getAttribute('action'), 'accept') - self.assertEqual(rules[0].getAttribute('direction'), 'in') - - # Must be lower priority than the base filter (which blocks everything) - self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) - - ip_conditions = rules[0].getElementsByTagName('tcp') - self.assertEqual(len(ip_conditions), 1) - self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') - self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') - self.teardown_security_group() - - def teardown_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.delete_security_group(self.context, 'testgroup') - - def setup_and_return_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - return db.security_group_get_by_name(self.context, 'fake', 'testgroup') - - def test_creates_base_rule_first(self): - # These come pre-defined by libvirt - self.defined_filters = ['no-mac-spoofing', - 'no-ip-spoofing', - 'no-arp-spoofing', - 'allow-dhcp-server'] - - self.recursive_depends = {} - for f in self.defined_filters: - self.recursive_depends[f] = [] - - def _filterDefineXMLMock(xml): - dom = xml_to_dom(xml) - name = dom.firstChild.getAttribute('name') - self.recursive_depends[name] = [] - for f in dom.getElementsByTagName('filterref'): - ref = f.getAttribute('filter') - self.assertTrue(ref in self.defined_filters, - ('%s referenced filter that does ' + - 'not yet exist: %s') % (name, ref)) - dependencies = [ref] + self.recursive_depends[ref] - self.recursive_depends[name] += dependencies - - self.defined_filters.append(name) - return True - - self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock - - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake'}) - inst_id = instance_ref['id'] - - def _ensure_all_called(): - instance_filter = 'nova-instance-%s' % instance_ref['name'] - secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] - for required in [secgroup_filter, 'allow-dhcp-server', - 'no-arp-spoofing', 'no-ip-spoofing', - 'no-mac-spoofing']: - self.assertTrue(required in - self.recursive_depends[instance_filter], - "Instance's filter does not include %s" % - required) - - self.security_group = self.setup_and_return_security_group() - - db.instance_add_security_group(self.context, inst_id, - self.security_group.id) - instance = db.instance_get(self.context, inst_id) - - d = self.fw.setup_nwfilters_for_instance(instance) - _ensure_all_called() - self.teardown_security_group() - return d diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py deleted file mode 100644 index b13455fb0..000000000 --- a/nova/tests/volume_unittest.py +++ /dev/null @@ -1,175 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Volume Code. - -""" -import logging - -from nova import context -from nova import exception -from nova import db -from nova import flags -from nova import test -from nova import utils - -FLAGS = flags.FLAGS - - -class VolumeTestCase(test.TestCase): - """Test Case for volumes.""" - - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(VolumeTestCase, self).setUp() - self.compute = utils.import_object(FLAGS.compute_manager) - self.flags(connection_type='fake') - self.volume = utils.import_object(FLAGS.volume_manager) - self.context = context.get_admin_context() - - @staticmethod - def _create_volume(size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol)['id'] - - def test_create_delete_volume(self): - """Test volume can be created and deleted.""" - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), - volume_id).id) - - self.volume.delete_volume(self.context, volume_id) - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume_id) - - def test_too_big_volume(self): - """Ensure failure if a too large of a volume is requested.""" - # FIXME(vish): validation needs to move into the data layer in - # volume_create - return True - try: - volume_id = self._create_volume('1001') - self.volume.create_volume(self.context, volume_id) - self.fail("Should have thrown TypeError") - except TypeError: - pass - - def test_too_many_volumes(self): - """Ensure that NoMoreTargets is raised when we run out of volumes.""" - vols = [] - total_slots = FLAGS.iscsi_num_targets - for _index in xrange(total_slots): - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - vols.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(db.NoMoreTargets, - self.volume.create_volume, - self.context, - volume_id) - db.volume_destroy(context.get_admin_context(), volume_id) - for volume_id in vols: - self.volume.delete_volume(self.context, volume_id) - - def test_run_attach_detach_volume(self): - """Make sure volume can be attached and detached from instance.""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = 'fake' - inst['project_id'] = 'fake' - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - instance_id = db.instance_create(self.context, inst)['id'] - mountpoint = "/dev/sdf" - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - if FLAGS.fake_tests: - db.volume_attached(self.context, volume_id, instance_id, - mountpoint) - else: - self.compute.attach_volume(self.context, - instance_id, - volume_id, - mountpoint) - vol = db.volume_get(context.get_admin_context(), 
volume_id) - self.assertEqual(vol['status'], "in-use") - self.assertEqual(vol['attach_status'], "attached") - self.assertEqual(vol['mountpoint'], mountpoint) - instance_ref = db.volume_get_instance(self.context, volume_id) - self.assertEqual(instance_ref['id'], instance_id) - - self.assertRaises(exception.Error, - self.volume.delete_volume, - self.context, - volume_id) - if FLAGS.fake_tests: - db.volume_detached(self.context, volume_id) - else: - self.compute.detach_volume(self.context, - instance_id, - volume_id) - vol = db.volume_get(self.context, volume_id) - self.assertEqual(vol['status'], "available") - - self.volume.delete_volume(self.context, volume_id) - self.assertRaises(exception.Error, - db.volume_get, - self.context, - volume_id) - db.instance_destroy(self.context, instance_id) - - def test_concurrent_volumes_get_different_targets(self): - """Ensure multiple concurrent volumes get different targets.""" - volume_ids = [] - targets = [] - - def _check(volume_id): - """Make sure targets aren't duplicated.""" - volume_ids.append(volume_id) - admin_context = context.get_admin_context() - iscsi_target = db.volume_get_iscsi_target_num(admin_context, - volume_id) - self.assert_(iscsi_target not in targets) - targets.append(iscsi_target) - logging.debug("Target %s allocated", iscsi_target) - total_slots = FLAGS.iscsi_num_targets - for _index in xrange(total_slots): - volume_id = self._create_volume() - d = self.volume.create_volume(self.context, volume_id) - _check(d) - for volume_id in volume_ids: - self.volume.delete_volume(self.context, volume_id) - - def test_multi_node(self): - # TODO(termie): Figure out how to test with two nodes, - # each of them having a different FLAG for storage_node - # This will allow us to test cross-node interactions - pass diff --git a/run_tests.py b/run_tests.py deleted file mode 100644 index 6a4b7f1ab..000000000 --- a/run_tests.py +++ /dev/null @@ -1,125 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is our basic test running framework based on Twisted's Trial. - -Usage Examples: - - # to run all the tests - python run_tests.py - - # to run a specific test suite imported here - python run_tests.py NodeConnectionTestCase - - # to run a specific test imported here - python run_tests.py NodeConnectionTestCase.test_reboot - - # to run some test suites elsewhere - python run_tests.py nova.tests.node_unittest - python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase - -Due to our use of multiprocessing it we frequently get some ignorable -'Interrupted system call' exceptions after test completion. 
- -""" - -import eventlet -eventlet.monkey_patch() - -import __main__ -import gettext -import os -import sys - -gettext.install('nova', unicode=1) - -from twisted.scripts import trial as trial_script - -from nova import flags -from nova import twistd - -from nova.tests.access_unittest import * -from nova.tests.api_unittest import * -from nova.tests.auth_unittest import * -from nova.tests.cloud_unittest import * -from nova.tests.compute_unittest import * -from nova.tests.flags_unittest import * -from nova.tests.misc_unittest import * -from nova.tests.network_unittest import * -#from nova.tests.objectstore_unittest import * -from nova.tests.quota_unittest import * -from nova.tests.rpc_unittest import * -from nova.tests.scheduler_unittest import * -from nova.tests.service_unittest import * -from nova.tests.twistd_unittest import * -from nova.tests.virt_unittest import * -from nova.tests.volume_unittest import * - - -FLAGS = flags.FLAGS -flags.DEFINE_bool('flush_db', True, - 'Flush the database before running fake tests') -flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs.' - ' Default = "run_tests.err.log"') - - -if __name__ == '__main__': - OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) - config = OptionsClass() - argv = config.parseOptions() - - FLAGS.verbose = True - - # TODO(termie): these should make a call instead of doing work on import - if FLAGS.fake_tests: - from nova.tests.fake_flags import * - else: - from nova.tests.real_flags import * - - # Establish redirect for STDERR - sys.stderr.flush() - err = open(FLAGS.tests_stderr, 'w+', 0) - os.dup2(err.fileno(), sys.stderr.fileno()) - - if len(argv) == 1 and len(config['tests']) == 0: - # If no tests were specified run the ones imported in this file - # NOTE(termie): "tests" is not a flag, just some Trial related stuff - config['tests'].update(['__main__']) - elif len(config['tests']): - # If we specified tests check first whether they are in __main__ - for arg in config['tests']: - key = arg.split('.')[0] - if hasattr(__main__, key): - config['tests'].remove(arg) - config['tests'].add('__main__.%s' % arg) - - trial_script._initialDebugSetup(config) - trialRunner = trial_script._makeRunner(config) - suite = trial_script._getSuite(config) - if config['until-failure']: - test_result = trialRunner.runUntilFailure(suite) - else: - test_result = trialRunner.run(suite) - if config.tracer: - sys.settrace(None) - results = config.tracer.results() - results.write_results(show_missing=1, summary=False, - coverdir=config.coverdir) - sys.exit(not test_result.wasSuccessful()) -- cgit From b1d4579404f9e49fcdea23c21733fdf65edc1da3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 16 Dec 2010 17:29:26 -0800 Subject: Fixed network test (thanks Vish!) and fixed run_tests.sh. 
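(Note on this patch: the rename from nova/tests/network_unittest.py to nova/tests/test_network.py is what lets the new nose-based run_tests.sh pick the module up automatically — nose discovers tests by module name rather than via the explicit import list the old Trial-based run_tests.py used, roughly following the stdlib "test*" naming convention. A minimal sketch of that discovery rule, using fnmatch as a stand-in for nose's real matcher, which is a regex and slightly more permissive:)

```python
# Rough illustration of name-based test discovery. fnmatch with
# 'test*.py' approximates nose's default matcher; the real matcher is
# a regex, but the effect on these module names is the same.
import fnmatch

modules = ['network_unittest.py', 'test_network.py', 'auth_unittest.py']
print(fnmatch.filter(modules, 'test*.py'))  # only 'test_network.py' is collected
```

(With the old *_unittest.py names, nothing would be collected without listing each module by hand, which is exactly what run_tests.py did and what this patch removes the need for.)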
--- bin/nova-dhcpbridge | 1 - nova/tests/network_unittest.py | 347 ----------------------------------------- nova/tests/test_network.py | 347 +++++++++++++++++++++++++++++++++++++++++ run_tests.sh | 12 +- 4 files changed, 355 insertions(+), 352 deletions(-) delete mode 100644 nova/tests/network_unittest.py create mode 100644 nova/tests/test_network.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 81b9b6dd3..828aba3d1 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -110,7 +110,6 @@ def main(): FLAGS.num_networks = 5 path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', - '_trial_temp', 'nova.sqlite')) FLAGS.sql_connection = 'sqlite:///%s' % path action = argv[1] diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py deleted file mode 100644 index bcac20585..000000000 --- a/nova/tests/network_unittest.py +++ /dev/null @@ -1,347 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for network code -""" -import IPy -import os -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.auth import manager - -FLAGS = flags.FLAGS - - -class NetworkTestCase(test.TestCase): - """Test cases for network code""" - def setUp(self): - super(NetworkTestCase, self).setUp() - # NOTE(vish): if you change these flags, make sure to change the - # flags in the corresponding section in nova-dhcpbridge - self.flags(connection_type='fake', - fake_network=True, - network_size=16, - num_networks=5) - logging.getLogger().setLevel(logging.DEBUG) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('netuser', 'netuser', 'netuser') - self.projects = [] - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=None, user=self.user) - for i in range(5): - name = 'project%s' % i - project = self.manager.create_project(name, 'netuser', name) - self.projects.append(project) - # create the necessary network data for the project - user_context = context.RequestContext(project=self.projects[i], - user=self.user) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) - instance_ref = self._create_instance(0) - self.instance_id = instance_ref['id'] - instance_ref = self._create_instance(1) - self.instance2_id = instance_ref['id'] - - def tearDown(self): - super(NetworkTestCase, self).tearDown() - # TODO(termie): this should really be instantiating clean datastores - # in between runs, one failure kills all the tests - db.instance_destroy(context.get_admin_context(), self.instance_id) - db.instance_destroy(context.get_admin_context(), self.instance2_id) - for project in 
self.projects: - self.manager.delete_project(project) - self.manager.delete_user(self.user) - - def _create_instance(self, project_num, mac=None): - if not mac: - mac = utils.generate_mac() - project = self.projects[project_num] - self.context._project = project - self.context.project_id = project.id - return db.instance_create(self.context, - {'project_id': project.id, - 'mac_address': mac}) - - def _create_address(self, project_num, instance_id=None): - """Create an address in given project num""" - if instance_id is None: - instance_id = self.instance_id - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - return self.network.allocate_fixed_ip(self.context, instance_id) - - def _deallocate_address(self, project_num, address): - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - self.network.deallocate_fixed_ip(self.context, address) - - def test_public_network_association(self): - """Makes sure that we can allocaate a public ip""" - # TODO(vish): better way of adding floating ips - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = IPy.IP(flags.FLAGS.floating_range) - address = str(pubnet[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.projects[0].id) - fix_addr = self._create_address(0) - lease_ip(fix_addr) - self.assertEqual(float_addr, str(pubnet[0])) - self.network.associate_floating_ip(self.context, float_addr, fix_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, float_addr) - self.network.disassociate_floating_ip(self.context, float_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - self.network.deallocate_floating_ip(self.context, float_addr) - self.network.deallocate_fixed_ip(self.context, fix_addr) - release_ip(fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes sure that we can allocate and deallocate a fixed ip""" - address = self._create_address(0) - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - lease_ip(address) - self._deallocate_address(0, address) - - # Doesn't go away until it's dhcp released - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - - release_ip(address) - self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) - self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) - - # Addresses are allocated before they're issued - lease_ip(address) - lease_ip(address2) - - self._deallocate_address(0, address) - release_ip(address) - self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(is_allocated_in_project(address2, 
self.projects[1].id)) - - self._deallocate_address(1, address2) - release_ip(address2) - self.assertFalse(is_allocated_in_project(address2, - self.projects[1].id)) - - def test_subnet_edge(self): - """Makes sure that private ips don't overlap""" - first = self._create_address(0) - lease_ip(first) - instance_ids = [] - for i in range(1, 5): - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address2 = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address3 = self._create_address(i, instance_ref['id']) - lease_ip(address) - lease_ip(address2) - lease_ip(address3) - self.context._project = self.projects[i] - self.context.project_id = self.projects[i].id - self.assertFalse(is_allocated_in_project(address, - self.projects[0].id)) - self.assertFalse(is_allocated_in_project(address2, - self.projects[0].id)) - self.assertFalse(is_allocated_in_project(address3, - self.projects[0].id)) - self.network.deallocate_fixed_ip(self.context, address) - self.network.deallocate_fixed_ip(self.context, address2) - self.network.deallocate_fixed_ip(self.context, address3) - release_ip(address) - release_ip(address2) - release_ip(address3) - for instance_id in instance_ids: - db.instance_destroy(context.get_admin_context(), instance_id) - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - self.network.deallocate_fixed_ip(self.context, first) - self._deallocate_address(0, first) - release_ip(first) - - def test_vpn_ip_and_port_looks_valid(self): - """Ensure the vpn ip and port are reasonable""" - self.assert_(self.projects[0].vpn_ip) - self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) - self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + - FLAGS.num_networks) - - def test_too_many_networks(self): - """Ensure error is raised if we run out of networks""" - projects = [] - networks_left = (FLAGS.num_networks - - db.network_count(context.get_admin_context())) - for i in range(networks_left): - project = self.manager.create_project('many%s' % i, self.user) - projects.append(project) - db.project_get_network(context.get_admin_context(), project.id) - project = self.manager.create_project('last', self.user) - projects.append(project) - self.assertRaises(db.NoMoreNetworks, - db.project_get_network, - context.get_admin_context(), - project.id) - for project in projects: - self.manager.delete_project(project) - - def test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address) - release_ip(address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address2) - release_ip(address) - - def test_available_ips(self): - """Make sure the number of available ips for the network is correct - - The number of available IP addresses depends on the test - environment's setup. - - Network size is set in test fixture's setUp method. - - There are ips reserved at the bottom and top of the range. 
- services (network, gateway, CloudPipe, broadcast) - """ - network = db.project_get_network(context.get_admin_context(), - self.projects[0].id) - net_size = flags.FLAGS.network_size - admin_context = context.get_admin_context() - total_ips = (db.network_count_available_ips(admin_context, - network['id']) + - db.network_count_reserved_ips(admin_context, - network['id']) + - db.network_count_allocated_ips(admin_context, - network['id'])) - self.assertEqual(total_ips, net_size) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when all fixed ips are used. - """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - lease_ip(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - release_ip(addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - -def is_allocated_in_project(address, project_id): - """Returns true if address is in specified project""" - project_net = db.project_get_network(context.get_admin_context(), - project_id) - network = db.fixed_ip_get_network(context.get_admin_context(), address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), address) - # instance exists until release - return instance is not None and network['id'] == project_net['id'] - - -def binpath(script): - """Returns the absolute path to a script in bin""" - return os.path.abspath(os.path.join(__file__, "../../../bin", script)) - - -def lease_ip(private_ip): - """Run add command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s ", out, err) - - -def release_ip(private_ip): - """Run del command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py new file mode 100644 index 000000000..bcac20585 --- /dev/null +++ b/nova/tests/test_network.py @@ -0,0 +1,347 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# 
Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for network code +""" +import IPy +import os +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager + +FLAGS = flags.FLAGS + + +class NetworkTestCase(test.TestCase): + """Test cases for network code""" + def setUp(self): + super(NetworkTestCase, self).setUp() + # NOTE(vish): if you change these flags, make sure to change the + # flags in the corresponding section in nova-dhcpbridge + self.flags(connection_type='fake', + fake_network=True, + network_size=16, + num_networks=5) + logging.getLogger().setLevel(logging.DEBUG) + self.manager = manager.AuthManager() + self.user = self.manager.create_user('netuser', 'netuser', 'netuser') + self.projects = [] + self.network = utils.import_object(FLAGS.network_manager) + self.context = context.RequestContext(project=None, user=self.user) + for i in range(5): + name = 'project%s' % i + project = self.manager.create_project(name, 'netuser', name) + self.projects.append(project) + # create the necessary network data for the project + user_context = context.RequestContext(project=self.projects[i], + user=self.user) + network_ref = self.network.get_network(user_context) + self.network.set_network_host(context.get_admin_context(), + network_ref['id']) + instance_ref = self._create_instance(0) + self.instance_id = instance_ref['id'] + instance_ref = self._create_instance(1) + self.instance2_id = instance_ref['id'] + + def tearDown(self): + super(NetworkTestCase, self).tearDown() + # TODO(termie): this should really be instantiating clean datastores + # in between runs, one failure kills all the tests + db.instance_destroy(context.get_admin_context(), self.instance_id) + db.instance_destroy(context.get_admin_context(), self.instance2_id) + for project in self.projects: + self.manager.delete_project(project) + self.manager.delete_user(self.user) + + def _create_instance(self, project_num, mac=None): + if not mac: + mac = utils.generate_mac() + project = self.projects[project_num] + self.context._project = project + self.context.project_id = project.id + return db.instance_create(self.context, + {'project_id': project.id, + 'mac_address': mac}) + + def _create_address(self, project_num, instance_id=None): + """Create an address in given project num""" + if instance_id is None: + instance_id = self.instance_id + self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id + return self.network.allocate_fixed_ip(self.context, instance_id) + + def _deallocate_address(self, project_num, address): + self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id + self.network.deallocate_fixed_ip(self.context, address) + + 
def test_public_network_association(self): + """Makes sure that we can allocaate a public ip""" + # TODO(vish): better way of adding floating ips + self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id + pubnet = IPy.IP(flags.FLAGS.floating_range) + address = str(pubnet[0]) + try: + db.floating_ip_get_by_address(context.get_admin_context(), address) + except exception.NotFound: + db.floating_ip_create(context.get_admin_context(), + {'address': address, + 'host': FLAGS.host}) + float_addr = self.network.allocate_floating_ip(self.context, + self.projects[0].id) + fix_addr = self._create_address(0) + lease_ip(fix_addr) + self.assertEqual(float_addr, str(pubnet[0])) + self.network.associate_floating_ip(self.context, float_addr, fix_addr) + address = db.instance_get_floating_address(context.get_admin_context(), + self.instance_id) + self.assertEqual(address, float_addr) + self.network.disassociate_floating_ip(self.context, float_addr) + address = db.instance_get_floating_address(context.get_admin_context(), + self.instance_id) + self.assertEqual(address, None) + self.network.deallocate_floating_ip(self.context, float_addr) + self.network.deallocate_fixed_ip(self.context, fix_addr) + release_ip(fix_addr) + db.floating_ip_destroy(context.get_admin_context(), float_addr) + + def test_allocate_deallocate_fixed_ip(self): + """Makes sure that we can allocate and deallocate a fixed ip""" + address = self._create_address(0) + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + lease_ip(address) + self._deallocate_address(0, address) + + # Doesn't go away until it's dhcp released + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + + release_ip(address) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) + + def test_side_effects(self): + """Ensures allocating and releasing has no side effects""" + address = self._create_address(0) + address2 = self._create_address(1, self.instance2_id) + + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) + self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) + + # Addresses are allocated before they're issued + lease_ip(address) + lease_ip(address2) + + self._deallocate_address(0, address) + release_ip(address) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) + + # First address release shouldn't affect the second + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) + + self._deallocate_address(1, address2) + release_ip(address2) + self.assertFalse(is_allocated_in_project(address2, + self.projects[1].id)) + + def test_subnet_edge(self): + """Makes sure that private ips don't overlap""" + first = self._create_address(0) + lease_ip(first) + instance_ids = [] + for i in range(1, 5): + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address = self._create_address(i, instance_ref['id']) + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address2 = self._create_address(i, instance_ref['id']) + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address3 = self._create_address(i, instance_ref['id']) + lease_ip(address) + lease_ip(address2) + lease_ip(address3) + self.context._project = self.projects[i] + self.context.project_id = 
self.projects[i].id + self.assertFalse(is_allocated_in_project(address, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address2, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address3, + self.projects[0].id)) + self.network.deallocate_fixed_ip(self.context, address) + self.network.deallocate_fixed_ip(self.context, address2) + self.network.deallocate_fixed_ip(self.context, address3) + release_ip(address) + release_ip(address2) + release_ip(address3) + for instance_id in instance_ids: + db.instance_destroy(context.get_admin_context(), instance_id) + self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id + self.network.deallocate_fixed_ip(self.context, first) + self._deallocate_address(0, first) + release_ip(first) + + def test_vpn_ip_and_port_looks_valid(self): + """Ensure the vpn ip and port are reasonable""" + self.assert_(self.projects[0].vpn_ip) + self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) + self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + + FLAGS.num_networks) + + def test_too_many_networks(self): + """Ensure error is raised if we run out of networks""" + projects = [] + networks_left = (FLAGS.num_networks - + db.network_count(context.get_admin_context())) + for i in range(networks_left): + project = self.manager.create_project('many%s' % i, self.user) + projects.append(project) + db.project_get_network(context.get_admin_context(), project.id) + project = self.manager.create_project('last', self.user) + projects.append(project) + self.assertRaises(db.NoMoreNetworks, + db.project_get_network, + context.get_admin_context(), + project.id) + for project in projects: + self.manager.delete_project(project) + + def test_ips_are_reused(self): + """Makes sure that ip addresses that are deallocated get reused""" + address = self._create_address(0) + lease_ip(address) + self.network.deallocate_fixed_ip(self.context, address) + release_ip(address) + + address2 = self._create_address(0) + self.assertEqual(address, address2) + lease_ip(address) + self.network.deallocate_fixed_ip(self.context, address2) + release_ip(address) + + def test_available_ips(self): + """Make sure the number of available ips for the network is correct + + The number of available IP addresses depends on the test + environment's setup. + + Network size is set in test fixture's setUp method. + + There are ips reserved at the bottom and top of the range. + services (network, gateway, CloudPipe, broadcast) + """ + network = db.project_get_network(context.get_admin_context(), + self.projects[0].id) + net_size = flags.FLAGS.network_size + admin_context = context.get_admin_context() + total_ips = (db.network_count_available_ips(admin_context, + network['id']) + + db.network_count_reserved_ips(admin_context, + network['id']) + + db.network_count_allocated_ips(admin_context, + network['id'])) + self.assertEqual(total_ips, net_size) + + def test_too_many_addresses(self): + """Test for a NoMoreAddresses exception when all fixed ips are used. 
+ """ + admin_context = context.get_admin_context() + network = db.project_get_network(admin_context, self.projects[0].id) + num_available_ips = db.network_count_available_ips(admin_context, + network['id']) + addresses = [] + instance_ids = [] + for i in range(num_available_ips): + instance_ref = self._create_instance(0) + instance_ids.append(instance_ref['id']) + address = self._create_address(0, instance_ref['id']) + addresses.append(address) + lease_ip(address) + + ip_count = db.network_count_available_ips(context.get_admin_context(), + network['id']) + self.assertEqual(ip_count, 0) + self.assertRaises(db.NoMoreAddresses, + self.network.allocate_fixed_ip, + self.context, + 'foo') + + for i in range(num_available_ips): + self.network.deallocate_fixed_ip(self.context, addresses[i]) + release_ip(addresses[i]) + db.instance_destroy(context.get_admin_context(), instance_ids[i]) + ip_count = db.network_count_available_ips(context.get_admin_context(), + network['id']) + self.assertEqual(ip_count, num_available_ips) + + +def is_allocated_in_project(address, project_id): + """Returns true if address is in specified project""" + project_net = db.project_get_network(context.get_admin_context(), + project_id) + network = db.fixed_ip_get_network(context.get_admin_context(), address) + instance = db.fixed_ip_get_instance(context.get_admin_context(), address) + # instance exists until release + return instance is not None and network['id'] == project_net['id'] + + +def binpath(script): + """Returns the absolute path to a script in bin""" + return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + + +def lease_ip(private_ip): + """Run add command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(context.get_admin_context(), + private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), + private_ip) + cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), + instance_ref['mac_address'], + private_ip) + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("ISSUE_IP: %s, %s ", out, err) + + +def release_ip(private_ip): + """Run del command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(context.get_admin_context(), + private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), + private_ip) + cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), + instance_ref['mac_address'], + private_ip) + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/run_tests.sh b/run_tests.sh index a11dcd7cc..67214996d 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -36,7 +36,8 @@ done if [ $never_venv -eq 1 ]; then # Just run the test suites in current environment - python run_tests.py + rm -f nova.sqlite + nosetests -v exit fi @@ -47,7 +48,8 @@ if [ $force -eq 1 ]; then fi if [ -e ${venv} ]; then - ${with_venv} python run_tests.py $@ + ${with_venv} rm -f nova.sqlite + ${with_venv} nosetests -v $@ else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv @@ -59,9 +61,11 @@ else # Install the virtualenv and run the test suite in it python tools/install_venv.py else - python run_tests.py + rm -f nova.sqlite + nosetests -v exit fi fi - ${with_venv} python run_tests.py $@ + ${with_venv} rm -f nova.sqlite + ${with_venv} nosetests -v $@ fi -- cgit From 
d16a41f552c70708a909067ce8555b40c3785027 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:07:59 -0600 Subject: Replaced redis with a modified dict class --- nova/auth/fakeldap.py | 102 +++++++++++++++++++++++++++++--------------- nova/auth/manager.py | 10 ++++- nova/tests/auth_unittest.py | 5 +-- 3 files changed, 77 insertions(+), 40 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..e46bb91ab 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Fake LDAP server for test harness, backs to ReDIS. +"""Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap @@ -23,20 +23,11 @@ library to work with nova. """ +import fnmatch import json -import redis -from nova import flags -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') - - -class Redis(object): +class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): raise Exception('Attempted to instantiate singleton') @@ -44,13 +35,55 @@ class Redis(object): @classmethod def instance(cls): if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst + cls._instance = _StorageDict() return cls._instance +class _StorageDict(dict): + def keys(self, pat=None): + ret = super(_StorageDict, self).keys() + if pat is not None: + ret = fnmatch.filter(ret, pat) + return ret + + def delete(self, key): + try: + del self[key] + except KeyError: + pass + + def flushdb(self): + self.clear() + + def hgetall(self, key): + """Returns the hash for the given key; creates + the hash if the key doesn't exist.""" + try: + return self[key] + except KeyError: + self[key] = {} + return self[key] + + def hget(self, key, field): + hashdict = self.hgetall(key) + try: + return hashdict[field] + except KeyError: + hashdict[field] = {} + return hashdict[field] + + def hset(self, key, field, val): + hashdict = self.hgetall(key) + hashdict[field] = val + + def hmset(self, key, value_dict): + hashdict = self.hgetall(key) + for field, val in value_dict.items(): + hashdict[field] = val + + + + SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -169,8 +202,6 @@ def _to_json(unencoded): class FakeLDAP(object): - #TODO(vish): refactor this class to use a wrapper instead of accessing - # redis directly """Fake LDAP connection.""" def simple_bind_s(self, dn, password): @@ -183,14 +214,14 @@ class FakeLDAP(object): def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" - key = "%s%s" % (self.__redis_prefix, dn) - + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) - Redis.instance().hmset(key, value_dict) + Store.instance().hmset(key, value_dict) + def delete_s(self, dn): """Remove the ldap object at specified dn.""" - Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. 
@@ -201,18 +232,18 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = Redis.instance() - key = "%s%s" % (self.__redis_prefix, dn) + store = Store.instance() + key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: - values = _from_json(redis.hget(key, k)) + values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) - values = redis.hset(key, k, _to_json(values)) + values = store.hset(key, k, _to_json(values)) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. @@ -226,16 +257,17 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = Redis.instance() + store = Store.instance() if scope == SCOPE_BASE: - keys = ["%s%s" % (self.__redis_prefix, dn)] + keys = ["%s%s" % (self.__prefix, dn)] else: - keys = redis.keys("%s*%s" % (self.__redis_prefix, dn)) + keys = store.keys("%s*%s" % (self.__prefix, dn)) + objects = [] for key in keys: - # get the attributes from redis - attrs = redis.hgetall(key) - # turn the values from redis into lists + # get the attributes from the store + attrs = store.hgetall(key) + # turn the values from the store into lists # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) @@ -244,13 +276,13 @@ class FakeLDAP(object): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) - objects.append((key[len(self.__redis_prefix):], attrs)) + objects.append((key[len(self.__prefix):], attrs)) # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): # pylint: disable-msg=R0201 - """Get the prefix to use for all redis keys.""" + def __prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all keys.""" return 'ldap:' diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6df..5a7020a93 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -478,10 +478,13 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - project_dict = drv.create_project(name, + try: + project_dict = drv.create_project(name, User.safe_id(manager_user), description, member_users) + except: + project_dict = drv.get_project(name) if project_dict: project = Project(**project_dict) return project @@ -604,7 +607,10 @@ class AuthManager(object): if secret == None: secret = str(uuid.uuid4()) with self.driver() as drv: - user_dict = drv.create_user(name, access, secret, admin) + try: + user_dict = drv.create_user(name, access, secret, admin) + except: + user_dict = drv.get_user(name) if user_dict: return User(**user_dict) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 4508d6721..32cb2c542 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -333,11 +333,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): AuthManagerTestCase.__init__(self) test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 if FLAGS.flush_db: - logging.info("Flushing redis datastore") + logging.info("Flushing datastore") try: - r = fakeldap.Redis.instance() + r = fakeldap.Store.instance() r.flushdb() except: self.skip = True -- cgit From 02d2e305bd71aec3f723a42da620d2939e041f0c Mon Sep 17 
00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:14:32 -0600 Subject: Fixed some old code that was merged incorrectly --- nova/auth/manager.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 5a7020a93..11c3bd6df 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -478,13 +478,10 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - try: - project_dict = drv.create_project(name, + project_dict = drv.create_project(name, User.safe_id(manager_user), description, member_users) - except: - project_dict = drv.get_project(name) if project_dict: project = Project(**project_dict) return project @@ -607,10 +604,7 @@ class AuthManager(object): if secret == None: secret = str(uuid.uuid4()) with self.driver() as drv: - try: - user_dict = drv.create_user(name, access, secret, admin) - except: - user_dict = drv.get_user(name) + user_dict = drv.create_user(name, access, secret, admin) if user_dict: return User(**user_dict) -- cgit From 56a0dfde6e7d598df15bdce2541cd60c7757f557 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:24:06 -0600 Subject: pep8 cleanup --- nova/auth/fakeldap.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index e46bb91ab..1ac579dbd 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -82,8 +82,6 @@ class _StorageDict(dict): hashdict[field] = val - - SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -218,7 +216,6 @@ class FakeLDAP(object): value_dict = dict([(k, _to_json(v)) for k, v in attr]) Store.instance().hmset(key, value_dict) - def delete_s(self, dn): """Remove the ldap object at specified dn.""" Store.instance().delete("%s%s" % (self.__prefix, dn)) -- cgit From 6383f7f9f63e348a12adeff66a266ef796d98ded Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 17 Dec 2010 11:54:59 -0600 Subject: Some typo fixes --- nova/api/openstack/__init__.py | 4 ++-- nova/api/openstack/auth.py | 4 ++-- nova/api/openstack/ratelimiting/__init__.py | 4 ++-- nova/tests/api/openstack/__init__.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index e941694d9..e78080012 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -85,7 +85,7 @@ class AuthMiddleware(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): - if not self.auth_driver.has_authentication(req) + if not self.auth_driver.has_authentication(req): return self.auth_driver.authenticate(req) user = self.auth_driver.get_user_by_authentication(req) @@ -108,7 +108,7 @@ class RateLimitingMiddleware(wsgi.Middleware): at the given host+port to keep rate counters. 
""" super(RateLimitingMiddleware, self).__init__(application) - self._limiting_driver = + self._limiting_driver = \ utils.import_class(FLAGS.os_api_ratelimiting)(service_host) @webob.dec.wsgify diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index da8ebcfcd..26cb50dca 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -28,10 +28,10 @@ class BasicApiAuthManager(object): super(BasicApiAuthManager, self).__init__() def has_authentication(self, req): - return 'X-Auth-Token' in req.headers: + return 'X-Auth-Token' in req.headers def get_user_by_authentication(self, req): - return self.auth_driver.authorize_token(req.headers["X-Auth-Token"]) + return self.authorize_token(req.headers["X-Auth-Token"]) def authenticate(self, req): # Unless the request is explicitly made against // don't diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index d1da9afa7..2dc5ec32e 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -21,7 +21,7 @@ class BasicRateLimiting(object): if not service_host: #TODO(gundlach): These limits were based on limitations of Cloud #Servers. We should revisit them in Nova. - self.limiter = ratelimiting.Limiter(limits={ + self.limiter = Limiter(limits={ 'DELETE': (100, ratelimiting.PER_MINUTE), 'PUT': (10, ratelimiting.PER_MINUTE), 'POST': (10, ratelimiting.PER_MINUTE), @@ -29,7 +29,7 @@ class BasicRateLimiting(object): 'GET changes-since': (3, ratelimiting.PER_MINUTE), }) else: - self.limiter = ratelimiting.WSGIAppProxy(service_host) + self.limiter = WSGIAppProxy(service_host) def limited_request(self, req): """Rate limit the request. diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 2e357febe..4e4dfe4fc 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -17,7 +17,7 @@ import unittest -from nova.api.openstack import limited +from nova.api.openstack.common import limited from nova.api.openstack import RateLimitingMiddleware from nova.tests.api.fakes import APIStub from webob import Request -- cgit From e2a41d30c55edeba92f0e59f84e8d9eb7e16ca62 Mon Sep 17 00:00:00 2001 From: Jonathan Bryce Date: Fri, 17 Dec 2010 15:25:44 -0600 Subject: Adding in Ed Leafe so we can land his remove-redis test branch --- Authors | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Authors b/Authors index 565444ee1..fa38ef0b1 100644 --- a/Authors +++ b/Authors @@ -6,6 +6,7 @@ Chris Behrens Chmouel Boudjnah Dean Troyer Devin Carlen +Ed Leafe Eldar Nugaev Eric Day Ewan Mellor @@ -14,6 +15,7 @@ Jay Pipes Jesse Andrews Joe Heck Joel Moore +Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara -- cgit From 75d6de8a67db02f886636edfedcf3f3fc8cff9cc Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 17 Dec 2010 16:03:21 -0600 Subject: Some tweaks --- nova/db/sqlalchemy/api.py | 3 +++ nova/tests/api/openstack/fakes.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..0614c14e8 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1062,11 +1062,13 @@ def iscsi_target_create_safe(context, values): ################### +@require_admin_context def auth_destroy_token(_context, token): session = get_session() session.delete(token) +@require_admin_context def auth_get_token(_context, token_hash): session = get_session() tk = session.query(models.AuthToken).\ @@ 
-1077,6 +1079,7 @@ def auth_get_token(_context, token_hash): return tk +@require_admin_context def auth_create_token(_context, token): tk = models.AuthToken() tk.update(token) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 21b8aac1c..6c30761d6 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -173,7 +173,7 @@ class FakeToken(object): class FakeRequestContext(object): - def __init__(self, user, project): + def __init__(self, user, project, *args, **kwargs): self.user_id = 1 self.project_id = 1 -- cgit From 7f5ec9caa5cad314c6fec598d6c230dfbc8f0eae Mon Sep 17 00:00:00 2001 From: Jonathan Bryce Date: Fri, 17 Dec 2010 16:29:55 -0600 Subject: Removing unneeded Trial specific code --- nova/tests/auth_unittest.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 32cb2c542..61ae43fb1 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -335,11 +335,8 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): import nova.auth.fakeldap as fakeldap if FLAGS.flush_db: logging.info("Flushing datastore") - try: - r = fakeldap.Store.instance() - r.flushdb() - except: - self.skip = True + r = fakeldap.Store.instance() + r.flushdb() class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): -- cgit From 5b8362d0f56bdbeba7ee8292222863a501bad6af Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 17 Dec 2010 16:56:42 -0600 Subject: A few more tweaks to get the OS API tests passing --- nova/tests/api/openstack/fakes.py | 5 +++-- nova/tests/api/openstack/test_servers.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 6c30761d6..96689d2cd 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -29,8 +29,9 @@ from nova import exception as exc from nova import flags from nova import utils import nova.api.openstack.auth -from nova.image import service from nova.image import glance +from nova.image import local +from nova.image import service from nova.tests import fake_flags from nova.wsgi import Router @@ -75,7 +76,7 @@ def stub_out_image_service(stubs): def fake_image_show(meh, context, id): return dict(kernelId=1, ramdiskId=1) - stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show) + stubs.Set(local.LocalImageService, 'show', fake_image_show) def stub_out_auth(stubs): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce..dcd2fe766 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -27,6 +27,7 @@ import nova.api.openstack from nova.api.openstack import servers import nova.db.api from nova.db.sqlalchemy.models import Instance +import nova.image import nova.rpc from nova.tests.api.openstack import fakes -- cgit From ca81b0c12a3853942e9ce85154c38dad381ead0e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 18 Dec 2010 00:50:49 +0000 Subject: fixed unittests and further clean-up post-eventlet merge --- nova/tests/xenapi_unittest.py | 48 +++++++++++++-------------------------- nova/virt/xenapi/__init__.py | 2 +- nova/virt/xenapi/network_utils.py | 2 +- nova/virt/xenapi/vm_utils.py | 3 +-- nova/virt/xenapi/vmops.py | 8 ++++--- nova/virt/xenapi/volume_utils.py | 2 +- nova/virt/xenapi/volumeops.py | 1 - nova/virt/xenapi_conn.py | 8 +++---- 8 files 
changed, 29 insertions(+), 45 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index 839d6aa44..74401ce5d 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -78,13 +78,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] description = 'Test-SR' - sr_ref = helper.create_iscsi_storage_blocking(session, - info, - label, - description) + sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) @@ -97,13 +94,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() # oops, wrong mount point! - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') - - def check(exc): - """ handler """ - self.assertIsInstance(exc.value, volume_utils.StorageError) - - info.addErrback(check) + self.assertRaises(volume_utils.StorageError, + helper.parse_volume_info, + vol['ec2_id'], + '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) def test_attach_volume(self): @@ -116,8 +110,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(_): - """ handler """ + def check(): # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -127,8 +120,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vm_ref = vbd['VM'] self.assertEqual(vm_ref, vms[0]) - result.addCallback(check) - return result + check() def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ @@ -138,17 +130,11 @@ class XenAPIVolumeTestCase(test.TrialTestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') - - def check_exception(exc): - """ handler """ - if exc: - pass - else: - self.fail('Oops, no exception has been raised!') - result.addErrback(check_exception) - return result + self.assertRaises(Exception, + conn.attach_volume, + instance.name, + volume['ec2_id'], + '/dev/sdc') def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() @@ -192,10 +178,9 @@ class XenAPIVMTestCase(test.TrialTestCase): } conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) - result = conn.spawn(instance) + conn.spawn(instance) - def check(_): - """ handler """ + def check(): instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -225,8 +210,7 @@ class XenAPIVMTestCase(test.TrialTestCase): # Check that the VM is running according to XenAPI. self.assertEquals(vm['power_state'], 'Running') - result.addCallback(check) - return result + check() def tearDown(self): super(XenAPIVMTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index c7038deae..c75162f08 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,7 +20,7 @@ """ -class HelperBase(): +class HelperBase(object): """ The base for helper classes. 
This adds the XenAPI class attribute """ diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index c292383b6..e783120fe 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -29,7 +29,7 @@ class NetworkHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(NetworkHelper, self).__init__() @classmethod def find_network_with_bridge(cls, session, bridge): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 57d419773..911fcc9b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,7 +24,6 @@ import urllib from xml.dom import minidom from nova import flags -from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -48,7 +47,7 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VMHelper, self).__init__() @classmethod def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0be4ed07d..aa3a3ae53 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,7 +22,6 @@ import logging from nova import db from nova import context -from nova import flags from nova import exception from nova import utils @@ -78,6 +77,7 @@ class VMOps(object): logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -88,7 +88,8 @@ class VMOps(object): if state == power_state.RUNNING: logging.debug('Instance %s: booted', instance['name']) timer.stop() - except: + except Exception, exc: + logging.warn(exc) logging.exception('instance %s: failed to boot', instance['name']) db.instance_set_state(context.get_admin_context(), @@ -131,6 +132,7 @@ class VMOps(object): self._session.wait_for_task(task) except self.XenAPI.Failure, exc: logging.warn(exc) + # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(task) @@ -149,7 +151,7 @@ class VMOps(object): """Return data about VM diagnostics""" vm = VMHelper.lookup(self._session, instance_id) if vm is None: - raise exception.Exception("Instance not found %s" % instance_id) + raise exception.NotFound("Instance not found %s" % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 255f23d88..5366078ce 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -43,7 +43,7 @@ class VolumeHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VolumeHelper, self).__init__() @classmethod def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c7516073..9dbb1bb14 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -19,7 +19,6 @@ Management class for Storage-related functions (attach, detach, etc). 
""" import logging -from nova import flags from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 207222744..2a8614cfd 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -119,11 +119,11 @@ class XenAPIConnection(object): def spawn(self, instance): """ Create VM instance """ - return self._vmops.spawn(instance) + self._vmops.spawn(instance) def reboot(self, instance): """ Reboot VM instance """ - return self._vmops.reboot(instance) + self._vmops.reboot(instance) def destroy(self, instance): """ Destroy VM instance """ @@ -143,13 +143,13 @@ class XenAPIConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): """ Attach volume storage to VM instance """ - return self._volumeops.attach_volume(instance_name, + self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): """ Detach volume storage to VM instance """ - return self._volumeops.detach_volume(instance_name, mountpoint) + self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): -- cgit From ca1017988f98a246aa82c16f471791c7ea3eceec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 19 Dec 2010 20:05:41 +0000 Subject: Support proxying api by using X-Forwarded-For --- nova/api/ec2/__init__.py | 8 +++++++- nova/api/ec2/metadatarequesthandler.py | 11 +++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..c455144a9 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -37,6 +37,9 @@ from nova.auth import manager FLAGS = flags.FLAGS +flags.DEFINE_boolean('use_forwarded_for', False, + 'Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.') _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -81,9 +84,12 @@ class Authenticate(wsgi.Middleware): raise webob.exc.HTTPForbidden() # Authenticated! 
+ remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) ctxt = context.RequestContext(user=user, project=project, - remote_address=req.remote_addr) + remote_address=remote_address) req.environ['ec2.context'] = ctxt return self.application diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414cc..66439a0df 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -23,9 +23,13 @@ import logging import webob.dec import webob.exc +from nova import flags from nova.api.ec2 import cloud +FLAGS = flags.FLAGS + + class MetadataRequestHandler(object): """Serve metadata from the EC2 API.""" @@ -63,10 +67,13 @@ class MetadataRequestHandler(object): @webob.dec.wsgify def __call__(self, req): cc = cloud.CloudController() - meta_data = cc.get_metadata(req.remote_addr) + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + meta_data = cc.get_metadata(remote_address) if meta_data is None: logging.error('Failed to get metadata for ip: %s' % - req.remote_addr) + remote_address) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) if data is None: -- cgit From d00a0cec7e8bb02b44c7d9fd94cb9763c37c505e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 19 Dec 2010 22:05:46 -0400 Subject: pep8 (again) --- nova/tests/api/openstack/test_servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index ba432f6c3..1283167a1 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -62,7 +62,9 @@ def stub_instance(id, user_id=1): def fake_compute_api(cls, req, id): return True + class ServersTest(unittest.TestCase): + def setUp(self): self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} -- cgit From 8ddae1280da59a0e86e1daf1c8de97248ef6cb13 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 19 Dec 2010 22:14:36 -0400 Subject: pep8 (again again) --- nova/tests/api/openstack/test_servers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 1283167a1..3820f5f27 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -59,6 +59,7 @@ def stub_instance(id, user_id=1): return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id, display_name='server%s' % id, internal_id=id) + def fake_compute_api(cls, req, id): return True -- cgit From 2eafa204703785f314226eeebb31a840d3dd502f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 20 Dec 2010 11:31:21 -0600 Subject: Added InstanceActions DB model --- nova/db/sqlalchemy/models.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 61764ee8d..6eac1b873 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -243,6 +243,17 @@ class InstanceDiagnostics(BASE, NovaBase): vif_0_rx = Column(Float) +class InstanceActions(BASE, NovaBase): + """Represents a guest VM's actions and results""" + __tablename__ = "instance_actions" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + action = Column(String(255)) + result = 
Column(Boolean) + error = Column(Text) + + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -543,10 +554,11 @@ def register_models(): it will never need to be called explicitly elsewhere. """ from sqlalchemy import create_engine - models = (Service, Instance, InstanceDiagnostics, Volume, ExportDevice, - IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, - SecurityGroupIngressRule, SecurityGroupInstanceAssociation, - AuthToken, User, Project) # , Image, Host + models = (Service, Instance, InstanceDiagnostics, InstanceActions, + Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, + Network, SecurityGroup, SecurityGroupIngressRule, + SecurityGroupInstanceAssociation, AuthToken, User, + Project) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) -- cgit From e91590145962827e68bbb3518990d7d94e99608d Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 20 Dec 2010 20:37:56 +0100 Subject: PEP8 fixups --- nova/compute/disk.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 3d5e07724..0169a9831 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -105,12 +105,14 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('parted --script %s mkpartfs primary %s %ds %ds' % (outfile, local_type, local_first, local_last)) + def extend(image, size, execute): file_size = os.path.getsize(image) if file_size >= size: return return execute('truncate -s size %s' % (image,)) + def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. -- cgit From 27d89c2d425aefcd32549b08e3cff8b132d3e75b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 20 Dec 2010 20:04:24 +0000 Subject: don't add the ip to bridge on compute hosts --- nova/network/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 6a30f30b7..004dc6ff6 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -361,8 +361,7 @@ class FlatDHCPManager(FlatManager): """Sets up matching network for compute hosts.""" network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface, - network_ref) + FLAGS.flat_interface) def setup_fixed_ip(self, context, address): """Setup dhcp for this network.""" -- cgit From f53532bf17d0fac1cc4a1f51f6c12e2ae12d0d74 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 20 Dec 2010 14:21:01 -0600 Subject: Make column names more generic --- nova/db/sqlalchemy/models.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 6eac1b873..96d981571 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -232,15 +232,13 @@ class InstanceDiagnostics(BASE, NovaBase): id = Column(Integer, primary_key=True) instance_id = Column(Integer, ForeignKey('instances.id')) - vbd_xvda_read = Column(Float) - vbd_xvda_write = Column(Float) - vbd_xvdb_read = Column(Float) - vbd_xvdb_write = Column(Float) - memory = Column(Float) - memory_internal_free = Column(Float) - cpu0 = Column(Float) - vif_0_tx = Column(Float) - vif_0_rx = Column(Float) + memory_available = Column(Float) + memory_free = Column(Float) + cpu_load = Column(Float) + disk_read = 
Column(Float) + disk_write = Column(Float) + net_tx = Column(Float) + net_rx = Column(Float) class InstanceActions(BASE, NovaBase): -- cgit From de383023e4d5c30d3ad4474af104f6b659e1bd32 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 20 Dec 2010 21:04:54 +0000 Subject: directly copy ip allocation into compute --- nova/compute/api.py | 44 --------------------------------------- nova/compute/manager.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 53 insertions(+), 46 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..2dae9cdbc 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -145,18 +145,6 @@ class ComputeAPI(base.Base): instance = self.update_instance(context, instance_id, **updates) instances.append(instance) - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. - address = self.network_manager.allocate_fixed_ip(context, - instance_id, - is_vpn) - rpc.cast(elevated, - self._get_network_topic(context), - {"method": "setup_fixed_ip", - "args": {"address": address}}) - logging.debug("Casting to scheduler for %s/%s's instance %s", context.project_id, context.user_id, instance_id) rpc.cast(context, @@ -219,28 +207,6 @@ class ComputeAPI(base.Base): state=0, terminated_at=datetime.datetime.utcnow()) - # FIXME(ja): where should network deallocate occur? - address = self.db.instance_get_floating_address(context, - instance['id']) - if address: - logging.debug("Disassociating address %s" % address) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(context, - self._get_network_topic(context), - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) - - address = self.db.instance_get_fixed_address(context, instance['id']) - if address: - logging.debug("Deallocating address %s" % address) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - host = instance['host'] if host: rpc.cast(context, @@ -293,13 +259,3 @@ class ComputeAPI(base.Base): {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - network_ref = self.network_manager.get_network(context) - host = network_ref['host'] - if not host: - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return self.db.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7eb60e262..6d705f983 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -40,6 +40,7 @@ import logging from nova import exception from nova import flags from nova import manager +from nova import rpc from nova import utils from nova.compute import power_state @@ -76,6 +77,17 @@ class ComputeManager(manager.Manager): state = power_state.NOSTATE self.db.instance_set_state(context, instance_id, state) + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + network_ref = self.network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return self.db.queue_get_for(context, FLAGS.network_topic, host) + @exception.wrap_exception def refresh_security_group(self, context, security_group_id, **_kwargs): """This call passes stright through to the virtualization driver.""" @@ -89,11 +101,26 @@ class ComputeManager(manager.Manager): if instance_ref['name'] in self.driver.list_instances(): raise exception.Error("Instance has already been created") logging.debug("instance %s: starting...", instance_id) - self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, {'host': self.host}) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'networking') + + is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id + address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(context, + self._get_network_topic(context), + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + self.network_manager.setup_compute_network(context, instance_id) + # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, @@ -119,9 +146,33 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - logging.debug("instance %s: terminating", instance_id) instance_ref = self.db.instance_get(context, instance_id) + + address = self.db.instance_get_floating_address(context, + instance_ref['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. + rpc.cast(context, + self._get_network_topic(context), + {"method": "disassociate_floating_ip", + "args": {"floating_address": address}}) + + address = self.db.instance_get_fixed_address(context, + instance_ref['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. 
If this changes, + # we will need to cast here. + self.network_manager.deallocate_fixed_ip(context.elevated(), + address) + + logging.debug("instance %s: terminating", instance_id) + volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) -- cgit From 9b5d8600ab3cadd5f3174056eaedd0db420f1f1b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 20 Dec 2010 22:53:07 +0100 Subject: Add my @linux2go.dk address to .mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 2a6eb8d7d..8041e2341 100644 --- a/.mailmap +++ b/.mailmap @@ -19,6 +19,7 @@ + -- cgit From 83cf1f7140c20ea2188272b57e4e2c1a95f8ff9e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:26:50 +0000 Subject: remove extra print statements --- nova/api/ec2/cloud.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 91b2f0064..ad6debb11 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -723,10 +723,8 @@ class CloudController(object): floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) # NOTE(vish): Perhaps we should just pass this on to compute and # let compute communicate with network. - print "in cloud get" network_topic = self.compute_api.get_network_topic(context, internal_id) - print "got the network topic", network_topic rpc.cast(context, network_topic, {"method": "associate_floating_ip", -- cgit From 3a0878b7a94ba3411feb9a7944f42c9f352d3a45 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:34:30 +0000 Subject: add a few extra joined objects to get instance --- nova/compute/manager.py | 1 - nova/db/sqlalchemy/api.py | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3b7fd9c32..b3e97011f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -158,7 +158,6 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) - if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f7787d1f0..d02084da7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -580,13 +580,17 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload('volumes')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ -- cgit From a2019a14f7e7902c0bfef9fe3e9b576d9f45defe Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:39:28 +0000 Subject: add missing greenthread import --- nova/virt/libvirt_conn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5a8c71850..feef1390c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,6 +45,7 @@ import logging import os import shutil +from eventlet import greenthread from eventlet import event from 
eventlet import tpool -- cgit From fb24146290e6cf49397441d36878652da376f66d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:43:47 +0000 Subject: pep8 and removed extra imports --- nova/compute/api.py | 1 - nova/tests/cloud_unittest.py | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 606344c03..1dbe6e02d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -275,4 +275,3 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) - diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 185e4b4e5..af544e3cb 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -22,12 +22,10 @@ import logging from M2Crypto import BIO from M2Crypto import RSA import os -import StringIO import tempfile import time from eventlet import greenthread -from xml.etree import ElementTree from nova import context from nova import crypto @@ -36,7 +34,6 @@ from nova import flags from nova import rpc from nova import service from nova import test -from nova import utils from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -75,7 +72,8 @@ class CloudTestCase(test.TestCase): self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, - project=self.project) + project=self.project) + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) -- cgit From d2eb04cea6b7f0a669758fc1fba32e77a008a7eb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 11:42:25 -0600 Subject: PEP8 cleanup --- nova/virt/xenapi/vmops.py | 16 ++++++++-------- nova/virt/xenapi_conn.py | 30 +++++++++++++++--------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a18eacf07..bedf131df 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -44,12 +44,12 @@ class VMOps(object): VMHelper.late_import() def list_instances(self): - """ List VM instances """ + """List VM instances""" return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % @@ -81,7 +81,7 @@ class VMOps(object): vm_ref) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -90,7 +90,7 @@ class VMOps(object): self._session.wait_for_task(task) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. 
This lets us clean up instances @@ -127,7 +127,7 @@ class VMOps(object): callback(ret) def pause(self, instance, callback): - """ Pause VM instance """ + """Pause VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -136,7 +136,7 @@ class VMOps(object): self._wait_with_callback(task, callback) def unpause(self, instance, callback): - """ Unpause VM instance """ + """Unpause VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -145,7 +145,7 @@ class VMOps(object): self._wait_with_callback(task, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -161,6 +161,6 @@ class VMOps(object): return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 21ed2cd65..3a9084d89 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,7 +101,7 @@ def get_connection(_): class XenAPIConnection(object): - """ A connection to XenServer or Xen Cloud Platform """ + """A connection to XenServer or Xen Cloud Platform""" def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) @@ -109,31 +109,31 @@ class XenAPIConnection(object): self._volumeops = VolumeOps(session) def list_instances(self): - """ List VM instances """ + """List VM instances""" return self._vmops.list_instances() def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" self._vmops.spawn(instance) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" self._vmops.reboot(instance) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" self._vmops.destroy(instance) def pause(self, instance, callback): - """ Pause VM instance """ + """Pause VM instance""" self._vmops.pause(instance, callback) def unpause(self, instance, callback): - """ Unpause paused VM instance """ + """Unpause paused VM instance""" self._vmops.unpause(instance, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" return self._vmops.get_info(instance_id) def get_diagnostics(self, instance_id): @@ -141,33 +141,33 @@ class XenAPIConnection(object): return self._vmops.get_diagnostics(instance_id) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): - """ Attach volume storage to VM instance """ + """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): - """ Detach volume storage to VM instance """ + """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): - """ The session to invoke XenAPI SDK calls """ + """The session to invoke XenAPI SDK calls""" def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def 
get_xenapi(self): - """ Return the xenapi object """ + """Return the xenapi object""" return self._session.xenapi def get_xenapi_host(self): - """ Return the xenapi host """ + """Return the xenapi host""" return self._session.xenapi.session.get_this_host(self._session.handle) def call_xenapi(self, method, *args): @@ -218,7 +218,7 @@ class XenAPISession(object): def _unwrap_plugin_exceptions(func, *args, **kwargs): - """ Parse exception details """ + """Parse exception details""" try: return func(*args, **kwargs) except XenAPI.Failure, exc: -- cgit From a17fa6df76a3215d84d99738a1a7752a54cfe914 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 18:43:41 +0000 Subject: don't allocate networks when getting vpn info --- nova/auth/manager.py | 6 +++--- nova/db/sqlalchemy/api.py | 24 +++++++++++++----------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 74da8e045..f9a7dd000 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -540,10 +540,10 @@ class AuthManager(object): """ network_ref = db.project_get_network(context.get_admin_context(), - Project.safe_id(project)) + Project.safe_id(project), False) - if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + if not network_ref: + return (None, None) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6ecd824e1..35c2c76fd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1075,24 +1075,26 @@ def network_update(context, network_id, values): @require_context -def project_get_network(context, project_id): +def project_get_network(context, project_id, associate=True): session = get_session() - rv = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - if not rv: + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + first() + if not result: + if not associate: + return None try: return network_associate(context, project_id) except IntegrityError: # NOTE(vish): We hit this if there is a race and two # processes are attempting to allocate the # network at the same time - rv = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - return rv + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + first() + return result ################### -- cgit From 902db577ea19459c9b01ed7b262024b900440573 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 18:48:07 +0000 Subject: update db/api.py as well --- nova/db/api.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 339e0a3ae..69cfa3377 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -512,12 +512,14 @@ def network_update(context, network_id, values): ################### -def project_get_network(context, project_id): +def project_get_network(context, project_id, associate=True): """Return the network associated with the project. - Raises NotFound if no such network can be found. + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns None. 
""" + return IMPL.project_get_network(context, project_id) -- cgit From aa0639b00c3cd4b7bd5dd7dc9027e86d0f43150a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 18:57:11 +0000 Subject: change virtualization to not get network through project --- nova/virt/libvirt_conn.py | 4 ++-- nova/virt/xenapi/vmops.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index feef1390c..9be363661 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -488,8 +488,8 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? logging.debug('instance %s: starting toXML method', instance['name']) - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) # FIXME(vish): stick this in db instance_type = instance['instance_type'] instance_type = instance_types.INSTANCE_TYPES[instance_type] diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..de40815e2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -54,8 +54,8 @@ class VMOps(object): raise Exception('Attempted to create non-unique name %s' % instance.name) - bridge = db.project_get_network(context.get_admin_context(), - instance.project_id).bridge + bridge = db.network_get_by_instance(context.get_admin_context(), + instance['id'])['bridge'] network_ref = \ NetworkHelper.find_network_with_bridge(self._session, bridge) -- cgit From 132b12e27fa69319f85dee0089fad1ba1a342fd8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 19:24:12 +0000 Subject: activate fake rabbit for debugging --- nova/tests/rpc_unittest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 8c3e31037..6ea2edcab 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -33,7 +33,6 @@ class RpcTestCase(test.TestCase): """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() - self.flags(fake_rabbit=False) self.conn = rpc.Connection.instance(True) self.receiver = TestReceiver() self.consumer = rpc.AdapterConsumer(connection=self.conn, -- cgit From b3fce81e384aec46c0963db1f144cc58d02340a4 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 14:13:18 -0600 Subject: Log all XenAPI actions --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 12 ++++++++++++ nova/db/sqlalchemy/models.py | 1 - nova/virt/xenapi/vmops.py | 16 ++++++++-------- nova/virt/xenapi_conn.py | 33 +++++++++++++++++++++++---------- 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc2443..4e15596d9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -334,6 +334,11 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) +def instance_action_create(context, values): + """Create an instance action from the values dictionary.""" + return IMPL.instance_action_create(context, values) + + ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 935063609..63b367d2e 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -749,6 +749,18 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +@require_context +def instance_action_create(context, values): + """Create an instance 
action and the action results""" + action_ref = models.InstanceActions() + action_ref.update(values) + + session = get_session() + with session.begin(): + action_ref.save(session=session) + return action_ref + + ################### diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 96d981571..eac6a304e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -248,7 +248,6 @@ class InstanceActions(BASE, NovaBase): instance_id = Column(Integer, ForeignKey('instances.id')) action = Column(String(255)) - result = Column(Boolean) error = Column(Text) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index bedf131df..5b9495b67 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -87,7 +87,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) def destroy(self, instance): """Destroy VM instance""" @@ -101,7 +101,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -109,19 +109,19 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) - def _wait_with_callback(self, task, callback): + def _wait_with_callback(self, instance_id, task, callback): ret = None try: - ret = self._session.wait_for_task(task) + ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: logging.warn(exc) callback(ret) @@ -133,7 +133,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.pause', vm) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" @@ -142,7 +142,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.unpause', vm) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) def get_info(self, instance_id): """Return data about VM instance""" diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 3a9084d89..33a55d7b2 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -54,6 +54,8 @@ import xmlrpclib from eventlet import event from eventlet import tpool +from nova import context +from nova import db from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps @@ -183,35 +185,46 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, task): + def wait_for_task(self, instance_id, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, instance_id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() return rv - def _poll_task(self, task, done): + def _poll_task(self, instance_id, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) + name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - if status == 'pending': + action = dict( + instance_id=int(instance_id), + action=name, + error=None) + if status == "pending": return - elif status == 'success': + elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info("Task [%s] %s status: success %s" % ( + name, + task, + result)) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) + action["error"] = str(error_info) + logging.warn("Task [%s] %s status: %s %s" % ( + name, + task, + status, + error_info)) done.send_exception(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + db.instance_action_create(context.get_admin_context(), action) except XenAPI.Failure, exc: logging.warn(exc) done.send_exception(*sys.exc_info()) -- cgit From 7da5fdf45add8e5c049321c5553f98229446e6b9 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 14:28:20 -0600 Subject: PEP8 cleanup --- nova/db/sqlalchemy/api.py | 2 +- nova/tests/virt_unittest.py | 46 +++++++++++++++++++++++---------------------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 63b367d2e..f409560cf 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -751,7 +751,7 @@ def instance_add_security_group(context, instance_id, security_group_id): @require_context def instance_action_create(context, values): - """Create an instance action and the action results""" + """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 9bbba4ba9..0cf016380 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -129,43 +129,45 @@ class LibvirtConnTestCase(test.TestCase): check_list.append(check) else: if expect_kernel: - check = (lambda t: t.find('./os/kernel').text.split('/' - )[1], 'kernel') + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') else: check = (lambda t: t.find('./os/kernel'), None) check_list.append(check) if expect_ramdisk: - check = (lambda t: t.find('./os/initrd').text.split('/' - )[1], 'ramdisk') + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') else: check = (lambda t: t.find('./os/initrd'), None) check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter' - ).get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter' - ).get('value'), '10.11.12.13'), - (lambda t: t.findall('./devices/interface/filterref/parameter' - )[1].get('name'), 'DHCPSERVER'), - (lambda t: t.findall('./devices/interface/filterref/parameter' - 
)[1].get('value'), '10.0.0.1'), - (lambda t: t.find('./devices/serial/source').get('path' - ).split('/')[1], 'console.log'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), (lambda t: t.find('./memory').text, '2097152')] if rescue: - common_checks += [(lambda t: t.findall('./devices/disk/source' - )[0].get('file').split('/')[1], - 'rescue-disk'), - (lambda t: t.findall('./devices/disk/source' - )[1].get('file').split('/')[1], - 'disk')] + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'rescue-disk'), (lambda t: t.findall( + './devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] else: - common_checks += [(lambda t: t.findall('./devices/disk/source' - )[0].get('file').split('/')[1], + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): -- cgit From 40dba7dc0b8faef6dace3e9d54f43b19398c73fc Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 21 Dec 2010 13:00:30 -0800 Subject: Reworked fakerabbit backend so each connection has it's own. Moved queues and exchanges to be globals. --- nova/fakerabbit.py | 135 ++++++++++++++++++++++++----------------------------- 1 file changed, 60 insertions(+), 75 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c64617931..792e4c344 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -25,6 +25,9 @@ from carrot.backends import base from eventlet import greenthread +EXCHANGES = {} +QUEUES = {} + class Message(base.BaseMessage): pass @@ -68,81 +71,63 @@ class Queue(object): return self._queue.get() -class Backend(object): - """ Singleton backend for testing """ - class __impl(base.BaseBackend): - def __init__(self, *args, **kwargs): - #super(__impl, self).__init__(*args, **kwargs) - self._exchanges = {} - self._queues = {} - - def _reset_all(self): - self._exchanges = {} - self._queues = {} - - def queue_declare(self, queue, **kwargs): - if queue not in self._queues: - logging.debug('Declaring queue %s', queue) - self._queues[queue] = Queue(queue) - - def exchange_declare(self, exchange, type, *args, **kwargs): - if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) - self._exchanges[exchange] = Exchange(exchange, type) - - def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', - queue, exchange, routing_key) - self._exchanges[exchange].bind(self._queues[queue].push, - routing_key) - - def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback - - def consume(self, *args, **kwargs): - while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) - - def get(self, queue, no_ack=False): - if not queue in self._queues or not self._queues[queue].size(): - return None - (message_data, content_type, content_encoding) = \ - self._queues[queue].pop() - message = Message(backend=self, body=message_data, - 
content_type=content_type, - content_encoding=content_encoding) - message.result = True - logging.debug('Getting from %s: %s', queue, message) - return message - - def prepare_message(self, message_data, delivery_mode, - content_type, content_encoding, **kwargs): - """Prepare message for sending.""" - return (message_data, content_type, content_encoding) - - def publish(self, message, exchange, routing_key, **kwargs): - if exchange in self._exchanges: - self._exchanges[exchange].publish( - message, routing_key=routing_key) - - __instance = None - - def __init__(self, *args, **kwargs): - if Backend.__instance is None: - Backend.__instance = Backend.__impl(*args, **kwargs) - self.__dict__['_Backend__instance'] = Backend.__instance - - def __getattr__(self, attr): - return getattr(self.__instance, attr) - - def __setattr__(self, attr, value): - return setattr(self.__instance, attr, value) +class Backend(base.BaseBackend): + def queue_declare(self, queue, **kwargs): + global QUEUES + if queue not in QUEUES: + logging.debug('Declaring queue %s', queue) + QUEUES[queue] = Queue(queue) + + def exchange_declare(self, exchange, type, *args, **kwargs): + global EXCHANGES + if exchange not in EXCHANGES: + logging.debug('Declaring exchange %s', exchange) + EXCHANGES[exchange] = Exchange(exchange, type) + + def queue_bind(self, queue, exchange, routing_key, **kwargs): + global EXCHANGES + global QUEUES + logging.debug('Binding %s to %s with key %s', + queue, exchange, routing_key) + EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) + + def declare_consumer(self, queue, callback, *args, **kwargs): + self.current_queue = queue + self.current_callback = callback + + def consume(self, limit=None): + while True: + item = self.get(self.current_queue) + if item: + self.current_callback(item) + raise StopIteration() + greenthread.sleep(0) + + def get(self, queue, no_ack=False): + global QUEUES + if not queue in QUEUES or not QUEUES[queue].size(): + return None + (message_data, content_type, content_encoding) = QUEUES[queue].pop() + message = Message(backend=self, body=message_data, + content_type=content_type, + content_encoding=content_encoding) + message.result = True + logging.debug('Getting from %s: %s', queue, message) + return message + + def prepare_message(self, message_data, delivery_mode, + content_type, content_encoding, **kwargs): + """Prepare message for sending.""" + return (message_data, content_type, content_encoding) + + def publish(self, message, exchange, routing_key, **kwargs): + global EXCHANGES + if exchange in EXCHANGES: + EXCHANGES[exchange].publish(message, routing_key=routing_key) def reset_all(): - Backend()._reset_all() + global EXCHANGES + global QUEUES + EXCHANGES = {} + QUEUES = {} -- cgit From db938f975da64540ebb942e9dfd640db4dd7f939 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 21:34:51 +0000 Subject: removed unused import and fix docstring --- nova/api/ec2/cloud.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ad6debb11..c845e6a2e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,7 +27,6 @@ import datetime import logging import re import os -import time from nova import context import IPy @@ -706,7 +705,7 @@ class CloudController(object): def release_address(self, context, public_ip, **kwargs): floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) # NOTE(vish): We don't know which network host should get the ip - # when we allocate, so 
just send it to any one. This + # when we deallocate, so just send it to any one. This # will probably need to move into a network supervisor # at some point. rpc.cast(context, -- cgit From 555bea30cddfd32c42b6d7453b5afd2e7fcfb7f2 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 15:46:44 -0600 Subject: Filter templates and dom0 from list_instances() --- nova/virt/xenapi/vmops.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5b9495b67..3b00ce8bf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -45,8 +45,12 @@ class VMOps(object): def list_instances(self): """List VM instances""" - return [self._session.get_xenapi().VM.get_name_label(vm) \ - for vm in self._session.get_xenapi().VM.get_all()] + vms = [] + for vm in self._session.get_xenapi().VM.get_all(): + rec = self._session.get_xenapi().VM.get_record(vm) + if not rec["is_a_template"] and not rec["is_control_domain"]: + vms.append(rec["name_label"]) + return vms def spawn(self, instance): """Create VM instance""" -- cgit From f0195ebfd2cc56cee5797fff19fb9702c51df51b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 21:47:13 +0000 Subject: fix reboot command to work even if a host is rebooted --- nova/compute/manager.py | 4 +++- nova/virt/libvirt_conn.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a84af6bb9..6d3ea966d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -138,8 +138,8 @@ class ComputeManager(manager.Manager): def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) self._update_state(context, instance_id) + instance_ref = self.db.instance_get(context, instance_id) if instance_ref['state'] != power_state.RUNNING: logging.warn('trying to reboot a non-running ' @@ -153,6 +153,7 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'rebooting') + self.network_manager.setup_compute_network(context, instance_id) self.driver.reboot(instance_ref) self._update_state(context, instance_id) @@ -168,6 +169,7 @@ class ComputeManager(manager.Manager): instance_id, power_state.NOSTATE, 'rescuing') + self.network_manager.setup_compute_network(context, instance_id) self.driver.rescue(instance_ref) self._update_state(context, instance_id) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ad101db2a..845167d9f 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -40,6 +40,7 @@ import logging import os import shutil +from eventlet import greenthread from eventlet import event from eventlet import tpool @@ -183,7 +184,8 @@ class LibvirtConnection(object): # everything has been vetted a bit def _wait_for_timer(): timer_done.wait() - self._cleanup(instance) + if cleanup: + self._cleanup(instance) done.send() greenthread.spawn(_wait_for_timer) -- cgit From 3b05f5b5b46dd58a891f2e4c7a15231ea44a3e46 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 15:56:12 -0600 Subject: Style correction --- nova/tests/virt_unittest.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 0cf016380..cb35db1e1 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -160,11 +160,11 @@ class 
LibvirtConnTestCase(test.TestCase): (lambda t: t.find('./memory').text, '2097152')] if rescue: - common_checks += [(lambda t: t.findall( - './devices/disk/source')[0].get('file').split('/')[1], - 'rescue-disk'), (lambda t: t.findall( - './devices/disk/source')[1].get( - 'file').split('/')[1], 'disk')] + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] else: common_checks += [(lambda t: t.findall( './devices/disk/source')[0].get('file').split('/')[1], -- cgit From 7af11742b6bab492eb87c212d05bf77c0c13aea9 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 22 Dec 2010 12:24:53 +0100 Subject: Populate user_data field from run-instances call parameter, default to empty string to avoid metadata base64 decoding failure, LP: #691598 --- nova/api/ec2/cloud.py | 1 + nova/compute/api.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8375c4399..13c2b4574 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -756,6 +756,7 @@ class CloudController(object): display_name=kwargs.get('display_name'), description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), + user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), generate_hostname=internal_id_to_ec2_id) return self._format_run_instances(context, diff --git a/nova/compute/api.py b/nova/compute/api.py index c740814da..8fdc0fc9e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -57,6 +57,7 @@ class ComputeAPI(base.Base): max_count=1, kernel_id=None, ramdisk_id=None, display_name='', description='', key_name=None, key_data=None, security_group='default', + user_data=None, generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -120,6 +121,7 @@ class ComputeAPI(base.Base): 'local_gb': type_data['local_gb'], 'display_name': display_name, 'display_description': description, + 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data} -- cgit From f783e8ad65c2ba3f605bdc350ac64f4beaf27e9d Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 22 Dec 2010 13:52:44 +0100 Subject: Adding me in the Authors file --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index fa38ef0b1..0b048becb 100644 --- a/Authors +++ b/Authors @@ -27,6 +27,7 @@ Rick Clark Ryan Lucio Sandy Walsh Soren Hansen +Thierry Carrez Todd Willey Trey Morris Vishvananda Ishaya -- cgit From 46c4d44affb289209dd6024cbb289b265d9c89c7 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Wed, 22 Dec 2010 10:40:24 -0500 Subject: Problem was with a missplaced parentheses. ugh. 
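The closing parenthesis had landed after the command string, so the shell command was being passed as a
second argument to the i18n wrapper _() instead of to utils.runthis(). A minimal sketch of the difference,
assuming _() is the stock gettext function installed as a builtin; the runthis() helper below is a
hypothetical stand-in for nova.utils.runthis, used only to illustrate the call shape:

    import gettext
    gettext.install('nova')  # installs _() as a builtin; it accepts a single message string

    def runthis(prompt, cmd):
        # hypothetical stand-in: report the command that would be run
        print(prompt % cmd)

    runthis(_("Generating root CA: %s"), "sh genrootca.sh")    # fixed: command goes to runthis()
    # runthis(_("Generating root CA: %s", "sh genrootca.sh"))  # broken: _() gets two arguments
    #                                                          # and raises TypeError at runtime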
--- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index de5079286..503a5fd6c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -114,7 +114,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis(_("Generating root CA: %s", "sh genrootca.sh")) + utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): -- cgit From 56856ac1103ec9f3ba0f2da81832a59e7e773256 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Wed, 22 Dec 2010 11:12:20 -0500 Subject: Fix doc building endpoint for gettext. --- doc/ext/nova_autodoc.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py index 39aa2c2cf..5429bb656 100644 --- a/doc/ext/nova_autodoc.py +++ b/doc/ext/nova_autodoc.py @@ -1,5 +1,8 @@ +import gettext import os +gettext.install('nova') + from nova import utils def setup(app): -- cgit From 21867297b673ec9fe055fb6c7e4a3dadcfa6fdd2 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 22 Dec 2010 12:39:59 -0600 Subject: Minor bug fix --- nova/api/__init__.py | 1 + nova/api/openstack/images.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 80f9f2109..e081ec10b 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -24,6 +24,7 @@ Root WSGI middleware for all API controllers. :ec2api_subdomain: subdomain running the EC2 API (default: ec2) """ +import logging import routes import webob.dec diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index fe8d9d75f..d3312aba8 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -53,6 +53,7 @@ class Controller(wsgi.Controller): images = common.limited(images, req) except NotImplementedError: # Emulate detail() using repeated calls to show() + ctxt = req.environ['nova.context'] images = self._service.index(ctxt) images = common.limited(images, req) images = [self._service.show(ctxt, i['id']) for i in images] -- cgit From c4fb755b169895f9ffab6ab4d18f5227688b7ae4 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 22 Dec 2010 13:18:26 -0600 Subject: Abstracted auth and ratelimiting more --- nova/api/openstack/__init__.py | 56 ++++------------------------- nova/api/openstack/auth.py | 22 +++++++++--- nova/api/openstack/ratelimiting/__init__.py | 22 ++++++++++-- 3 files changed, 43 insertions(+), 57 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index cdc25e2b7..b18edc8e7 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -45,12 +45,12 @@ from nova.auth import manager FLAGS = flags.FLAGS -flags.DEFINE_string('nova_api_auth', - 'nova.api.openstack.auth.BasicApiAuthManager', +flags.DEFINE_string('os_api_auth', + 'nova.api.openstack.auth.AuthMiddleware', 'The auth mechanism to use for the OpenStack API implemenation') flags.DEFINE_string('os_api_ratelimiting', - 'nova.api.openstack.ratelimiting.BasicRateLimiting', + 'nova.api.openstack.ratelimiting.RateLimitingMiddleware', 'Default ratelimiting implementation for the Openstack API') flags.DEFINE_bool('allow_admin_api', @@ -62,7 +62,10 @@ class API(wsgi.Middleware): """WSGI entry point for all OpenStack API requests.""" def __init__(self): - app = AuthMiddleware(RateLimitingMiddleware(APIRouter())) + auth_middleware = 
utils.import_class(FLAGS.os_api_auth) + ratelimiting_middleware = \ + utils.import_class(FLAGS.os_api_ratelimiting) + app = auth_middleware(ratelimiting_middleware(APIRouter())) super(API, self).__init__(app) @webob.dec.wsgify @@ -76,51 +79,6 @@ class API(wsgi.Middleware): return faults.Fault(exc) -class AuthMiddleware(wsgi.Middleware): - """Authorize the openstack API request or return an HTTP Forbidden.""" - - def __init__(self, application): - self.auth_driver = utils.import_class(FLAGS.nova_api_auth)() - super(AuthMiddleware, self).__init__(application) - - @webob.dec.wsgify - def __call__(self, req): - if not self.auth_driver.has_authentication(req): - return self.auth_driver.authenticate(req) - - user = self.auth_driver.get_user_by_authentication(req) - - if not user: - return faults.Fault(webob.exc.HTTPUnauthorized()) - - req.environ['nova.context'] = context.RequestContext(user, user) - return self.application - - -class RateLimitingMiddleware(wsgi.Middleware): - """Rate limit incoming requests according to the OpenStack rate limits.""" - - def __init__(self, application, service_host=None): - """Create a rate limiting middleware that wraps the given application. - - By default, rate counters are stored in memory. If service_host is - specified, the middleware instead relies on the ratelimiting.WSGIApp - at the given host+port to keep rate counters. - """ - super(RateLimitingMiddleware, self).__init__(application) - self._limiting_driver = \ - utils.import_class(FLAGS.os_api_ratelimiting)(service_host) - - @webob.dec.wsgify - def __call__(self, req): - """Rate limit the request. - - If the request should be rate limited, return a 413 status with a - Retry-After header giving the time when the request would succeed. - """ - return self._limiting_driver.limited_request(req, self.application) - - class APIRouter(wsgi.Router): """ Routes requests on the OpenStack API to the appropriate controller diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 26cb50dca..3850dd1f0 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -16,16 +16,28 @@ from nova.api.openstack import faults FLAGS = flags.FLAGS +class AuthMiddleware(wsgi.Middleware): + """Authorize the openstack API request or return an HTTP Forbidden.""" -class BasicApiAuthManager(object): - """ Implements a somewhat rudimentary version of OpenStack Auth""" - - def __init__(self, db_driver=None): + def __init__(self, application): if not db_driver: db_driver = FLAGS.db_driver self.db = utils.import_object(db_driver) self.auth = auth.manager.AuthManager() - super(BasicApiAuthManager, self).__init__() + super(AuthMiddleware, self).__init__(application) + + @webob.dec.wsgify + def __call__(self, req): + if not self.has_authentication(req): + return self.authenticate(req) + + user = self.get_user_by_authentication(req) + + if not user: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + req.environ['nova.context'] = context.RequestContext(user, user) + return self.application def has_authentication(self, req): return 'X-Auth-Token' in req.headers diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 1bf44bc7b..9892e792e 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -14,11 +14,16 @@ PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 +class RateLimitingMiddleware(wsgi.Middleware): + """Rate limit incoming requests according to the OpenStack rate limits.""" -class 
BasicRateLimiting(object): - """ Implements Rate limits as per the Rackspace CloudServers API spec. """ + def __init__(self, application, service_host=None): + """Create a rate limiting middleware that wraps the given application. - def __init__(self, service_host): + By default, rate counters are stored in memory. If service_host is + specified, the middleware instead relies on the ratelimiting.WSGIApp + at the given host+port to keep rate counters. + """ if not service_host: #TODO(gundlach): These limits were based on limitations of Cloud #Servers. We should revisit them in Nova. @@ -31,6 +36,16 @@ class BasicRateLimiting(object): }) else: self.limiter = WSGIAppProxy(service_host) + super(RateLimitingMiddleware, self).__init__(application) + + @webob.dec.wsgify + def __call__(self, req): + """Rate limit the request. + + If the request should be rate limited, return a 413 status with a + Retry-After header giving the time when the request would succeed. + """ + return self.limited_request(req, self.application) def limited_request(self, req, application): """Rate limit the request. @@ -75,6 +90,7 @@ class BasicRateLimiting(object): return req.method return None + class Limiter(object): """Class providing rate limiting of arbitrary actions.""" -- cgit From e419c27a00a85b7daba42f580e332d31713ae271 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 22 Dec 2010 13:33:26 -0600 Subject: Moved some things for testing --- nova/api/openstack/auth.py | 1 + nova/api/openstack/ratelimiting/__init__.py | 1 + nova/tests/api/openstack/__init__.py | 2 ++ 3 files changed, 4 insertions(+) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 3850dd1f0..6c3c870a1 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -12,6 +12,7 @@ from nova import db from nova import flags from nova import manager from nova import utils +from nova import wsgi from nova.api.openstack import faults FLAGS = flags.FLAGS diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 9892e792e..a2e0734ef 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -6,6 +6,7 @@ import urllib import webob.dec import webob.exc +from nova import wsgi from nova.api.openstack import faults # Convenience constants for the limits dictionary passed to Limiter(). 
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index fffc57e67..efe73b8e2 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -27,6 +27,8 @@ from webob import Request FLAGS = flags.FLAGS +RateLimitingMiddleware = utils.import_class(FLAGS.os_api_ratelimiting) + class RateLimitingMiddlewareTest(unittest.TestCase): def test_get_action_name(self): -- cgit From 168cde072542f9f4df7e7eb26f6b632306c0b7d2 Mon Sep 17 00:00:00 2001 From: mdietz Date: Wed, 22 Dec 2010 19:52:13 +0000 Subject: Finished moving the middleware layers and fixed the API tests again --- nova/api/openstack/auth.py | 2 +- nova/tests/api/openstack/__init__.py | 13 ++++++------- nova/tests/api/openstack/fakes.py | 15 +++++++++------ nova/tests/api/openstack/test_auth.py | 4 ++-- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c3c870a1..99cae2c75 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -20,7 +20,7 @@ FLAGS = flags.FLAGS class AuthMiddleware(wsgi.Middleware): """Authorize the openstack API request or return an HTTP Forbidden.""" - def __init__(self, application): + def __init__(self, application, db_driver=None): if not db_driver: db_driver = FLAGS.db_driver self.db = utils.import_object(db_driver) diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index efe73b8e2..9e183bd0d 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -19,15 +19,14 @@ import unittest from nova import context from nova import flags +from nova.api.openstack.ratelimiting import RateLimitingMiddleware from nova.api.openstack.common import limited -from nova.api.openstack import RateLimitingMiddleware from nova.tests.api.fakes import APIStub from nova import utils from webob import Request FLAGS = flags.FLAGS -RateLimitingMiddleware = utils.import_class(FLAGS.os_api_ratelimiting) class RateLimitingMiddlewareTest(unittest.TestCase): @@ -37,7 +36,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase): def verify(method, url, action_name): req = Request.blank(url) req.method = method - action = middleware._limiting_driver.get_action_name(req) + action = middleware.get_action_name(req) self.assertEqual(action, action_name) verify('PUT', '/servers/4', 'PUT') @@ -70,7 +69,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase): middleware = RateLimitingMiddleware(APIStub()) self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10) - self.assertTrue(set(middleware._limiting_driver.limiter._levels) == \ + self.assertTrue(set(middleware.limiter._levels) == \ set(['usr1:POST', 'usr1:POST servers', 'usr2:POST'])) def test_POST_servers_action_correctly_ratelimited(self): @@ -79,15 +78,15 @@ class RateLimitingMiddlewareTest(unittest.TestCase): for i in range(5): self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) # Reset the 'POST' action counter. 
- del middleware._limiting_driver.limiter._levels['usr1:POST'] + del middleware.limiter._levels['usr1:POST'] # All 50 daily "POST servers" actions should be all used up self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 0) def test_proxy_ctor_works(self): middleware = RateLimitingMiddleware(APIStub()) - self.assertEqual(middleware._limiting_driver.limiter.__class__.__name__, "Limiter") + self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") middleware = RateLimitingMiddleware(APIStub(), service_host='foobar') - self.assertEqual(middleware._limiting_driver.limiter.__class__.__name__, "WSGIAppProxy") + self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") class LimiterTest(unittest.TestCase): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 96689d2cd..f773b26a7 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -29,6 +29,8 @@ from nova import exception as exc from nova import flags from nova import utils import nova.api.openstack.auth +from nova.api.openstack import auth +from nova.api.openstack import ratelimiting from nova.image import glance from nova.image import local from nova.image import service @@ -52,10 +54,11 @@ class FakeRouter(Router): return res -def fake_auth_init(self): +def fake_auth_init(self, application): self.db = FakeAuthDatabase() self.context = Context() self.auth = FakeAuthManager() + self.application = application @webob.dec.wsgify @@ -83,21 +86,21 @@ def stub_out_auth(stubs): def fake_auth_init(self, app): self.application = app - stubs.Set(nova.api.openstack.AuthMiddleware, + stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fake_auth_init) - stubs.Set(nova.api.openstack.AuthMiddleware, + stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__call__', fake_wsgi) def stub_out_rate_limiting(stubs): def fake_rate_init(self, app): - super(nova.api.openstack.RateLimitingMiddleware, self).__init__(app) + super(nova.api.openstack.ratelimiting.RateLimitingMiddleware, self).__init__(app) self.application = app - stubs.Set(nova.api.openstack.RateLimitingMiddleware, + stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware, '__init__', fake_rate_init) - stubs.Set(nova.api.openstack.RateLimitingMiddleware, + stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware, '__call__', fake_wsgi) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 7b427c2db..489a1dfbf 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -34,7 +34,7 @@ class Test(unittest.TestCase): def setUp(self): self.stubs = stubout.StubOutForTesting() - self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager, + self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) fakes.FakeAuthManager.auth_data = {} @@ -131,7 +131,7 @@ class Test(unittest.TestCase): class TestLimiter(unittest.TestCase): def setUp(self): self.stubs = stubout.StubOutForTesting() - self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager, + self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) fakes.FakeAuthManager.auth_data = {} -- cgit From c2faf1c5e689ac5e81068a305a624e626e9a87b5 Mon Sep 17 00:00:00 2001 From: mdietz Date: Wed, 22 Dec 2010 20:06:22 +0000 Subject: Forgot the copyright info --- 
nova/api/openstack/auth.py | 18 +++++++++++++++++- nova/api/openstack/ratelimiting/__init__.py | 17 +++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 99cae2c75..72ad4ffa9 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -1,4 +1,20 @@ -import datetime +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.import datetime + import hashlib import json import time diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index a2e0734ef..8ca575b36 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -1,3 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License.import datetime + """Rate limiting of arbitrary actions.""" import httplib -- cgit From 81191660cf6d1e5ea47630ed45041dc923f6b57a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 20:59:53 +0000 Subject: merge trunk and upgrade to cheetah templating --- .mailmap | 1 + Authors | 2 + MANIFEST.in | 2 +- nova/adminclient.py | 1 + nova/api/cloudpipe/__init__.py | 4 +- nova/api/ec2/__init__.py | 6 +- nova/api/ec2/admin.py | 1 + nova/api/ec2/apirequest.py | 4 +- nova/api/ec2/cloud.py | 54 ++++---- nova/api/ec2/metadatarequesthandler.py | 2 +- nova/api/openstack/__init__.py | 17 ++- nova/api/openstack/backup_schedules.py | 1 + nova/api/openstack/servers.py | 29 ++++ nova/auth/dbdriver.py | 20 +-- nova/auth/fakeldap.py | 101 +++++++++----- nova/auth/ldapdriver.py | 69 +++++----- nova/auth/manager.py | 30 ++--- nova/cloudpipe/pipelib.py | 2 +- nova/compute/api.py | 47 +++++-- nova/compute/disk.py | 38 ++++-- nova/compute/instance_types.py | 3 +- nova/compute/manager.py | 73 +++++++--- nova/compute/monitor.py | 12 +- nova/crypto.py | 18 +-- nova/db/api.py | 5 + nova/db/sqlalchemy/api.py | 64 +++++---- nova/db/sqlalchemy/models.py | 36 ++++- nova/exception.py | 11 +- nova/fakerabbit.py | 12 +- nova/flags.py | 7 +- nova/image/glance.py | 8 +- nova/image/s3.py | 3 +- nova/network/linux_net.py | 10 +- nova/network/manager.py | 20 +-- nova/objectstore/handler.py | 24 ++-- nova/rpc.py | 34 ++--- nova/scheduler/chance.py | 2 +- nova/scheduler/driver.py | 2 +- nova/scheduler/manager.py | 2 +- nova/scheduler/simple.py | 13 +- nova/service.py | 14 +- nova/tests/api/openstack/test_servers.py | 37 +++++- nova/tests/auth_unittest.py | 10 +- nova/tests/compute_unittest.py | 8 ++ nova/tests/virt_unittest.py | 134 +++++++++++++++---- nova/twistd.py | 8 +- nova/utils.py | 14 +- nova/virt/connection.py | 2 +- nova/virt/fake.py | 16 ++- nova/virt/libvirt.qemu.xml.template | 34 ----- nova/virt/libvirt.rescue.qemu.xml.template | 38 ------ nova/virt/libvirt.rescue.qemu.xml.template.THIS | 38 ++++++ nova/virt/libvirt.rescue.uml.xml.template | 31 ----- nova/virt/libvirt.rescue.uml.xml.template.THIS | 31 +++++ nova/virt/libvirt.rescue.xen.xml.template | 34 ----- nova/virt/libvirt.uml.xml.template | 27 ---- nova/virt/libvirt.uml.xml.template.THIS | 27 ++++ nova/virt/libvirt.xen.xml.template | 30 ----- nova/virt/libvirt.xml.template | 79 +++++++++++ nova/virt/libvirt_conn.py | 168 +++++++++++++----------- nova/virt/xenapi/network_utils.py | 1 + nova/virt/xenapi/vm_utils.py | 7 +- nova/virt/xenapi/vmops.py | 55 ++++++-- nova/virt/xenapi/volumeops.py | 1 + nova/virt/xenapi_conn.py | 81 ++++++++---- nova/volume/driver.py | 10 +- nova/volume/manager.py | 20 +-- tools/pip-requires | 1 + 68 files changed, 1074 insertions(+), 672 deletions(-) delete mode 100644 nova/virt/libvirt.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template create mode 100644 nova/virt/libvirt.rescue.qemu.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template create mode 100644 nova/virt/libvirt.rescue.uml.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.xen.xml.template delete mode 100644 nova/virt/libvirt.uml.xml.template create mode 100644 nova/virt/libvirt.uml.xml.template.THIS delete mode 100644 nova/virt/libvirt.xen.xml.template create mode 100644 nova/virt/libvirt.xml.template diff --git a/.mailmap b/.mailmap index 2a6eb8d7d..8041e2341 100644 --- a/.mailmap +++ 
b/.mailmap @@ -19,6 +19,7 @@ + diff --git a/Authors b/Authors index 565444ee1..fa38ef0b1 100644 --- a/Authors +++ b/Authors @@ -6,6 +6,7 @@ Chris Behrens Chmouel Boudjnah Dean Troyer Devin Carlen +Ed Leafe Eldar Nugaev Eric Day Ewan Mellor @@ -14,6 +15,7 @@ Jay Pipes Jesse Andrews Joe Heck Joel Moore +Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara diff --git a/MANIFEST.in b/MANIFEST.in index 982b727aa..199ce30b6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template include nova/virt/interfaces.template -include nova/virt/libvirt.*.xml.template +include nova/virt/libvirt*.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ diff --git a/nova/adminclient.py b/nova/adminclient.py index 5a62cce7d..6ae9f0c0f 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -194,6 +194,7 @@ class HostInfo(object): class NovaAdminClient(object): + def __init__( self, clc_url=DEFAULT_CLC_URL, diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py index 6d40990a8..00ad38913 100644 --- a/nova/api/cloudpipe/__init__.py +++ b/nova/api/cloudpipe/__init__.py @@ -45,7 +45,7 @@ class API(wsgi.Application): def __call__(self, req): if req.method == 'POST': return self.sign_csr(req) - _log.debug("Cloudpipe path is %s" % req.path_info) + _log.debug(_("Cloudpipe path is %s") % req.path_info) if req.path_info.endswith("/getca/"): return self.send_root_ca(req) return webob.exc.HTTPNotFound() @@ -56,7 +56,7 @@ class API(wsgi.Application): return instance['project_id'] def send_root_ca(self, req): - _log.debug("Getting root ca") + _log.debug(_("Getting root ca")) project_id = self.get_project_id_from_ip(req.remote_addr) res = webob.Response() res.headers["Content-Type"] = "text/plain" diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..dd87d1f71 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -77,7 +77,7 @@ class Authenticate(wsgi.Middleware): req.host, req.path) except exception.Error, ex: - logging.debug("Authentication Failure: %s" % ex) + logging.debug(_("Authentication Failure: %s") % ex) raise webob.exc.HTTPForbidden() # Authenticated! @@ -120,9 +120,9 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug('action: %s' % action) + _log.debug(_('action: %s') % action) for key, value in args.items(): - _log.debug('arg: %s\t\tval: %s' % (key, value)) + _log.debug(_('arg: %s\t\tval: %s') % (key, value)) # Success! req.environ['ec2.controller'] = controller diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 1c6ab688d..fac01369e 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -168,6 +168,7 @@ class AdminController(object): # FIXME(vish): these host commands don't work yet, perhaps some of the # required data can be retrieved from service objects? + def describe_hosts(self, _context, **_kwargs): """Returns status info for all nodes. 
Includes: * Disk Space diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 5758781b6..a90fbeb0c 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -92,8 +92,8 @@ class APIRequest(object): method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: - _error = ('Unsupported API request: controller = %s,' - 'action = %s') % (self.controller, self.action) + _error = _('Unsupported API request: controller = %s,' + 'action = %s') % (self.controller, self.action) _log.warning(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ebb13aedc..503a5fd6c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -114,7 +114,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis("Generating root CA: %s", "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): @@ -318,11 +318,11 @@ class CloudController(object): ip_protocol = str(ip_protocol) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: - raise InvalidInputException('%s is not a valid ipProtocol' % + raise InvalidInputException(_('%s is not a valid ipProtocol') % (ip_protocol,)) if ((min(from_port, to_port) < -1) or (max(from_port, to_port) > 65535)): - raise InvalidInputException('Invalid port range') + raise InvalidInputException(_('Invalid port range')) values['protocol'] = ip_protocol values['from_port'] = from_port @@ -360,7 +360,8 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified " + "parameters.")) for rule in security_group.rules: match = True @@ -371,7 +372,7 @@ class CloudController(object): db.security_group_rule_destroy(context, rule['id']) self._trigger_refresh_security_group(context, security_group) return True - raise exception.ApiError("No rule for the specified parameters.") + raise exception.ApiError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API @@ -387,8 +388,8 @@ class CloudController(object): values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): - raise exception.ApiError('This rule already exists in group %s' % - group_name) + raise exception.ApiError(_('This rule already exists in group %s') + % group_name) security_group_rule = db.security_group_rule_create(context, values) @@ -416,7 +417,7 @@ class CloudController(object): def create_security_group(self, context, group_name, group_description): self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): - raise exception.ApiError('group %s already exists' % group_name) + raise exception.ApiError(_('group %s already exists') % group_name) group = {'user_id': context.user.id, 'project_id': context.project_id, @@ -529,13 +530,13 @@ class CloudController(object): def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_ref = db.volume_get_by_ec2_id(context, volume_id) if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError("Invalid device specified: %s. 
" - "Example device: /dev/vdb" % device) + raise exception.ApiError(_("Invalid device specified: %s. " + "Example device: /dev/vdb") % device) # TODO(vish): abstract status checking? if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) if volume_ref['attach_status'] == "attached": - raise exception.ApiError("Volume is already attached") + raise exception.ApiError(_("Volume is already attached")) internal_id = ec2_id_to_internal_id(instance_id) instance_ref = self.compute_api.get_instance(context, internal_id) host = instance_ref['host'] @@ -557,10 +558,10 @@ class CloudController(object): instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) if not instance_ref: - raise exception.ApiError("Volume isn't attached to anything!") + raise exception.ApiError(_("Volume isn't attached to anything!")) # TODO(vish): abstract status checking? if volume_ref['status'] == "available": - raise exception.ApiError("Volume is already detached") + raise exception.ApiError(_("Volume is already detached")) try: host = instance_ref['host'] rpc.cast(context, @@ -689,10 +690,11 @@ class CloudController(object): def allocate_address(self, context, **kwargs): # check quota if quota.allowed_floating_ips(context, 1) < 1: - logging.warn("Quota exceeeded for %s, tried to allocate address", + logging.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), context.project_id) - raise quota.QuotaError("Address quota exceeded. You cannot " - "allocate any more addresses") + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, @@ -751,7 +753,7 @@ class CloudController(object): kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, - kernel_id=kwargs.get('kernel_id'), + kernel_id=kwargs.get('kernel_id', None), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), description=kwargs.get('display_description'), @@ -805,7 +807,7 @@ class CloudController(object): # TODO: return error if not authorized volume_ref = db.volume_get_by_ec2_id(context, volume_id) if volume_ref['status'] != "available": - raise exception.ApiError("Volume status must be available") + raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() db.volume_update(context, volume_ref['id'], {'status': 'deleting', 'terminated_at': now}) @@ -836,11 +838,12 @@ class CloudController(object): def describe_image_attribute(self, context, image_id, attribute, **kwargs): if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise exception.ApiError(_('attribute not supported: %s') + % attribute) try: image = self.image_service.show(context, image_id) except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) + raise exception.ApiError(_('invalid id: %s') % image_id) result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: result['launchPermission'].append({'group': 'all'}) @@ -850,13 +853,14 @@ class CloudController(object): operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) + raise exception.ApiError(_('attribute not supported: %s') + % attribute) if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') + raise exception.ApiError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') + raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') + raise exception.ApiError(_('operation_type must be add or remove')) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414cc..0e9e686ff 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(object): cc = cloud.CloudController() meta_data = cc.get_metadata(req.remote_addr) if meta_data is None: - logging.error('Failed to get metadata for ip: %s' % + logging.error(_('Failed to get metadata for ip: %s') % req.remote_addr) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b9ecbd9b8..de95ee548 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -66,7 +66,7 @@ class API(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - logging.warn("Caught error: %s" % str(ex)) + logging.warn(_("Caught error: %s") % str(ex)) logging.debug(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -133,7 +133,7 @@ class RateLimitingMiddleware(wsgi.Middleware): if delay: # TODO(gundlach): Get the retry-after format correct. exc = webob.exc.HTTPRequestEntityTooLarge( - explanation='Too many requests.', + explanation=_('Too many requests.'), headers={'Retry-After': time.time() + delay}) raise faults.Fault(exc) return self.application @@ -170,9 +170,16 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() + + server_members = {'action': 'POST'} + if FLAGS.allow_admin_api: + logging.debug("Including admin operations in API.") + server_members['pause'] = 'POST' + server_members['unpause'] = 'POST' + mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, - member={'action': 'POST'}) + member=server_members) mapper.resource("backup_schedule", "backup_schedules", controller=backup_schedules.Controller(), @@ -186,10 +193,6 @@ class APIRouter(wsgi.Router): mapper.resource("sharedipgroup", "sharedipgroups", controller=sharedipgroups.Controller()) - if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") - # TODO: Place routes for admin operations here. 
- super(APIRouter, self).__init__(mapper) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 3ed691d7b..fc70b5c6c 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -24,6 +24,7 @@ import nova.image.service class Controller(wsgi.Controller): + def __init__(self): pass diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7704f48f1..5c3322f7c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import logging +import traceback + from webob import exc from nova import exception @@ -27,6 +30,10 @@ from nova.compute import power_state import nova.api.openstack +LOG = logging.getLogger('server') +LOG.setLevel(logging.DEBUG) + + def _entity_list(entities): """ Coerces a list of servers into proper dictionary format """ return dict(servers=entities) @@ -166,3 +173,25 @@ class Controller(wsgi.Controller): except: return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + + def pause(self, req, id): + """ Permit Admins to Pause the server. """ + ctxt = req.environ['nova.context'] + try: + self.compute_api.pause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::pause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unpause(self, req, id): + """ Permit Admins to Unpause the server. """ + ctxt = req.environ['nova.context'] + try: + self.compute_api.unpause(ctxt, id) + except: + readable = traceback.format_exc() + logging.error("Compute.api::unpause %s", readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index a1584322b..47e435cb6 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -37,7 +37,6 @@ class DbDriver(object): def __init__(self): """Imports the LDAP module""" pass - db def __enter__(self): return self @@ -83,7 +82,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate('User %s already exists' % name) + raise exception.Duplicate(_('User %s already exists') % name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -105,8 +104,9 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) # description is a required attribute if description is None: @@ -133,8 +133,8 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) for member in members: db.project_add_member(context.get_admin_context(), @@ -155,8 +155,8 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be modified because " - 
"manager %s doesn't exist" % + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") % manager_uid) values['project_manager'] = manager['id'] if description: @@ -243,8 +243,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound('User "%s" not found' % user_id) + raise exception.NotFound(_('User "%s" not found') % user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound('Project "%s" not found' % project_id) + raise exception.NotFound(_('Project "%s" not found') % project_id) return user, project diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..33cd03430 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Fake LDAP server for test harness, backs to ReDIS. +"""Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap @@ -23,34 +23,65 @@ library to work with nova. """ +import fnmatch import json -import redis -from nova import flags -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') - - -class Redis(object): +class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') + raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst + cls._instance = _StorageDict() return cls._instance +class _StorageDict(dict): + def keys(self, pat=None): + ret = super(_StorageDict, self).keys() + if pat is not None: + ret = fnmatch.filter(ret, pat) + return ret + + def delete(self, key): + try: + del self[key] + except KeyError: + pass + + def flushdb(self): + self.clear() + + def hgetall(self, key): + """Returns the hash for the given key; creates + the hash if the key doesn't exist.""" + try: + return self[key] + except KeyError: + self[key] = {} + return self[key] + + def hget(self, key, field): + hashdict = self.hgetall(key) + try: + return hashdict[field] + except KeyError: + hashdict[field] = {} + return hashdict[field] + + def hset(self, key, field, val): + hashdict = self.hgetall(key) + hashdict[field] = val + + def hmset(self, key, value_dict): + hashdict = self.hgetall(key) + for field, val in value_dict.items(): + hashdict[field] = val + + SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -169,8 +200,6 @@ def _to_json(unencoded): class FakeLDAP(object): - #TODO(vish): refactor this class to use a wrapper instead of accessing - # redis directly """Fake LDAP connection.""" def simple_bind_s(self, dn, password): @@ -183,14 +212,13 @@ class FakeLDAP(object): def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" - key = "%s%s" % (self.__redis_prefix, dn) - + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for 
k, v in attr]) - Redis.instance().hmset(key, value_dict) + Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" - Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. @@ -201,18 +229,18 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = Redis.instance() - key = "%s%s" % (self.__redis_prefix, dn) + store = Store.instance() + key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: - values = _from_json(redis.hget(key, k)) + values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) - values = redis.hset(key, k, _to_json(values)) + values = store.hset(key, k, _to_json(values)) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. @@ -226,16 +254,17 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = Redis.instance() + store = Store.instance() if scope == SCOPE_BASE: - keys = ["%s%s" % (self.__redis_prefix, dn)] + keys = ["%s%s" % (self.__prefix, dn)] else: - keys = redis.keys("%s*%s" % (self.__redis_prefix, dn)) + keys = store.keys("%s*%s" % (self.__prefix, dn)) + objects = [] for key in keys: - # get the attributes from redis - attrs = redis.hgetall(key) - # turn the values from redis into lists + # get the attributes from the store + attrs = store.hgetall(key) + # turn the values from the store into lists # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) @@ -244,13 +273,13 @@ class FakeLDAP(object): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) - objects.append((key[len(self.__redis_prefix):], attrs)) + objects.append((key[len(self.__prefix):], attrs)) # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): # pylint: disable-msg=R0201 - """Get the prefix to use for all redis keys.""" + def __prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all keys.""" return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index c10939d74..e289ea5a2 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -159,7 +159,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound("LDAP object for %s doesn't exist" + raise exception.NotFound(_("LDAP object for %s doesn't exist") % name) else: attr = [ @@ -182,11 +182,12 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -195,8 +196,8 @@ class 
LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -218,9 +219,9 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % - manager_uid) + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) if description: @@ -416,8 +417,9 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -432,8 +434,9 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -444,28 +447,30 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) + raise exception.Duplicate(_("User %s is already a member of " + "the group %s") % (uid, group_dn)) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -479,15 +484,16 
@@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead.", group_dn) + logging.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -500,7 +506,8 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + raise exception.NotFound(_("Group at dn %s doesn't exist") + % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6df..417f2b76d 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -257,12 +257,12 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info('Looking up user: %r', access_key) + logging.info(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) logging.info('user: %r', user) if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) + raise exception.NotFound(_('No user found for access key %s') + % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -271,12 +271,12 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) + raise exception.NotFound(_('No project called %s could be found') + % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) + raise exception.NotFound(_('User %s is not a member of project %s') + % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -284,7 +284,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode @@ -294,7 +294,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) return (user, project) def get_access_key(self, user, project): @@ -364,7 +364,7 @@ class AuthManager(object): with self.driver() as drv: if role == 'projectmanager': if not project: - raise exception.Error("Must specify project") + raise 
exception.Error(_("Must specify project")) return self.is_project_manager(user, project) global_role = drv.has_role(User.safe_id(user), @@ -398,9 +398,9 @@ class AuthManager(object): @param project: Project in which to add local role. """ if role not in FLAGS.allowed_roles: - raise exception.NotFound("The %s role can not be found" % role) + raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound("The %s role is global only" % role) + raise exception.NotFound(_("The %s role is global only") % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -546,7 +546,8 @@ class AuthManager(object): Project.safe_id(project)) if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + raise exception.NotFound(_('project network data has not ' + 'been set')) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) @@ -659,8 +660,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + logging.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 3472201cd..bbe91a70c 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -49,7 +49,7 @@ class CloudPipe(object): self.manager = manager.AuthManager() def launch_vpn_instance(self, project_id): - logging.debug("Launching VPN for %s" % (project_id)) + logging.debug(_("Launching VPN for %s") % (project_id)) project = self.manager.get_project(project_id) # Make a payload.zip tmpfolder = tempfile.mkdtemp() diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..a33ed2dc4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -73,14 +73,19 @@ class ComputeAPI(base.Base): is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: image = self.image_service.show(context, image_id) + + # If kernel_id/ramdisk_id isn't explicitly set in API call + # we take the defaults from the image's metadata if kernel_id is None: - kernel_id = image.get('kernelId', FLAGS.default_kernel) + kernel_id = image.get('kernelId', None) if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + ramdisk_id = image.get('ramdiskId', None) # Make sure we have access to kernel and ramdisk - self.image_service.show(context, kernel_id) - self.image_service.show(context, ramdisk_id) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -103,8 +108,8 @@ class ComputeAPI(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, - 'kernel_id': kernel_id, - 'ramdisk_id': ramdisk_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', 'state_description': 'scheduling', 'user_id': context.user_id, 'project_id': context.project_id, @@ -120,7 +125,7 @@ class ComputeAPI(base.Base): elevated = context.elevated() instances = [] - logging.debug("Going to run %s instances...", num_instances) + logging.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -157,7 +162,7 @@ class ComputeAPI(base.Base): {"method": "setup_fixed_ip", "args": 
{"address": address}}) - logging.debug("Casting to scheduler for %s/%s's instance %s", + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -204,12 +209,12 @@ class ComputeAPI(base.Base): instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning(_("Instance %d was not found during terminate"), instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning(_("Instance %d is already being terminated"), instance_id) return @@ -223,7 +228,7 @@ class ComputeAPI(base.Base): address = self.db.instance_get_floating_address(context, instance['id']) if address: - logging.debug("Disassociating address %s" % address) + logging.debug(_("Disassociating address %s") % address) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. Perhaps in the scheduler? @@ -234,7 +239,7 @@ class ComputeAPI(base.Base): address = self.db.instance_get_fixed_address(context, instance['id']) if address: - logging.debug("Deallocating address %s" % address) + logging.debug(_("Deallocating address %s") % address) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. @@ -275,6 +280,24 @@ class ComputeAPI(base.Base): {"method": "reboot_instance", "args": {"instance_id": instance['id']}}) + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance['id']}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance['id']}}) + def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 675cd0259..814a258cd 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -67,12 +67,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn("Input partition size not evenly divisible by" - " sector size: %d / %d", file_size, sector_size) + logging.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn("Bytes for local storage not evenly divisible" - " by sector size: %d / %d", local_bytes, sector_size) + logging.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a @@ -106,6 +106,13 @@ def partition(infile, outfile, local_bytes=0, resize=True, % (outfile, local_type, local_first, local_last)) +def extend(image, size, execute): + 
file_size = os.path.getsize(image) + if file_size >= size: + return + return execute('truncate -s size %s' % (image,)) + + def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. @@ -115,20 +122,30 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): If partition is not specified it mounts the image as a single partition. """ - out, err = execute('sudo losetup -f --show %s' % image) + out, err = execute('sudo losetup --find --show %s' % image) if err: - raise exception.Error('Could not attach image to loopback: %s' % err) + raise exception.Error(_('Could not attach image to loopback: %s') + % err) device = out.strip() try: if not partition is None: # create partition out, err = execute('sudo kpartx -a %s' % device) if err: - raise exception.Error('Failed to load partition: %s' % err) + raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], partition) else: mapped_device = device + + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) tmpdir = tempfile.mkdtemp() @@ -137,7 +154,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): out, err = execute( 'sudo mount %s %s' % (mapped_device, tmpdir)) if err: - raise exception.Error('Failed to mount filesystem: %s' % err) + raise exception.Error(_('Failed to mount filesystem: %s') + % err) try: if key: @@ -156,7 +174,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): execute('sudo kpartx -d %s' % device) finally: # remove loopback - execute('sudo losetup -d %s' % device) + execute('sudo losetup --detach %s' % device) def _inject_key_into_fs(key, fs, execute=None): @@ -165,7 +183,7 @@ def _inject_key_into_fs(key, fs, execute=None): key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. 
""" - sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') + sshdir = os.path.join(fs, 'root', '.ssh') execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter execute('sudo chown root %s' % sshdir) execute('sudo chmod 700 %s' % sshdir) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 6e47170bd..196d6a8df 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -38,7 +38,8 @@ def get_by_type(instance_type): if instance_type is None: return FLAGS.default_instance_type if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s" % instance_type) + raise exception.ApiError(_("Unknown instance type: %s"), + instance_type) return instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 59d4fb29d..cc607f9d4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -93,8 +93,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): - raise exception.Error("Instance has already been created") - logging.debug("instance %s: starting...", instance_id) + raise exception.Error(_("Instance has already been created")) + logging.debug(_("instance %s: starting..."), instance_id) self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, @@ -113,7 +113,7 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception("instance %s: Failed to spawn", + logging.exception(_("instance %s: Failed to spawn"), instance_ref['name']) self.db.instance_set_state(context, instance_id, @@ -125,7 +125,7 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - logging.debug("instance %s: terminating", instance_id) + logging.debug(_("instance %s: terminating"), instance_id) instance_ref = self.db.instance_get(context, instance_id) volumes = instance_ref.get('volumes', []) or [] @@ -133,8 +133,8 @@ class ComputeManager(manager.Manager): self.detach_volume(context, instance_id, volume['id']) if instance_ref['state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % instance_id) + raise exception.Error(_('trying to destroy already destroyed' + ' instance: %s') % instance_id) self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? 
@@ -148,13 +148,13 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)', + logging.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING) - logging.debug('instance %s: rebooting', instance_ref['name']) + logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -168,7 +168,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: rescuing', + logging.debug(_('instance %s: rescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -183,7 +183,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: unrescuing', + logging.debug(_('instance %s: unrescuing'), instance_ref['internal_id']) self.db.instance_set_state(context, instance_id, @@ -192,11 +192,52 @@ class ComputeManager(manager.Manager): self.driver.unrescue(instance_ref) self._update_state(context, instance_id) + @staticmethod + def _update_state_callback(self, context, instance_id, result): + """Update instance state when async task completes.""" + self._update_state(context, instance_id) + + @exception.wrap_exception + def pause_instance(self, context, instance_id): + """Pause an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: pausing', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'pausing') + self.driver.pause(instance_ref, + lambda result: self._update_state_callback(self, + context, + instance_id, + result)) + + @exception.wrap_exception + def unpause_instance(self, context, instance_id): + """Unpause a paused instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug('instance %s: unpausing', + instance_ref['internal_id']) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'unpausing') + self.driver.unpause(instance_ref, + lambda result: self._update_state_callback(self, + context, + instance_id, + result)) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug("instance %s: getting console output", instance_id) + logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_console_output(instance_ref) @@ -205,7 +246,7 @@ class ComputeManager(manager.Manager): def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug("instance %s: attaching volume %s to %s", instance_id, + logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, @@ -222,7 +263,7 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the 
exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception("instance %s: attach failed %s, removing", + logging.exception(_("instance %s: attach failed %s, removing"), instance_id, mountpoint) self.volume_manager.remove_compute_volume(context, volume_id) @@ -234,13 +275,13 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug("instance %s: detaching volume %s", + logging.debug(_("instance %s: detaching volume %s"), instance_id, volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn("Detaching volume from unknown instance %s", + logging.warn(_("Detaching volume from unknown instance %s"), instance_ref['name']) else: self.driver.detach_volume(instance_ref['name'], diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 22653113a..60c347a5e 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -255,7 +255,7 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug('updating %s...', self.instance_id) + logging.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() @@ -285,7 +285,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception('unexpected error during update') + logging.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -351,7 +351,7 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error('Cannot get blockstats for "%s" on "%s"', + logging.error(_('Cannot get blockstats for "%s" on "%s"'), disk, self.instance_id) raise @@ -373,7 +373,7 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error('Cannot get ifstats for "%s" on "%s"', + logging.error(_('Cannot get ifstats for "%s" on "%s"'), interface, self.instance_id) raise @@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception('unexpected exception getting connection') + logging.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service): if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug('Found instance: %s', domain_id) + logging.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/crypto.py b/nova/crypto.py index aacc50b17..af4a06a0c 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -39,13 +39,13 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') +flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('keys_path', '$state_path/keys', - 'Where we keep our keys') + _('Where we keep our keys')) flags.DEFINE_string('ca_path', '$state_path/CA', - 'Where we keep our root CA') + _('Where we keep our root CA')) flags.DEFINE_boolean('use_intermediate_ca', False, - 'Should we use intermediate CAs for each project?') + _('Should we use intermediate 
CAs for each project?')) def ca_path(project_id): @@ -111,9 +111,9 @@ def generate_x509_cert(subject, bits=1024): keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') logging.debug("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating private key: %s", + utils.runthis(_("Generating private key: %s"), "openssl genrsa -out %s %s" % (keyfile, bits)) - utils.runthis("Generating CSR: %s", + utils.runthis(_("Generating CSR: %s"), "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject)) private_key = open(keyfile).read() @@ -131,7 +131,7 @@ def sign_csr(csr_text, intermediate=None): if not os.path.exists(user_ca): start = os.getcwd() os.chdir(FLAGS.ca_path) - utils.runthis("Generating intermediate CA: %s", + utils.runthis(_("Generating intermediate CA: %s"), "sh geninter.sh %s" % (intermediate)) os.chdir(start) return _sign_csr(csr_text, user_ca) @@ -142,11 +142,11 @@ def _sign_csr(csr_text, ca_folder): csrfile = open("%s/inbound.csr" % (tmpfolder), "w") csrfile.write(csr_text) csrfile.close() - logging.debug("Flags path: %s" % ca_folder) + logging.debug(_("Flags path: %s") % ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.runthis("Signing cert: %s", + utils.runthis(_("Signing cert: %s"), "openssl ca -batch -out %s/outbound.crt " "-config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder)) diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc2443..4e15596d9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -334,6 +334,11 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) +def instance_action_create(context, values): + """Create an instance action from the values dictionary.""" + return IMPL.instance_action_create(context, values) + + ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1..a36f767a7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -41,7 +41,7 @@ FLAGS = flags.FLAGS def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: - warnings.warn('Use of empty request context is deprecated', + warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin @@ -130,7 +130,7 @@ def service_get(context, service_id, session=None): first() if not result: - raise exception.NotFound('No service for id %s' % service_id) + raise exception.NotFound(_('No service for id %s') % service_id) return result @@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No service for %s, %s' % (host, binary)) + raise exception.NotFound(_('No service for %s, %s') % (host, binary)) return result @@ -491,7 +491,7 @@ def fixed_ip_get_by_address(context, address, session=None): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('No floating ip for address %s' % address) + raise exception.NotFound(_('No floating ip for address %s') % address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -528,6 +528,8 @@ def fixed_ip_update(context, address, values): #TODO(gundlach): instance_create and volume_create are nearly identical #and should be refactored. I expect there are other copy-and-paste #functions between the two of them as well. 
+ + @require_context def instance_create(context, values): """Create a new Instance record in the database. @@ -591,7 +593,7 @@ def instance_get(context, instance_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No instance for id %s' % instance_id) + raise exception.NotFound(_('No instance for id %s') % instance_id) return result @@ -669,7 +671,7 @@ def instance_get_by_internal_id(context, internal_id): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('Instance %s not found' % (internal_id)) + raise exception.NotFound(_('Instance %s not found') % (internal_id)) return result @@ -747,6 +749,18 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +@require_context +def instance_action_create(context, values): + """Create an instance action from the values dictionary.""" + action_ref = models.InstanceActions() + action_ref.update(values) + + session = get_session() + with session.begin(): + action_ref.save(session=session) + return action_ref + + ################### @@ -790,7 +804,7 @@ def key_pair_get(context, user_id, name, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('no keypair for user %s, name %s' % + raise exception.NotFound(_('no keypair for user %s, name %s') % (user_id, name)) return result @@ -905,7 +919,7 @@ def network_get(context, network_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) return result @@ -913,6 +927,8 @@ def network_get(context, network_id, session=None): # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 + + @require_admin_context def network_get_associated_fixed_ips(context, network_id): session = get_session() @@ -933,7 +949,7 @@ def network_get_by_bridge(context, bridge): first() if not result: - raise exception.NotFound('No network for bridge %s' % bridge) + raise exception.NotFound(_('No network for bridge %s') % bridge) return result @@ -947,7 +963,7 @@ def network_get_by_instance(_context, instance_id): filter_by(deleted=False).\ first() if not rv: - raise exception.NotFound('No network for instance %s' % instance_id) + raise exception.NotFound(_('No network for instance %s') % instance_id) return rv @@ -961,7 +977,7 @@ def network_set_host(context, network_id, host_id): with_lockmode('update').\ first() if not network_ref: - raise exception.NotFound('No network for id %s' % network_id) + raise exception.NotFound(_('No network for id %s') % network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues @@ -1073,7 +1089,7 @@ def auth_get_token(_context, token_hash): filter_by(token_hash=token_hash).\ first() if not tk: - raise exception.NotFound('Token %s does not exist' % token_hash) + raise exception.NotFound(_('Token %s does not exist') % token_hash) return tk @@ -1097,7 +1113,7 @@ def quota_get(context, project_id, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No quota for project_id %s' % project_id) + raise exception.NotFound(_('No quota for project_id %s') % project_id) return result @@ -1252,7 +1268,7 @@ def volume_get(context, volume_id, session=None): filter_by(deleted=False).\ first() if not 
result: - raise exception.NotFound('No volume for id %s' % volume_id) + raise exception.NotFound(_('No volume for id %s') % volume_id) return result @@ -1308,7 +1324,7 @@ def volume_get_by_ec2_id(context, ec2_id): raise exception.NotAuthorized() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result @@ -1332,7 +1348,7 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound('Volume %s not found' % ec2_id) + raise exception.NotFound(_('Volume %s not found') % ec2_id) return result.instance @@ -1344,7 +1360,7 @@ def volume_get_shelf_and_blade(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound('No export device found for volume %s' % + raise exception.NotFound(_('No export device found for volume %s') % volume_id) return (result.shelf_id, result.blade_id) @@ -1357,7 +1373,7 @@ def volume_get_iscsi_target_num(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound('No target id found for volume %s' % + raise exception.NotFound(_('No target id found for volume %s') % volume_id) return result.target_num @@ -1402,7 +1418,7 @@ def security_group_get(context, security_group_id, session=None): options(joinedload_all('rules')).\ first() if not result: - raise exception.NotFound("No secuity group with id %s" % + raise exception.NotFound(_("No security group with id %s") % security_group_id) return result @@ -1419,7 +1435,7 @@ def security_group_get_by_name(context, project_id, group_name): first() if not result: raise exception.NotFound( - 'No security group named %s for project: %s' \ + _('No security group named %s for project: %s') % (group_name, project_id)) return result @@ -1507,7 +1523,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None): filter_by(id=security_group_rule_id).\ first() if not result: - raise exception.NotFound("No secuity group rule with id %s" % + raise exception.NotFound(_("No secuity group rule with id %s") % security_group_rule_id) return result @@ -1543,7 +1559,7 @@ def user_get(context, id, session=None): first() if not result: - raise exception.NotFound('No user for id %s' % id) + raise exception.NotFound(_('No user for id %s') % id) return result @@ -1559,7 +1575,7 @@ def user_get_by_access_key(context, access_key, session=None): first() if not result: - raise exception.NotFound('No user for access key %s' % access_key) + raise exception.NotFound(_('No user for access key %s') % access_key) return result @@ -1621,7 +1637,7 @@ def project_get(context, id, session=None): first() if not result: - raise exception.NotFound("No project with id %s" % id) + raise exception.NotFound(_("No project with id %s") % id) return result diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a921..eac6a304e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,7 +22,7 @@ SQLAlchemy models for nova data. 
import datetime from sqlalchemy.orm import relationship, backref, object_mapper -from sqlalchemy import Column, Integer, String, schema +from sqlalchemy import Column, Integer, Float, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base @@ -226,6 +226,31 @@ class Instance(BASE, NovaBase): # 'shutdown', 'shutoff', 'crashed']) +class InstanceDiagnostics(BASE, NovaBase): + """Represents a guest VM's diagnostics""" + __tablename__ = "instance_diagnostics" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + memory_available = Column(Float) + memory_free = Column(Float) + cpu_load = Column(Float) + disk_read = Column(Float) + disk_write = Column(Float) + net_tx = Column(Float) + net_rx = Column(Float) + + +class InstanceActions(BASE, NovaBase): + """Represents a guest VM's actions and results""" + __tablename__ = "instance_actions" + id = Column(Integer, primary_key=True) + instance_id = Column(Integer, ForeignKey('instances.id')) + + action = Column(String(255)) + error = Column(Text) + + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -526,10 +551,11 @@ def register_models(): it will never need to be called explicitly elsewhere. """ from sqlalchemy import create_engine - models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, - FloatingIp, Network, SecurityGroup, - SecurityGroupIngressRule, SecurityGroupInstanceAssociation, - AuthToken, User, Project) # , Image, Host + models = (Service, Instance, InstanceDiagnostics, InstanceActions, + Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, + Network, SecurityGroup, SecurityGroupIngressRule, + SecurityGroupInstanceAssociation, AuthToken, User, + Project) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 6d6c37338..277033e0f 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -27,23 +27,26 @@ import traceback class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: - description = "Unexpected error while running command." 
+ description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( - description, cmd, exit_code, stdout, stderr) + message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\ + % (description, cmd, exit_code, stdout, stderr) IOError.__init__(self, message) class Error(Exception): + def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code @@ -81,7 +84,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception('Uncaught exception') + logging.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c64617931..41e686cff 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -37,12 +37,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug('(%s) publish (key: %s) %s', + logging.debug(_('(%s) publish (key: %s) %s'), self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug('Publishing to route %s', f) + logging.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -82,16 +82,16 @@ class Backend(object): def queue_declare(self, queue, **kwargs): if queue not in self._queues: - logging.debug('Declaring queue %s', queue) + logging.debug(_('Declaring queue %s'), queue) self._queues[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) + logging.debug(_('Declaring exchange %s'), exchange) self._exchanges[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', + logging.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) self._exchanges[exchange].bind(self._queues[queue].push, routing_key) @@ -117,7 +117,7 @@ class Backend(object): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug('Getting from %s: %s', queue, message) + logging.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 87444565a..8fa0beb7a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -235,12 +235,11 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -DEFINE_string('default_kernel', 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', 'ari-11111', - 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') +DEFINE_string('null_kernel', 'nokernel', + 'kernel image that indicates not to use a kernel,' + ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', diff --git a/nova/image/glance.py b/nova/image/glance.py index 1ca6cf2eb..cb3936df1 100644 --- a/nova/image/glance.py +++ 
b/nova/image/glance.py @@ -77,8 +77,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images"), res.status_int) return [] finally: c.close() @@ -96,8 +96,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn("Parallax returned HTTP error %d from " - "request for /images/detail", res.status_int) + logging.warn(_("Parallax returned HTTP error %d from " + "request for /images/detail"), res.status_int) return [] finally: c.close() diff --git a/nova/image/s3.py b/nova/image/s3.py index 0a25161de..7b04aa072 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService): result = self.index(context) result = [i for i in result if i['imageId'] == image_id] if not result: - raise exception.NotFound('Image %s could not be found' % image_id) + raise exception.NotFound(_('Image %s could not be found') + % image_id) image = result[0] return image diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0fefd9415..16add7689 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -135,7 +135,7 @@ def ensure_vlan(vlan_num): """Create a vlan unless it already exists""" interface = "vlan%s" % vlan_num if not _device_exists(interface): - logging.debug("Starting VLAN inteface %s", interface) + logging.debug(_("Starting VLAN inteface %s"), interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) @@ -145,7 +145,7 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug("Starting Bridge interface for %s", interface) + logging.debug(_("Starting Bridge interface for %s"), interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) @@ -202,9 +202,9 @@ def update_dhcp(context, network_id): _execute('sudo kill -HUP %d' % pid) return except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Hupping dnsmasq threw %s", exc) + logging.debug(_("Hupping dnsmasq threw %s"), exc) else: - logging.debug("Pid %d is stale, relaunching dnsmasq", pid) + logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -276,7 +276,7 @@ def _stop_dnsmasq(network): try: _execute('sudo kill -TERM %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - logging.debug("Killing dnsmasq threw %s", exc) + logging.debug(_("Killing dnsmasq threw %s"), exc) def _dhcp_file(bridge, kind): diff --git a/nova/network/manager.py b/nova/network/manager.py index 6a30f30b7..8c300e305 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -115,7 +115,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug("setting network host") + logging.debug(_("setting network host")) host = self.db.network_set_host(context, network_id, self.host) @@ -174,10 +174,10 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) 
instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error("IP %s leased that isn't associated" % + raise exception.Error(_("IP %s leased that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s leased to bad mac %s vs %s" % + raise exception.Error(_("IP %s leased to bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, @@ -185,7 +185,8 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn("IP %s leased that was already deallocated", address) + logging.warn(_("IP %s leased that was already deallocated"), + address) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" @@ -193,13 +194,13 @@ class NetworkManager(manager.Manager): fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error("IP %s released that isn't associated" % + raise exception.Error(_("IP %s released that isn't associated") % address) if instance_ref['mac_address'] != mac: - raise exception.Error("IP %s released from bad mac %s vs %s" % + raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn("IP %s released that was not leased", address) + logging.warn(_("IP %s released that was not leased"), address) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) @@ -361,8 +362,7 @@ class FlatDHCPManager(FlatManager): """Sets up matching network for compute hosts.""" network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface, - network_ref) + FLAGS.flat_interface) def setup_fixed_ip(self, context, address): """Setup dhcp for this network.""" @@ -408,7 +408,7 @@ class VlanManager(NetworkManager): self.host, time) if num: - logging.debug("Dissassociated %s stale fixed ip(s)", num) + logging.debug(_("Dissassociated %s stale fixed ip(s)"), num) def init_host(self): """Do any initialization that needs to be run if this is a diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c8920b00c..52257f69f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -102,7 +102,7 @@ def _render_parts(value, write_cb): _render_parts(subsubvalue, write_cb) write_cb('') else: - raise Exception("Unknown S3 value type %r", value) + raise Exception(_("Unknown S3 value type %r"), value) def get_argument(request, key, default_value): @@ -134,7 +134,7 @@ def get_context(request): check_type='s3') return context.RequestContext(user, project) except exception.Error as ex: - logging.debug("Authentication Failure: %s", ex) + logging.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug("Creating bucket %s", self.name) + logging.debug(_("Creating bucket %s"), self.name) logging.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) @@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug("Deleting bucket %s", self.name) + logging.debug(_("Deleting bucket %s"), self.name) bucket_object = 
bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): @@ -261,7 +261,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug("Getting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Getting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -279,7 +281,9 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug("Putting object: %s / %s", self.bucket.name, self.name) + logging.debug(_("Putting object: %s / %s"), + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized() @@ -298,7 +302,7 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. """ - logging.debug("Deleting object: %s / %s", + logging.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name) @@ -394,17 +398,17 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug("not authorized for render_POST in images") + logging.debug(_("not authorized for render_POST in images")) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug("handling publicity toggle") + logging.debug(_("handling publicity toggle")) image_object.set_public(operation == 'add') else: # other attributes imply update - logging.debug("update user fields") + logging.debug(_("update user fields")) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] diff --git a/nova/rpc.py b/nova/rpc.py index 6a3f552db..6e2cf051a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -91,15 +91,15 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception("AMQP server on %s:%d is unreachable." \ - " Trying again in %d seconds." % ( + logging.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( FLAGS.rabbit_host, FLAGS.rabbit_port, FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception("Unable to connect to AMQP server" \ - " after %d tries. Shutting down." % FLAGS.rabbit_max_retries) + logging.exception(_("Unable to connect to AMQP server" + " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +116,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error("Reconnected to queue") + logging.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception("Failed to fetch message from queue") + logging.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -153,7 +153,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -168,7 +168,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug('received %s' % (message_data)) + LOG.debug(_('received %s') % (message_data)) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -181,8 +181,8 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn('no method for message: %s' % (message_data)) - msg_reply(msg_id, 'No method for message: %s' % message_data) + LOG.warn(_('no method for message: %s') % (message_data)) + msg_reply(msg_id, _('No method for message: %s') % message_data) return node_func = getattr(self.proxy, str(method)) @@ -242,7 +242,7 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error("Returning exception %s to caller", message) + logging.error(_("Returning exception %s to caller"), message) logging.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance() @@ -283,7 +283,7 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value - LOG.debug('unpacked context: %s', context_dict) + LOG.debug(_('unpacked context: %s'), context_dict) return context.RequestContext.from_dict(context_dict) @@ -302,10 +302,10 @@ def _pack_context(msg, context): def call(context, topic, msg): """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") + LOG.debug(_("Making asynchronous call...")) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -353,7 +353,7 @@ def cast(context, topic, msg): def generic_response(message_data, message): """Logs a result and exits""" - LOG.debug('response %s', message_data) + LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) @@ -362,8 +362,8 @@ def send_message(topic, message, wait=True): """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - LOG.debug('topic is %s', topic) - LOG.debug('message %s', message) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 7fd09b053..9deaa2777 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler): hosts = self.hosts_up(context, topic) if not hosts: - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) return 
hosts[int(random.random() * len(hosts))] diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f271d573f..08d7033f5 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -58,4 +58,4 @@ class Scheduler(object): def schedule(self, context, topic, *_args, **_kwargs): """Must override at least this method for scheduler to work.""" - raise NotImplementedError("Must implement a fallback schedule") + raise NotImplementedError(_("Must implement a fallback schedule")) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 60a3d2b4b..44e21f2fd 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug("Casting to %s %s for %s", topic, host, method) + logging.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f5093656..f9171ab35 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_cores) = result if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores: - raise driver.NoValidHost("All hosts have too many cores") + raise driver.NoValidHost(_("All hosts have too many cores")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" @@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, volume_gigabytes) = result if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - raise driver.NoValidHost("All hosts have too many gigabytes") + raise driver.NoValidHost(_("All hosts have too many " + "gigabytes")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_set_network_host(self, context, *_args, **_kwargs): """Picks a host that is up and has the fewest networks.""" @@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_count) = result if instance_count >= FLAGS.max_networks: - raise driver.NoValidHost("All hosts have too many networks") + raise driver.NoValidHost(_("All hosts have too many networks")) if self.service_is_up(service): return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) diff --git a/nova/service.py b/nova/service.py index ac30aaceb..f1f90742f 100644 --- a/nova/service.py +++ b/nova/service.py @@ -151,7 +151,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn("Starting %s node", topic) + logging.warn(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, 
periodic_interval) @@ -163,7 +163,7 @@ class Service(object): try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: - logging.warn("Service killed that has no database entry") + logging.warn(_("Service killed that has no database entry")) def stop(self): for x in self.timers: @@ -184,8 +184,8 @@ class Service(object): try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: - logging.debug("The service database object disappeared, " - "Recreating it.") + logging.debug(_("The service database object disappeared, " + "Recreating it.")) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) @@ -196,13 +196,13 @@ class Service(object): # TODO(termie): make this pattern be more elegant. if getattr(self, "model_disconnected", False): self.model_disconnected = False - logging.error("Recovered model server connection!") + logging.error(_("Recovered model server connection!")) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable-msg=W0702 if not getattr(self, "model_disconnected", False): self.model_disconnected = True - logging.exception("model server went away") + logging.exception(_("model server went away")) def serve(*services): @@ -221,7 +221,7 @@ def serve(*services): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce..3820f5f27 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,11 +56,16 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, + return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id, display_name='server%s' % id, internal_id=id) +def fake_compute_api(cls, req, id): + return True + + class ServersTest(unittest.TestCase): + def setUp(self): self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} @@ -82,9 +87,15 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) + self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', + fake_compute_api) + self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', + fake_compute_api) + self.allow_admin = FLAGS.allow_admin_api def tearDown(self): self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -211,6 +222,30 @@ class ServersTest(unittest.TestCase): self.assertEqual(s['imageId'], 10) i += 1 + def test_server_pause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/pause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + + def test_server_unpause(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/unpause') + req.method = 'POST' + req.content_type = 'application/json' + req.body 
= json.dumps(body) + res = req.get_response(nova.api.API('os')) + self.assertEqual(res.status_int, 202) + def test_server_reboot(self): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 4508d6721..61ae43fb1 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -333,14 +333,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): AuthManagerTestCase.__init__(self) test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 if FLAGS.flush_db: - logging.info("Flushing redis datastore") - try: - r = fakeldap.Redis.instance() - r.flushdb() - except: - self.skip = True + logging.info("Flushing datastore") + r = fakeldap.Store.instance() + r.flushdb() class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index c6353d357..187ca31de 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -127,6 +127,14 @@ class ComputeTestCase(test.TestCase): self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) + def test_pause(self): + """Ensure instance can be paused""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.pause_instance(self.context, instance_id) + self.compute.unpause_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_reboot(self): """Ensure instance can be rebooted""" instance_id = self._create_instance() diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d190cdabf..7682f9662 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -40,19 +40,51 @@ class LibvirtConnTestCase(test.TestCase): self.network = utils.import_object(FLAGS.network_manager) FLAGS.instances_path = '' - def test_get_uri_and_template(self): - ip = '10.11.12.13' - - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} - + test_ip = '10.11.12.13' + test_instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} + + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + 
instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True, + rescue=True) + + def do_test_xml_and_uri(self, instance, + expect_ramdisk, expect_kernel, + rescue=False): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) @@ -60,13 +92,14 @@ class LibvirtConnTestCase(test.TestCase): self.network.set_network_host(context.get_admin_context(), network_ref['id']) - fixed_ip = {'address': ip, + fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']} ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + db.fixed_ip_update(ctxt, self.test_ip, + {'allocated': True, + 'instance_id': instance_ref['id']}) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -78,23 +111,73 @@ class LibvirtConnTestCase(test.TestCase): (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'rescue-kernel') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'rescue-ramdisk') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') + else: + check = (lambda t: t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') + else: + check = (lambda t: t.find('./os/initrd'), None) + check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + + if rescue: + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] + else: + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref) + 
xml = conn.to_xml(instance_ref, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), @@ -106,6 +189,9 @@ class LibvirtConnTestCase(test.TestCase): expected_result, '%s failed common check %d' % (xml, i)) + # This test is supposed to make sure we don't override a specifically + # set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. @@ -114,7 +200,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, testuri) def tearDown(self): diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce6..29be9c4e1 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,7 +43,7 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') @@ -208,7 +208,7 @@ def stop(pidfile): pid = None if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. Daemon not running?\n") sys.stderr.write(message % pidfile) # Not an error in a restart return @@ -229,7 +229,7 @@ def stop(pidfile): def serve(filename): - logging.debug("Serving %s" % filename) + logging.debug(_("Serving %s") % filename) name = os.path.basename(filename) OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() @@ -281,7 +281,7 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/utils.py b/nova/utils.py index ea1f04ca7..16b509b48 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -50,7 +50,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError): - raise exception.NotFound('Class %s cannot be found' % class_str) + raise exception.NotFound(_('Class %s cannot be found') % class_str) def import_object(import_str): @@ -64,7 +64,7 @@ def import_object(import_str): def fetchfile(url, target): - logging.debug("Fetching %s" % url) + logging.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -76,7 +76,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug("Running cmd (subprocess): %s", cmd) + logging.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -89,7 +89,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug("Result was %s" % (obj.returncode)) + logging.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -127,7 +127,7 @@ def debug(arg): def runthis(prompt, cmd, check_exit_code=True): - logging.debug("Running %s" % (cmd)) + logging.debug(_("Running %s") % (cmd)) rv, err = execute(cmd, 
check_exit_code=check_exit_code) @@ -160,7 +160,7 @@ def get_my_ip(): csock.close() return addr except socket.gaierror as ex: - logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) return "127.0.0.1" @@ -204,7 +204,7 @@ class LazyPluggable(object): if not self.__backend: backend_name = self.__pivot.value if backend_name not in self.__backends: - raise exception.Error('Invalid backend: %s' % backend_name) + raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if type(backend) == type(tuple()): diff --git a/nova/virt/connection.py b/nova/virt/connection.py index c40bb4bb4..61e99944e 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -66,6 +66,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error('Failed to open connection to the hypervisor') + logging.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 76a04f18f..238acf798 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -136,6 +136,18 @@ class FakeConnection(object): """ pass + def pause(self, instance, callback): + """ + Pause the specified instance. + """ + pass + + def unpause(self, instance, callback): + """ + Unpause the specified instance. + """ + pass + def destroy(self, instance): """ Destroy (shutdown and delete) the specified instance. @@ -169,7 +181,8 @@ class FakeConnection(object): knowledge of the instance """ if instance_name not in self.instances: - raise exception.NotFound("Instance %s Not Found" % instance_name) + raise exception.NotFound(_("Instance %s Not Found") + % instance_name) i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, @@ -249,5 +262,6 @@ class FakeConnection(object): class FakeInstance(object): + def __init__(self): self._state = power_state.NOSTATE diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template deleted file mode 100644 index d90afea81..000000000 --- a/nova/virt/libvirt.qemu.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template deleted file mode 100644 index a3b88106c..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template +++ /dev/null @@ -1,38 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template.THIS b/nova/virt/libvirt.rescue.qemu.xml.template.THIS new file mode 100644 index 000000000..a3b88106c --- /dev/null +++ b/nova/virt/libvirt.rescue.qemu.xml.template.THIS @@ -0,0 +1,38 @@ + + %(name)s + + hvm + %(basepath)s/rescue-kernel + %(basepath)s/rescue-ramdisk + root=/dev/vda1 console=ttyS0 + + + + + %(memory_kb)s + %(vcpus)s + + + + + + + + + + + + + + + + + %(extra_params)s + + + + + + + + diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template deleted file mode 100644 index a254692d4..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template +++ /dev/null @@ -1,31 +0,0 @@ - - %(name)s - %(memory_kb)s - 
- %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template.THIS b/nova/virt/libvirt.rescue.uml.xml.template.THIS new file mode 100644 index 000000000..a254692d4 --- /dev/null +++ b/nova/virt/libvirt.rescue.uml.xml.template.THIS @@ -0,0 +1,31 @@ + + %(name)s + %(memory_kb)s + + %(type)s + /usr/bin/linux + /dev/ubda1 + + + + + + + + + + + + + + + + + %(extra_params)s + + + + + + + diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template deleted file mode 100644 index 3b8d27237..000000000 --- a/nova/virt/libvirt.rescue.xen.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template deleted file mode 100644 index 506f2ef72..000000000 --- a/nova/virt/libvirt.uml.xml.template +++ /dev/null @@ -1,27 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - %(extra_params)s - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template.THIS b/nova/virt/libvirt.uml.xml.template.THIS new file mode 100644 index 000000000..506f2ef72 --- /dev/null +++ b/nova/virt/libvirt.uml.xml.template.THIS @@ -0,0 +1,27 @@ + + %(name)s + %(memory_kb)s + + %(type)s + /usr/bin/linux + /dev/ubda1 + + + + + + + + + + + + + %(extra_params)s + + + + + + + diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template deleted file mode 100644 index 9677902c6..000000000 --- a/nova/virt/libvirt.xen.xml.template +++ /dev/null @@ -1,30 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..3fb2243da --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,79 @@ + + ${name} + ${memory_kb} + +#if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda1 +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda1 + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/rescue-kernel + ${basepath}/rescue-ramdisk + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda1 console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $getVar('rescue', False) + + + + + + + + +#else + + + + +#end if + + + + + + + +#if $getVar('extra_params', False) + ${extra_params} +#end if + + + + + + + + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 0d42ce2f8..16bcfe3c0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN. :libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template (QEmu/KVM). -:libvirt_xen_xml_template: Libvirt XML Template (Xen). -:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux). 
-:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU). -:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN). -:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML). +:libvirt_xml_template: Libvirt XML Template. :rescue_image_id: Rescue ami image (default: ami-rescue). :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). @@ -62,36 +57,20 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from Cheetah.Template import Template + libvirt = None libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_rescue_xml_template', - utils.abspath('virt/libvirt.rescue.qemu.xml.template'), - 'Libvirt RESCUE XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_rescue_xen_xml_template', - utils.abspath('virt/libvirt.rescue.xen.xml.template'), - 'Libvirt RESCUE XML Template for xen') -flags.DEFINE_string('libvirt_rescue_uml_xml_template', - utils.abspath('virt/libvirt.rescue.uml.xml.template'), - 'Libvirt RESCUE XML Template for user-mode-linux') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.qemu.xml.template'), - 'Libvirt XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_xen_xml_template', - utils.abspath('virt/libvirt.xen.xml.template'), - 'Libvirt XML Template for Xen') -flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('virt/libvirt.uml.xml.template'), - 'Libvirt XML Template for user-mode-linux') -flags.DEFINE_string('injected_network_template', - utils.abspath('virt/interfaces.template'), - 'Template file for injected network') + utils.abspath('virt/libvirt.xml.template'), + 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -123,13 +102,11 @@ def _get_net_and_mask(cidr): class LibvirtConnection(object): + def __init__(self, read_only): - (self.libvirt_uri, - template_file, - rescue_file) = self.get_uri_and_templates() + self.libvirt_uri = self.get_uri() - self.libvirt_xml = open(template_file).read() - self.rescue_xml = open(rescue_file).read() + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -139,7 +116,7 @@ class LibvirtConnection(object): @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) + logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn @@ -151,24 +128,18 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug('Connection to libvirt broke') + logging.debug(_('Connection to libvirt broke')) return False raise - def get_uri_and_templates(self): + def get_uri(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' - template_file = FLAGS.libvirt_uml_xml_template - rescue_file = FLAGS.libvirt_rescue_uml_xml_template elif FLAGS.libvirt_type == 'xen': uri = FLAGS.libvirt_uri or 'xen:///' 
- template_file = FLAGS.libvirt_xen_xml_template - rescue_file = FLAGS.libvirt_rescue_xen_xml_template else: uri = FLAGS.libvirt_uri or 'qemu:///system' - template_file = FLAGS.libvirt_xml_template - rescue_file = FLAGS.libvirt_rescue_xml_template - return uri, template_file, rescue_file + return uri def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], @@ -228,7 +199,7 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info('instance %s: deleting instance files %s', + logging.info(_('instance %s: deleting instance files %s'), instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -270,7 +241,7 @@ class LibvirtConnection(object): mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise exception.NotFound("No disk at %s" % mount_device) + raise exception.NotFound(_("No disk at %s") % mount_device) virt_dom.detachDevice(xml) @exception.wrap_exception @@ -286,10 +257,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rebooted', instance['name']) + logging.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_reboot failed: %s', exn) + logging.error(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -298,6 +269,14 @@ class LibvirtConnection(object): timer.f = _wait_for_reboot return timer.start(interval=0.5, now=True) + @exception.wrap_exception + def pause(self, instance, callback): + raise exception.APIError("pause not supported for libvirt.") + + @exception.wrap_exception + def unpause(self, instance, callback): + raise exception.APIError("unpause not supported for libvirt.") + @exception.wrap_exception def rescue(self, instance): self.destroy(instance, False) @@ -316,10 +295,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rescued', instance['name']) + logging.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_rescue failed: %s', exn) + logging.error(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -344,7 +323,7 @@ class LibvirtConnection(object): NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug("instance %s: is running", instance['name']) + logging.debug(_("instance %s: is running"), instance['name']) timer = utils.LoopingCall(f=None) @@ -354,10 +333,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: booted', instance['name']) + logging.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception('instance %s: failed to boot', + logging.exception(_('instance %s: failed to boot'), instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], @@ -372,7 +351,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if 
virsh_output.startswith('/dev/'): - logging.info('cool, it\'s a device') + logging.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -380,7 +359,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info('data: %r, fpath: %r' % (data, fpath)) + logging.info(_('data: %r, fpath: %r') % (data, fpath)) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -422,7 +401,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info('instance %s: Creating image', inst['name']) + logging.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -441,18 +420,28 @@ class LibvirtConnection(object): if not os.path.exists(basepath('disk')): images.fetch(inst.image_id, basepath('disk-raw'), user, project) - if not os.path.exists(basepath('kernel')): - images.fetch(inst.kernel_id, basepath('kernel'), user, - project) - if not os.path.exists(basepath('ramdisk')): - images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) + + if inst['kernel_id']: + if not os.path.exists(basepath('kernel')): + images.fetch(inst['kernel_id'], basepath('kernel'), + user, project) + if inst['ramdisk_id']: + if not os.path.exists(basepath('ramdisk')): + images.fetch(inst['ramdisk_id'], basepath('ramdisk'), + user, project) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, process_input=process_input, check_exit_code=check_exit_code) + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first + # partition + target_partition = None + if not inst['kernel_id']: + target_partition = "1" + key = str(inst['key_data']) net = None network_ref = db.network_get_by_instance(context.get_admin_context(), @@ -468,16 +457,24 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info('instance %s: injecting key into image %s', + logging.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info('instance %s: injecting net into image %s', - inst['name'], inst.image_id) - disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) - - if os.path.exists(basepath('disk')): - utils.execute('rm -f %s' % basepath('disk')) + logging.info(_('instance %s: injecting net into image %s'), + inst['name'], inst.image_id) + try: + disk.inject_data(basepath('disk-raw'), key, net, + partition=target_partition, + execute=execute) + except Exception as e: + # This could be a windows image, or a vmdk format disk + logging.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) + + if inst['kernel_id']: + if os.path.exists(basepath('disk')): + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -486,15 +483,21 @@ class LibvirtConnection(object): resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False - disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + + if inst['kernel_id']: + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) + else: + os.rename(basepath('disk-raw'), basepath('disk')) + disk.extend(basepath('disk'), 
local_bytes, execute=execute) if FLAGS.libvirt_type == 'uml': utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? - logging.debug('instance %s: starting toXML method', instance['name']) + logging.debug(_('instance %s: starting toXML method'), + instance['name']) network = db.project_get_network(context.get_admin_context(), instance['project_id']) # FIXME(vish): stick this in db @@ -523,20 +526,29 @@ class LibvirtConnection(object): 'mac_address': instance['mac_address'], 'ip_address': ip_address, 'dhcp_server': dhcp_server, - 'extra_params': extra_params} - if rescue: - libvirt_xml = self.rescue_xml % xml_info - else: - libvirt_xml = self.libvirt_xml % xml_info - logging.debug('instance %s: finished toXML method', instance['name']) + 'extra_params': extra_params, + 'rescue': rescue} + if not rescue: + if instance['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if instance['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + xml_info['disk'] = xml_info['basepath'] + "/disk" - return libvirt_xml + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) + logging.debug(_('instance %s: finished toXML method'), + instance['name']) + + return xml def get_info(self, instance_name): try: virt_dom = self._conn.lookupByName(instance_name) except: - raise exception.NotFound("Instance %s not found" % instance_name) + raise exception.NotFound(_("Instance %s not found") + % instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 012954394..ce2c68ce0 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -25,6 +25,7 @@ class NetworkHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..badaaedc1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -47,6 +47,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" + def __init__(self): return @@ -228,11 +229,7 @@ class VMHelper(): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] - metrics = session.get_xenapi().VM_guest_metrics.get_record( - record["guest_metrics"]) - diags = { - "Kernel": metrics["os_version"]["uname"], - "Distro": metrics["os_version"]["name"]} + diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..3b00ce8bf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -34,6 +34,7 @@ class VMOps(object): """ Management class for VM-related tasks """ + def __init__(self, session): global XenAPI if XenAPI is None: @@ -43,12 +44,16 @@ class VMOps(object): VMHelper.late_import() def list_instances(self): - """ List VM instances """ - return [self._session.get_xenapi().VM.get_name_label(vm) \ - for vm in self._session.get_xenapi().VM.get_all()] + """List VM instances""" + vms = [] + for vm in self._session.get_xenapi().VM.get_all(): + rec = self._session.get_xenapi().VM.get_record(vm) + if not rec["is_a_template"] and not rec["is_control_domain"]: + vms.append(rec["name_label"]) + return vms def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % @@ -80,16 +85,16 @@ class VMOps(object): vm_ref) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. 
This lets us clean up instances @@ -100,7 +105,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -108,17 +113,43 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) + except XenAPI.Failure, exc: + logging.warn(exc) + + def _wait_with_callback(self, instance_id, task, callback): + ret = None + try: + ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: logging.warn(exc) + callback(ret) + + def pause(self, instance, callback): + """Pause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.pause', vm) + self._wait_with_callback(instance.id, task, callback) + + def unpause(self, instance, callback): + """Unpause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.unpause', vm) + self._wait_with_callback(instance.id, task, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -134,6 +165,6 @@ class VMOps(object): return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index a4c7a3861..1943ccab0 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc). 
class VolumeOps(object): + def __init__(self, session): self._session = session diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6beb08f5e..146e2f153 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -54,6 +54,8 @@ import xmlrpclib from eventlet import event from eventlet import tpool +from nova import context +from nova import db from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps @@ -93,38 +95,47 @@ def get_connection(_): username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') + raise Exception(_('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi')) return XenAPIConnection(url, username, password) class XenAPIConnection(object): - """ A connection to XenServer or Xen Cloud Platform """ + """A connection to XenServer or Xen Cloud Platform""" + def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) self._volumeops = VolumeOps(session) def list_instances(self): - """ List VM instances """ + """List VM instances""" return self._vmops.list_instances() def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" self._vmops.spawn(instance) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" self._vmops.reboot(instance) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" self._vmops.destroy(instance) + def pause(self, instance, callback): + """Pause VM instance""" + self._vmops.pause(instance, callback) + + def unpause(self, instance, callback): + """Unpause paused VM instance""" + self._vmops.unpause(instance, callback) + def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" return self._vmops.get_info(instance_id) def get_diagnostics(self, instance_id): @@ -132,32 +143,33 @@ class XenAPIConnection(object): return self._vmops.get_diagnostics(instance_id) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): - """ Attach volume storage to VM instance """ + """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): - """ Detach volume storage to VM instance """ + """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): - """ The session to invoke XenAPI SDK calls """ + """The session to invoke XenAPI SDK calls""" + def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): - """ Return the xenapi object """ + """Return the xenapi object""" return self._session.xenapi def get_xenapi_host(self): - """ Return the xenapi host """ + """Return the xenapi host""" return self._session.xenapi.session.get_this_host(self._session.handle) def call_xenapi(self, method, *args): @@ -173,46 +185,57 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, 
self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, task): + def wait_for_task(self, instance_id, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, instance_id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() return rv - def _poll_task(self, task, done): + def _poll_task(self, instance_id, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) + name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - if status == 'pending': + action = dict( + instance_id=int(instance_id), + action=name, + error=None) + if status == "pending": return - elif status == 'success': + elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info(_("Task [%s] %s status: success %s") % ( + name, + task, + result)) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) + action["error"] = str(error_info) + logging.warn(_("Task [%s] %s status: %s %s") % ( + name, + task, + status, + error_info)) done.send_exception(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + db.instance_action_create(context.get_admin_context(), action) except XenAPI.Failure, exc: logging.warn(exc) done.send_exception(*sys.exc_info()) def _unwrap_plugin_exceptions(func, *args, **kwargs): - """ Parse exception details """ + """Parse exception details""" try: return func(*args, **kwargs) except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -225,7 +248,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 1cd4c1fd4..8353b9712 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -73,14 +73,14 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception("Recovering from a failed execute." - "Try number %s", tries) + logging.exception(_("Recovering from a failed execute." 
+ "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" if not os.path.isdir("/dev/%s" % FLAGS.volume_group): - raise exception.Error("volume group %s doesn't exist" + raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) def create_volume(self, volume): @@ -205,7 +205,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE AOE: %s", cmd) + logging.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -310,5 +310,5 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug("FAKE ISCSI: %s", cmd) + logging.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7da125cac..966334c50 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -81,7 +81,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug("Re-exporting %s volumes", len(volumes)) + logging.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +89,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info("volume %s: creating", volume_ref['name']) + logging.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +98,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
volume_ref['host'] = self.host - logging.debug("volume %s: creating lv of size %sG", + logging.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], volume_ref['size']) self.driver.create_volume(volume_ref) - logging.debug("volume %s: creating export", volume_ref['name']) + logging.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug("volume %s: created successfully", volume_ref['name']) + logging.debug(_("volume %s: created successfully"), volume_ref['name']) return volume_id def delete_volume(self, context, volume_id): @@ -117,15 +117,15 @@ class VolumeManager(manager.Manager): context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": - raise exception.Error("Volume is still attached") + raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: - raise exception.Error("Volume is not local to this node") - logging.debug("volume %s: removing export", volume_ref['name']) + raise exception.Error(_("Volume is not local to this node")) + logging.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) - logging.debug("volume %s: deleting", volume_ref['name']) + logging.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug("volume %s: deleted successfully", volume_ref['name']) + logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True def setup_compute_volume(self, context, volume_id): diff --git a/tools/pip-requires b/tools/pip-requires index 52451b8cb..e9559521b 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -2,6 +2,7 @@ SQLAlchemy==0.6.3 pep8==0.5.0 pylint==0.19 IPy==0.70 +Cheetah==2.4.2.1 M2Crypto==0.20.2 amqplib==0.6.1 anyjson==0.2.4 -- cgit From 9e42bc18a12fe19ae333a3447d153bf3796d54d7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 21:38:06 +0000 Subject: add missing flag --- nova/network/linux_net.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 771f1c932..931a89554 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -51,6 +51,8 @@ flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') +flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', + 'dmz range that should be accepted') def metadata_forward(): -- cgit From 1509d51c7e9be04f8ca7aa9fb50d7b06b20a4e71 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 21:41:40 +0000 Subject: remove extra files that slipped in --- nova/virt/libvirt.uml.xml.template.THIS | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 nova/virt/libvirt.uml.xml.template.THIS diff --git a/nova/virt/libvirt.uml.xml.template.THIS b/nova/virt/libvirt.uml.xml.template.THIS deleted file mode 100644 index 506f2ef72..000000000 --- a/nova/virt/libvirt.uml.xml.template.THIS +++ /dev/null @@ -1,27 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - %(extra_params)s - - - - - - - -- cgit From 775958e3a020b6b4b4c9fd4777aa72f7e9b0bdbc Mon Sep 17 00:00:00 2001 
From: Cerberus Date: Wed, 22 Dec 2010 15:50:26 -0600 Subject: Accidentally yanked the datetime line in auth --- nova/api/openstack/__init__.py | 2 +- nova/api/openstack/auth.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b18edc8e7..c49399f28 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -74,7 +74,7 @@ class API(wsgi.Middleware): return req.get_response(self.application) except Exception as ex: logging.warn(_("Caught error: %s") % str(ex)) - logging.debug(traceback.format_exc()) + logging.error(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 72ad4ffa9..c9d21ed49 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License.import datetime +import datetime import hashlib import json import time -- cgit From 0704c0c4073f6c03959c113f90c51dfe4d72fd76 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 21:55:11 +0000 Subject: pep8 fix --- nova/fakerabbit.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 42daa9767..79d8b894d 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -28,6 +28,7 @@ from eventlet import greenthread EXCHANGES = {} QUEUES = {} + class Message(base.BaseMessage): pass -- cgit From a7e5a4a39b93b32974ca82b77391368c4f01cdd8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 22:54:43 +0000 Subject: removed extra files --- nova/virt/libvirt.rescue.qemu.xml.template.THIS | 38 ------------------------- nova/virt/libvirt.rescue.uml.xml.template.THIS | 31 -------------------- 2 files changed, 69 deletions(-) delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template.THIS diff --git a/nova/virt/libvirt.rescue.qemu.xml.template.THIS b/nova/virt/libvirt.rescue.qemu.xml.template.THIS deleted file mode 100644 index a3b88106c..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template.THIS +++ /dev/null @@ -1,38 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template.THIS b/nova/virt/libvirt.rescue.uml.xml.template.THIS deleted file mode 100644 index a254692d4..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template.THIS +++ /dev/null @@ -1,31 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - -- cgit From 93dcd9088108b17c44337f556d0c624e802ba788 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 23 Dec 2010 01:32:57 +0000 Subject: fix commits from Anthony and Vish that were committed with the wrong email --- .mailmap | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 8041e2341..c01f964d2 100644 --- a/.mailmap +++ b/.mailmap @@ -24,7 +24,7 @@ + + # These are from people who failed to set a proper committer -. . -. 
-- cgit From 588b39981d03c61c7b4eafdb489467ab57540d13 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 23 Dec 2010 02:03:39 +0000 Subject: Add Ryan Lane as well --- .mailmap | 3 +-- Authors | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index c01f964d2..9ab7db743 100644 --- a/.mailmap +++ b/.mailmap @@ -26,5 +26,4 @@ -# These are from people who failed to set a proper committer -. + diff --git a/Authors b/Authors index 0b048becb..250b3b2ad 100644 --- a/Authors +++ b/Authors @@ -24,6 +24,7 @@ Michael Gundlach Monty Taylor Paul Voccio Rick Clark +Ryan Lane Ryan Lucio Sandy Walsh Soren Hansen -- cgit From 12a9dc88f6ae947d005568dd2e644566cd1a9677 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 22 Dec 2010 21:14:06 -0600 Subject: And the common module --- nova/api/openstack/common.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 29e9a8623..83919cc23 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -1,3 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + def limited(items, req): """Return a slice of items according to requested offset and limit. -- cgit From c273c2b93471ad0d3ab4990458147c253d22bdc5 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 23 Dec 2010 08:57:04 -0800 Subject: Added reference in setup.py so that python setup.py test works now. 
--- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index d88bc1e6f..1abf4d9fe 100644 --- a/setup.py +++ b/setup.py @@ -58,6 +58,7 @@ setup(name='nova', 'build_sphinx' : local_BuildDoc }, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, + test_suite='nose.collector', scripts=['bin/nova-api', 'bin/nova-compute', 'bin/nova-dhcpbridge', -- cgit From ba6a99f926180d47870dcb18e4387d18cddad9b0 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 23 Dec 2010 11:58:13 -0600 Subject: Superfluous images include and added basic routes for shared ip groups --- nova/api/openstack/sharedipgroups.py | 20 +++++++++++++++++++- nova/tests/api/openstack/test_servers.py | 1 - 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py index e805ca9f7..423ee61e1 100644 --- a/nova/api/openstack/sharedipgroups.py +++ b/nova/api/openstack/sharedipgroups.py @@ -19,4 +19,22 @@ from nova import wsgi class Controller(wsgi.Controller): - pass + """ The Shared IP Groups Controller for the Openstack API """ + + def index(self, req): + raise NotImplementedError + + def show(self, req, id): + raise NotImplementedError + + def update(self, req, id): + raise NotImplementedError + + def delete(self, req, id): + raise NotImplementedError + + def detail(self, req): + raise NotImplementedError + + def create(self, req): + raise NotImplementedError diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 97cf3ed65..3820f5f27 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -27,7 +27,6 @@ import nova.api.openstack from nova.api.openstack import servers import nova.db.api from nova.db.sqlalchemy.models import Instance -import nova.image import nova.rpc from nova.tests.api.openstack import fakes -- cgit From cb679a01e5905e4f7316f81de7c9ead9dc6536b8 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 23 Dec 2010 13:17:53 -0600 Subject: Pep8 cleanup --- nova/api/openstack/auth.py | 1 + nova/api/openstack/common.py | 2 ++ nova/api/openstack/ratelimiting/__init__.py | 1 + nova/api/openstack/sharedipgroups.py | 2 +- 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index c9d21ed49..e24e58fd3 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -34,6 +34,7 @@ from nova.api.openstack import faults FLAGS = flags.FLAGS + class AuthMiddleware(wsgi.Middleware): """Authorize the openstack API request or return an HTTP Forbidden.""" diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 83919cc23..ac0572c96 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. + def limited(items, req): """Return a slice of items according to requested offset and limit. @@ -25,6 +26,7 @@ def limited(items, req): If limit is not specified, 0, or > 1000, defaults to 1000. 
""" + offset = int(req.GET.get('offset', 0)) limit = int(req.GET.get('limit', 0)) if not limit: diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 8ca575b36..91a8b2e55 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -32,6 +32,7 @@ PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 + class RateLimitingMiddleware(wsgi.Middleware): """Rate limit incoming requests according to the OpenStack rate limits.""" diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py index 423ee61e1..75d02905c 100644 --- a/nova/api/openstack/sharedipgroups.py +++ b/nova/api/openstack/sharedipgroups.py @@ -32,7 +32,7 @@ class Controller(wsgi.Controller): def delete(self, req, id): raise NotImplementedError - + def detail(self, req): raise NotImplementedError -- cgit From e275fbd8e16e6dc55c54072aa162815d522f9242 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 23 Dec 2010 13:30:24 -0600 Subject: One more time --- nova/tests/api/openstack/fakes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index f773b26a7..79663e43a 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -94,7 +94,7 @@ def stub_out_auth(stubs): def stub_out_rate_limiting(stubs): def fake_rate_init(self, app): - super(nova.api.openstack.ratelimiting.RateLimitingMiddleware, self).__init__(app) + super(ratelimiting.RateLimitingMiddleware, self).__init__(app) self.application = app stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware, -- cgit