From f835c01f41fdba5791190b9275775ae7fcfcafc6 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 18 Mar 2011 20:35:44 -0400 Subject: * committing ovs scripts --- .../xensource/scripts/ovs_configure_base_flows.py | 68 ++++++++ .../xensource/scripts/ovs_configure_vif_flows.py | 194 +++++++++++++++++++++ .../networking/etc/xensource/scripts/vif_rules.py | 9 +- 3 files changed, 267 insertions(+), 4 deletions(-) create mode 100755 plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py create mode 100755 plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py new file mode 100755 index 000000000..c46fb4b60 --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This script is used to configure base openvswitch flows for XenServer hosts. +""" + +import os +import subprocess +import sys + + +PNIC_NAME="eth1" +XEN_BRIDGE="xenbr1" + +def main(dom_id, command, only_this_vif=None): + pnic_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', PNIC_NAME, + 'ofport', return_stdout=True) + ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) + + # clear all flows + ovs_ofctl('del-flows', XEN_BRIDGE) + + # these flows are lower priority than all VM-specific flows. + + # allow all traffic from the physical NIC, as it is trusted (i.e., from a + # filtered vif, or from the physical infrastructure + ovs_ofctl('add-flow', XEN_BRIDGE, + "priority=2,in_port=%s,action=normal" % pnic_ofport) + + # default drop + ovs_ofctl('add-flow', XEN_BRIDGE, 'priority=1,action=drop') + + +def execute(*command, return_stdout=False): + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + if return_stdout: + return proc.stdout.read() + else: + return None + + +if __name__ == "__main__": + if sys.argv: + print "This script configures base ovs flows." + print "usage: %s" % os.path.basename(sys.argv[0]) + sys.exit(1) + else: + main() diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py new file mode 100755 index 000000000..a77bbbf4b --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This script is used to configure openvswitch flows on XenServer hosts. +""" + +import os +import subprocess +import sys + +# This is written to Python 2.4, since that is what is available on XenServer +import simplejson as json + + +XEN_BRIDGE = 'xenbr1' +OVS_OFCTL = '/usr/bin/ovs-ofctl' + + +def execute(*command, return_stdout=False): + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + if return_stdout: + return proc.stdout.read() + else: + return None + + +class OvsFlow(): + def __init__(self, command, params, bridge=None): + self.command = command + self.params = params + self.bridge = bridge or XEN_BRIDGE + + def add(self, rule): + execute(OVS_OFCTL, 'add-flow', self.bridge, rule) + + def delete(self, rule): + execute(OVS_OFCTL, 'del-flow', self.bridge, rule) + + def apply(self, rule): + self.delete(rule % self.params) + if self.command == 'online': + self.add(rule % params) + + +def main(dom_id, command, net, only_this_vif=None): + vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', + only_this_vif, 'ofport', return_stdout=True) + + xsls = execute('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id, + return_stdout=True) + macs = [line.split("=")[0].strip() for line in xsls.splitlines()] + + for mac in macs: + xsread = execute('/usr/bin/enstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac), True) + data = json.loads(xsread) + if data["label"] == "public": + vif = "vif%s.0" % dom_id + else: + vif = "vif%s.1" % dom_id + + if (only_this_vif is None) or (vif == only_this_vif): + params = dict(VIF=vif, MAC=data['mac']) + if net in ('ipv4', 'all'): + for ip4 in data['ips']: + params.update({'IP': ip4['ip']}) + apply_ovs_ipv4_flows(command, params) + if net in ('ipv6', 'all'): + for ip6 in data['ip6s']: + params.update({'IP': ip6['ip']}) + apply_ovs_ipv6_flows(command, params) + + +# usage: +# XEN_BRIDGE=xenbr1 +# VIF_NAME=$1 +# VIF_MAC=$2 +# VIF_IPv4=$3 +# VIF_GLOBAL_IPv6=$4 +# VIF_LOCAL_IPv6=$5 + +# # find the openflow port number associated with the vif interface +# VIF_OFPORT=`ovs-vsctl get Interface $VIF_NAME ofport` + +def apply_ovs_ipv4_flows(command, params): + flow = OvsFlow(command, params) + + # allow valid ARP outbound (both request / reply) + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," + "arp_sha=$VIF_MAC,nw_src=$VIF_IPv4,action=normal") + + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," + "arp_sha=$VIF_MAC,nw_src=0.0.0.0,action=normal") + + # allow valid IPv4 outbound + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,ip," + "nw_src=$VIF_IPv4,action=normal") + + +def apply_ovs_ipv6_flows(command, params): + flow = OvsFlow(command, params) + + # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
+ # Neighbor Solicitation + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + "action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + "action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,action=normal") + + # Neighbor Advertisement + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136," + "nd_target=$VIF_LOCAL_IPv6,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136," + "nd_target=$VIF_GLOBAL_IPv6,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136,action=normal") + + # drop all other neighbor discovery (required because we permit all icmp6 below) + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=135,action=drop") + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=136,action=drop") + + # do not allow sending specifc ICMPv6 types + # Router Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=134,action=drop") + # Redirect Gateway + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=137,action=drop") + # Mobile Prefix Solicitation + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=146,action=drop") + # Mobile Prefix Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=147,action=drop") + # Multicast Router Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=151,action=drop") + # Multicast Router Solicitation + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=152,action=drop") + # Multicast Router Termination + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=153,action=drop") + + # allow valid IPv6 outbound, by type + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,icmp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,tcp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,tcp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,udp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,udp6,action=normal") + # all else will be dropped ... 
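Note (annotation, not part of the patch): the flow templates above still carry the shell-style $VIF_OFPORT / $VIF_MAC placeholders from the commented-out vif script, while OvsFlow.apply substitutes with Python's "%" operator against a plain dict, so these markers are never expanded; a later patch in this series rewrites them as %(NAME)s keys. A minimal sketch of that mapping-based substitution, with made-up values:

    params = {'VIF_OFPORT': 5, 'VIF_MAC': '02:16:3e:77:e2:a1'}
    rule = ("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,"
            "arp,action=normal")
    print rule % params
    # priority=3,in_port=5,dl_src=02:16:3e:77:e2:a1,arp,action=normal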
+ + +if __name__ == "__main__": + if len(sys.argv) < 3: + print "usage: %s dom_id online|offline ipv4|ipv6|all [vif]" % \ + os.path.basename(sys.argv[0]) + sys.exit(1) + else: + dom_id, command, net = sys.argv[1:4] + vif = len(sys.argv) == 5 and sys.argv[4] or None + main(dom_id, command, net, vif) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 48122e6d6..500e055d8 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -31,7 +31,8 @@ import simplejson as json def main(dom_id, command, only_this_vif=None): xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, True) + '/local/domain/%s/vm-data/networking' % dom_id, + return_stdout=True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: @@ -113,8 +114,8 @@ def apply_ebtables_rules(command, params): ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') - ebtables('-D', 'FORWARD', '-p', '0800', '-o', - params['VIF'], '--ip-dst', params['IP'], + ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'], + '--ip-dst', params['IP'], '-j', 'ACCEPT') if command == 'online': ebtables('-A', 'FORWARD', '-p', '0806', -- cgit From ab1bf7c0c12e205cd17b80be31226055cc90ef20 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Mon, 28 Mar 2011 21:00:44 +0000 Subject: minor fix and comment --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index a77bbbf4b..553811ab6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -59,10 +59,11 @@ class OvsFlow(): def apply(self, rule): self.delete(rule % self.params) if self.command == 'online': - self.add(rule % params) + self.add(rule % self.params) def main(dom_id, command, net, only_this_vif=None): + # FIXME(dubs) what to do when only_this_vif is None vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', only_this_vif, 'ofport', return_stdout=True) -- cgit From d7c51db418d554094c341639a0540ecfec8ddb19 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 14:43:04 +0000 Subject: lots of updates to ovs scripts --- .../xensource/scripts/ovs_configure_base_flows.py | 26 ++-- .../xensource/scripts/ovs_configure_vif_flows.py | 139 ++++++++++----------- .../networking/etc/xensource/scripts/vif_rules.py | 2 +- 3 files changed, 80 insertions(+), 87 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index c46fb4b60..1f3182e68 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 
shiftwidth=4 softtabstop=4 -# Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,26 +25,23 @@ import subprocess import sys -PNIC_NAME="eth1" -XEN_BRIDGE="xenbr1" - -def main(dom_id, command, only_this_vif=None): - pnic_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', PNIC_NAME, - 'ofport', return_stdout=True) +def main(phys_dev_name, bridge_name): + pnic_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport', return_stdout=True) ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) # clear all flows - ovs_ofctl('del-flows', XEN_BRIDGE) + ovs_ofctl('del-flows', bridge_name) # these flows are lower priority than all VM-specific flows. # allow all traffic from the physical NIC, as it is trusted (i.e., from a # filtered vif, or from the physical infrastructure - ovs_ofctl('add-flow', XEN_BRIDGE, + ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=%s,action=normal" % pnic_ofport) # default drop - ovs_ofctl('add-flow', XEN_BRIDGE, 'priority=1,action=drop') + ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') def execute(*command, return_stdout=False): @@ -60,9 +57,12 @@ def execute(*command, return_stdout=False): if __name__ == "__main__": - if sys.argv: + if len(sys.argv) != 3: + script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." - print "usage: %s" % os.path.basename(sys.argv[0]) + print "usage: %s phys-dev-name bridge-name" % script_name + print " ex: %s eth2 xenbr2" % script_name sys.exit(1) else: - main() + phys_dev_name, bridge_name = sys.argv[1:3] + main(phys_dev_name, bridge_name) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 553811ab6..7bad39830 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -28,6 +28,7 @@ import sys import simplejson as json +# FIXME(dubs) this needs to be able to be passed in, check xen vif script XEN_BRIDGE = 'xenbr1' OVS_OFCTL = '/usr/bin/ovs-ofctl' @@ -62,18 +63,14 @@ class OvsFlow(): self.add(rule % self.params) -def main(dom_id, command, net, only_this_vif=None): - # FIXME(dubs) what to do when only_this_vif is None - vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', - only_this_vif, 'ofport', return_stdout=True) - +def main(dom_id, command, net_type, only_this_vif=None): xsls = execute('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id, return_stdout=True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/enstore-read', + xsread = execute('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac), True) data = json.loads(xsread) @@ -83,113 +80,109 @@ def main(dom_id, command, net, only_this_vif=None): vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): - params = dict(VIF=vif, MAC=data['mac']) - if net in ('ipv4', 'all'): + vif_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', + vif, 'ofport', return_stdout=True) + + params = dict(VIF_NAME=vif, + VIF_MAC=data['mac'], + VIF_OFPORT=vif_ofport) + if net_type in ('ipv4', 'all'): for ip4 in data['ips']: - params.update({'IP': ip4['ip']}) + params.update({'VIF_IPv4': ip4['ip']}) apply_ovs_ipv4_flows(command, params) - if net in ('ipv6', 'all'): + if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: - params.update({'IP': ip6['ip']}) + params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + # TODO(dubs) calculate v6 link local addr + #params.update({'VIF_LOCAL_IPv6': XXX}) apply_ovs_ipv6_flows(command, params) -# usage: -# XEN_BRIDGE=xenbr1 -# VIF_NAME=$1 -# VIF_MAC=$2 -# VIF_IPv4=$3 -# VIF_GLOBAL_IPv6=$4 -# VIF_LOCAL_IPv6=$5 - -# # find the openflow port number associated with the vif interface -# VIF_OFPORT=`ovs-vsctl get Interface $VIF_NAME ofport` - def apply_ovs_ipv4_flows(command, params): flow = OvsFlow(command, params) # allow valid ARP outbound (both request / reply) - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," - "arp_sha=$VIF_MAC,nw_src=$VIF_IPv4,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," - "arp_sha=$VIF_MAC,nw_src=0.0.0.0,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") # allow valid IPv4 outbound - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,ip," - "nw_src=$VIF_IPv4,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," + "nw_src=%(VIF_IPv4)s,action=normal") def apply_ovs_ipv6_flows(command, params): flow = OvsFlow(command, params) # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
- # Neighbor Solicitation - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + # Neighbor Solicitation + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," "action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," "action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") # Neighbor Advertisement - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136," - "nd_target=$VIF_LOCAL_IPv6,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136," - "nd_target=$VIF_GLOBAL_IPv6,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=135,action=drop") - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=136,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=134,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") # Redirect Gateway - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=137,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") # Mobile Prefix Solicitation - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=146,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") # Mobile Prefix Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=147,action=drop") + 
flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") # Multicast Router Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=151,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") # Multicast Router Solicitation - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=152,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") # Multicast Router Termination - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=153,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") # allow valid IPv6 outbound, by type - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,icmp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,tcp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,tcp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,udp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,udp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") # all else will be dropped ... 
if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline ipv4|ipv6|all [vif]" % \ + print "usage: %s dom_id online|offline ipv4|ipv6|all [vif_name]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: - dom_id, command, net = sys.argv[1:4] - vif = len(sys.argv) == 5 and sys.argv[4] or None - main(dom_id, command, net, vif) + dom_id, command, net_type = sys.argv[1:4] + vif_name = len(sys.argv) == 5 and sys.argv[4] or None + main(dom_id, command, net_type, vif_name) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 500e055d8..4e13bad9d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -36,7 +36,7 @@ def main(dom_id, command, only_this_vif=None): macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/enstore-read', + xsread = execute('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac), True) data = json.loads(xsread) -- cgit From 367581e63d4eb0018db293034dc1b096d2584720 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 15:28:21 +0000 Subject: change bridge --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 7bad39830..2faf4e5c0 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -29,7 +29,7 @@ import simplejson as json # FIXME(dubs) this needs to be able to be passed in, check xen vif script -XEN_BRIDGE = 'xenbr1' +XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' @@ -86,6 +86,7 @@ def main(dom_id, command, net_type, only_this_vif=None): params = dict(VIF_NAME=vif, VIF_MAC=data['mac'], VIF_OFPORT=vif_ofport) + if net_type in ('ipv4', 'all'): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) -- cgit From 74b9f240c7e8c62e68011691488be9e63758e980 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 19:54:55 +0000 Subject: extract execute methods to a library for reuse --- .../networking/etc/xensource/scripts/novalib.py | 41 ++++++++++++++++++++++ .../xensource/scripts/ovs_configure_base_flows.py | 21 ++++------- .../xensource/scripts/ovs_configure_vif_flows.py | 30 ++++++---------- 3 files changed, 57 insertions(+), 35 deletions(-) create mode 100644 plugins/xenserver/networking/etc/xensource/scripts/novalib.py diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py new file mode 100644 index 000000000..5366c385d --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import os +import subprocess +import sys + + +def execute_get_output(*command): + """Execute and return stdout""" + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + return proc.stdout.read() + + +def execute(*command): + """Execute without returning stdout""" + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 1f3182e68..d036cf517 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -25,9 +25,12 @@ import subprocess import sys +from novalib import execute, execute_get_output + + def main(phys_dev_name, bridge_name): - pnic_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport', return_stdout=True) + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport') ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) # clear all flows @@ -44,24 +47,12 @@ def main(phys_dev_name, bridge_name): ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - - if __name__ == "__main__": if len(sys.argv) != 3: script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." 
print "usage: %s phys-dev-name bridge-name" % script_name - print " ex: %s eth2 xenbr2" % script_name + print " ex: %s eth0 xenbr0" % script_name sys.exit(1) else: phys_dev_name, bridge_name = sys.argv[1:3] diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 2faf4e5c0..82e79c2d8 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -28,23 +28,14 @@ import sys import simplejson as json +from novalib import execute, execute_get_output + + # FIXME(dubs) this needs to be able to be passed in, check xen vif script XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - - class OvsFlow(): def __init__(self, command, params, bridge=None): self.command = command @@ -64,15 +55,14 @@ class OvsFlow(): def main(dom_id, command, net_type, only_this_vif=None): - xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, - return_stdout=True) + xsls = execute_get_output('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/xenstore-read', - '/local/domain/%s/vm-data/networking/%s' % - (dom_id, mac), True) + xsread = execute_get_output('/usr/bin/xenstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac)) data = json.loads(xsread) if data["label"] == "public": vif = "vif%s.0" % dom_id @@ -80,8 +70,8 @@ def main(dom_id, command, net_type, only_this_vif=None): vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): - vif_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', - vif, 'ofport', return_stdout=True) + vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', + 'Interface', vif, 'ofport') params = dict(VIF_NAME=vif, VIF_MAC=data['mac'], -- cgit From a4e1db03a2c61648588d9adb703a385f49d82fc0 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 20:26:59 +0000 Subject: use novalib for vif_rules.py, fix OvsFlow class --- .../xensource/scripts/ovs_configure_vif_flows.py | 2 +- .../networking/etc/xensource/scripts/vif_rules.py | 25 +++++++--------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 82e79c2d8..23b6d85c9 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -36,7 +36,7 @@ XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' -class OvsFlow(): +class OvsFlow(object): def __init__(self, command, params, bridge=None): self.command = command self.params = params diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 4e13bad9d..662def205 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ 
b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -29,16 +29,18 @@ import sys import simplejson as json +from novalib import execute, execute_get_output + + def main(dom_id, command, only_this_vif=None): - xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, - return_stdout=True) + xsls = execute_get_output('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/xenstore-read', - '/local/domain/%s/vm-data/networking/%s' % - (dom_id, mac), True) + xsread = execute_get_output('/usr/bin/xenstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac)) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": @@ -53,17 +55,6 @@ def main(dom_id, command, only_this_vif=None): apply_iptables_rules(command, params) -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - # A note about adding rules: # Whenever we add any rule to iptables, arptables or ebtables we first # delete the same rule to ensure the rule only exists once. -- cgit From 655eb8fbd21376e694f8134e42f10ddbc1aafb0e Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 6 Apr 2011 18:22:03 -0700 Subject: ec2 api run_instances checks for image status must be 'available'. Overhauled test_run_instances for working set of test assertions --- nova/api/ec2/cloud.py | 10 +++++++- nova/tests/test_cloud.py | 62 +++++++++++++++++++++++++----------------------- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 58effd134..0ea0e3603 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -814,10 +814,18 @@ class CloudController(object): if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] + image = self._get_image(context, kwargs['image_id']) + if not image: + raise exception.NotFound(_('Image %s not found') % + kwargs['image_id']) + if not 'properties' in image or \ + (not 'image_state' in image['properties']) or \ + (image['properties']['image_state'] is not 'available'): + raise exception.ApiError(_('Image must be available')) instances = self.compute_api.create(context, instance_type=instance_types.get_by_type( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_id = image['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 5cb969979..85f3a8e87 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -36,12 +36,12 @@ from nova import rpc from nova import service from nova import test from nova import utils +from nova import exception from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local -from nova.exception import NotFound FLAGS = flags.FLAGS @@ -226,7 +226,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine'}}] def fake_show_none(meh, context, id): - raise NotFound + raise exception.NotFound self.stubs.Set(local.LocalImageService, 'detail', 
fake_detail) # list all @@ -244,7 +244,7 @@ class CloudTestCase(test.TestCase): self.stubs.UnsetAll() self.stubs.Set(local.LocalImageService, 'show', fake_show_none) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none) - self.assertRaises(NotFound, describe_images, + self.assertRaises(exception.NotFound, describe_images, self.context, ['ami-fake']) def test_console_output(self): @@ -307,39 +307,41 @@ class CloudTestCase(test.TestCase): self.cloud.delete_key_pair(self.context, 'test') def test_run_instances(self): - if FLAGS.connection_type == 'fake': - LOG.debug(_("Can't test instances without a real virtual env.")) - return + allinst = db.instance_get_all(context.get_admin_context()) + self.assertEqual(0, len(allinst)) + def fake_show_decrypt(meh, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine', 'image_state': 'decrypting'}} + + def fake_show_avail(meh, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine', 'image_state': 'available'}} + image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type max_count = 1 kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = self.cloud.run_instances(self.context, **kwargs) - # TODO: check for proper response - instance_id = rv['reservationSet'][0].keys()[0] - instance = rv['reservationSet'][0][instance_id][0] - LOG.debug(_("Need to watch instance %s until it's running..."), - instance['instance_id']) - while True: - greenthread.sleep(1) - info = self.cloud._get_instance(instance['instance_id']) - LOG.debug(info['state']) - if info['state'] == power_state.RUNNING: - break - self.assert_(rv) - - if FLAGS.connection_type != 'fake': - time.sleep(45) # Should use boto for polling here - for reservations in rv['reservationSet']: - # for res_id in reservations.keys(): - # LOG.debug(reservations[res_id]) - # for instance in reservations[res_id]: - for instance in reservations[reservations.keys()[0]]: - instance_id = instance['instance_id'] - LOG.debug(_("Terminating instance %s"), instance_id) - rv = self.compute.terminate_instance(instance_id) + run_instances = self.cloud.run_instances + # when image doesn't have 'image_state' attr at all + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) + # when image has 'image_state' yet not 'available' + self.stubs.UnsetAll() + self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) + # when image has valid image_state + self.stubs.UnsetAll() + self.stubs.Set(local.LocalImageService, 'show', fake_show_avail) + result = run_instances(self.context, **kwargs) + instance = result['instancesSet'][0] + self.assertEqual(instance['imageId'], 'ami-00000001') + self.assertEqual(instance['displayName'], 'Server 1') + self.assertEqual(instance['instanceId'], 'i-00000001') + self.assertEqual(instance['instanceState']['name'], 'scheduling') + self.assertEqual(instance['instanceType'], 'm1.small') def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) -- cgit From 1c13695a2c5e5d14ead3f5459d0b40bb875ecdf6 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 11 Apr 2011 14:16:17 -0400 Subject: Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection_type=xenapi. 
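A caveat on the run_instances image_state check added in the cloud.py hunk above: "is not 'available'" compares object identity rather than string value, so an image_state string read from the image service could be rejected even when it equals 'available'. A plain equality test is the safer form; a sketch only, reusing the names from that hunk:

    properties = image.get('properties', {})
    if properties.get('image_state') != 'available':
        raise exception.ApiError(_('Image must be available'))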
--- nova/virt/xenapi/vm_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d2045a557..50fdf3e30 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1012,6 +1012,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) + utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) for chunk in image_file: -- cgit From 9d2513ea3a6d586e1fe3deae778a02bb089b9a5e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 12 Apr 2011 10:25:07 -0400 Subject: Updated to use setfacl instead of chown. --- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 50fdf3e30..5cdd29057 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1012,7 +1012,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + utils.execute('sudo', 'setfacl', '-m', 'u:%s:rw' % os.getuid(), + '/dev/%s' % dev) with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) -- cgit From 8c66d79a41044837a0865b1a706dd89e788597d1 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 14 Apr 2011 20:57:11 +0900 Subject: add kvm-pause and kvm-suspend --- nova/virt/libvirt_conn.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6ec15fbb8..66f43e786 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -545,19 +545,54 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") + """Pause VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.suspend() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") + """Unpause paused VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.resume() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") + """Suspend the specified instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.managedSave(0) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") + """resume the specified instance""" + try: + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.create() + except libvirt.LibvirtError: + xml = self.to_xml(instance, None) + self._create_new_domain(xml) @exception.wrap_exception def rescue(self, instance, 
callback=None): -- cgit From f2e9d4120ed0495d9c810a0d27d530e280f325c6 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 14 Apr 2011 14:35:42 -0400 Subject: set the bridge on each OvsFlow --- .../xensource/scripts/ovs_configure_vif_flows.py | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 23b6d85c9..b59cc4d0b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -27,30 +27,28 @@ import sys # This is written to Python 2.4, since that is what is available on XenServer import simplejson as json - from novalib import execute, execute_get_output -# FIXME(dubs) this needs to be able to be passed in, check xen vif script -XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' class OvsFlow(object): - def __init__(self, command, params, bridge=None): + def __init__(self, command, bridge, params): self.command = command + self.bridge = bridge self.params = params - self.bridge = bridge or XEN_BRIDGE def add(self, rule): execute(OVS_OFCTL, 'add-flow', self.bridge, rule) def delete(self, rule): - execute(OVS_OFCTL, 'del-flow', self.bridge, rule) + execute(OVS_OFCTL, 'del-flows', self.bridge, rule) def apply(self, rule): - self.delete(rule % self.params) - if self.command == 'online': + if self.command in ('offline', 'reset'): + self.delete(rule % self.params) + if self.command in ('online', 'reset'): self.add(rule % self.params) @@ -66,8 +64,10 @@ def main(dom_id, command, net_type, only_this_vif=None): data = json.loads(xsread) if data["label"] == "public": vif = "vif%s.0" % dom_id + bridge = "xenbr0" else: vif = "vif%s.1" % dom_id + bridge = "xenbr1" if (only_this_vif is None) or (vif == only_this_vif): vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', @@ -80,17 +80,17 @@ def main(dom_id, command, net_type, only_this_vif=None): if net_type in ('ipv4', 'all'): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) - apply_ovs_ipv4_flows(command, params) + apply_ovs_ipv4_flows(command, bridge, params) if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) # TODO(dubs) calculate v6 link local addr #params.update({'VIF_LOCAL_IPv6': XXX}) - apply_ovs_ipv6_flows(command, params) + apply_ovs_ipv6_flows(command, bridge, params) -def apply_ovs_ipv4_flows(command, params): - flow = OvsFlow(command, params) +def apply_ovs_ipv4_flows(command, bridge, params): + flow = OvsFlow(command, bridge, params) # allow valid ARP outbound (both request / reply) flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," @@ -104,8 +104,8 @@ def apply_ovs_ipv4_flows(command, params): "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(command, params): - flow = OvsFlow(command, params) +def apply_ovs_ipv6_flows(command, bridge, params): + flow = OvsFlow(command, bridge, params) # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation @@ -170,7 +170,7 @@ def apply_ovs_ipv6_flows(command, params): if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline ipv4|ipv6|all [vif_name]" % \ + print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif_name]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From c134d3c9bfb5a9d789776b243b8d6e4283fb3f80 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Mon, 18 Apr 2011 13:30:54 -0400 Subject: calc link local --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index b59cc4d0b..08d7a3859 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -25,6 +25,7 @@ import subprocess import sys # This is written to Python 2.4, since that is what is available on XenServer +import netaddr import simplejson as json from novalib import execute, execute_get_output @@ -84,8 +85,8 @@ def main(dom_id, command, net_type, only_this_vif=None): if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - # TODO(dubs) calculate v6 link local addr - #params.update({'VIF_LOCAL_IPv6': XXX}) + mac64 = netaddr.EUI(mac).eui64() + params.update({'VIF_LOCAL_IPv6': mac64.ipv6_link_local()}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From 0ba085928c75f2fc27fb03eaa3aaeff6618e8875 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. 
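For context on the link-local derivation added a few hunks above (netaddr.EUI(mac).eui64().ipv6_link_local()), a rough worked example; the MAC is made up and the exact output formatting depends on the installed netaddr version:

    import netaddr
    mac = netaddr.EUI('02:16:3e:77:e2:a1')
    lla = mac.eui64().ipv6_link_local()
    # Modified EUI-64: ff:fe is inserted in the middle of the MAC and the
    # universal/local bit of the first octet is flipped, then the result
    # is prefixed with fe80::/64 -- here roughly fe80::16:3eff:fe77:e2a1
    print lla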
--- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 50 ++++++++++++++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 405 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cd59340bd..4a49a5a6b 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -327,6 +327,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 092b80fa2..f5360af0b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -280,14 +280,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': [self._format_snapshot(context, snapshot)]} + + def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, 
context.user_id) @@ -595,6 +627,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index f9a4b5b4b..57e585a9c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -47,6 +47,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -871,6 +873,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 285b22a04..ebdb2ad5c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1758,6 +1758,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36a084a1d..2e0ead5f9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 9905fb19b..2dffeb795 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,6 +60,56 @@ class ApiError(Error): class BuildInProgress(Error): + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class InstanceNotFound(NotFound): + def __init__(self, message, instance_id): + self.instance_id = instance_id + super(InstanceNotFound, self).__init__(message) + + +class VolumeNotFound(NotFound): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(VolumeNotFound, self).__init__(message) + + +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class InvalidInputException(Error): + pass + + +class InvalidContentType(Error): + pass + + +class TimeoutException(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 09befb647..c1af30de0 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? 
@@ -110,3 +119,38 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "remove_volume", "args": {'volume_id': volume_id}}) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + 
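Illustrative aside, not part of this patch: the new VolumeDriver.create_snapshot() and delete_snapshot() hooks only raise NotImplementedError, so each backend has to supply its own implementation before snapshots actually work. A minimal LVM-style sketch is shown below; the class name is hypothetical, it assumes the _try_execute helper and FLAGS.volume_group used elsewhere in driver.py, and sudo/permission handling is omitted for brevity.

    from nova import flags
    from nova.volume.driver import VolumeDriver

    FLAGS = flags.FLAGS


    class LVMSnapshotDriver(VolumeDriver):
        """Hypothetical sketch of an LVM backend for the new snapshot hooks."""

        def create_snapshot(self, snapshot):
            """Create an LVM snapshot of the source volume."""
            origin = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
            self._try_execute('lvcreate', '-L',
                              '%sG' % snapshot['volume_size'],
                              '--snapshot', '--name', snapshot['name'], origin)

        def delete_snapshot(self, snapshot):
            """Remove the LVM snapshot."""
            self._try_execute('lvremove', '-f',
                              "%s/%s" % (FLAGS.volume_group, snapshot['name']))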
def setup_compute_volume(self, context, volume_id): """Setup remote volume on compute host. -- cgit From dcda6be23c3797872c406f58578b05befd378c97 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. --- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 6 ++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 361 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a3c3b25a1..a89d65a38 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -331,6 +331,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bd4c9dcd4..6daf299b9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -283,14 +283,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': [self._format_snapshot(context, snapshot)]} + 
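Illustrative aside (the IDs below are made up): euca-create-snapshot issues an EC2 CreateSnapshot request whose volume id is translated by the ec2utils helpers used above, and the response carries the new 'snap-' identifier produced by _format_snapshot(). A rough sketch of the id round trip, assuming ec2utils is importable as shown:

    from nova.api.ec2 import ec2utils

    internal_id = ec2utils.ec2_id_to_id('vol-00000001')    # -> 1
    ec2_snap_id = ec2utils.id_to_ec2_id(1, 'snap-%08x')    # -> 'snap-00000001'
    # create_snapshot(context, volume_id='vol-00000001') then returns roughly:
    #   {'snapshotSet': [{'snapshotId': 'snap-00000001',
    #                     'volumeId': 'vol-00000001',
    #                     'status': 'creating',
    #                     'progress': '0%',
    #                     ...}]}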
+ def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) @@ -598,6 +630,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d..9fc4b8c0a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -46,6 +46,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -867,6 +869,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 646675a45..059a22cb9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1760,6 +1760,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f79d0f16c..9abe4d9ae 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 4e2bbdbaf..7adc3d007 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -68,6 +68,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + class Duplicate(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 4b4bb9dc5..f5285f31f 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? @@ -103,3 +112,38 @@ class API(base.Base): # TODO(vish): abstract status checking? 
if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + def setup_compute_volume(self, context, volume_id): """Setup remote 
volume on compute host. -- cgit From f76f2ee50f2407155a0aaefac3224e6af14e7d26 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From aad857a18153792d96f300732c3bb5bb16aa02c3 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From 2f3819628b6d3dea13a56ea6e93e02992b2e1f5f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. 
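In short: CreateVolume now accepts a snapshot id. When one is supplied, the EC2 layer converts it to an internal id, the volume API copies the size from the snapshot record, and the snapshot id is forwarded in the create_volume cast so the volume manager can build the new volume from the snapshot. A rough sketch of the resulting call with example values (the euca flag name is an assumption and may differ):

    # euca-create-volume --snapshot snap-00000001 ends up roughly as:
    volume = volume_api.create(context,
                               size=None,        # overridden from the snapshot
                               snapshot_id=1,    # parsed from 'snap-00000001'
                               name=None,
                               description=None)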
--- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f5360af0b..aa15539ac 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -636,11 +636,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 2e0ead5f9..ca762ca9f 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
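One inconsistency worth noting in this commit: migration 016 above creates volumes.snapshot_id as Integer(), while the Volume model here declares it as String(255). If the integer column is what is intended, the model attribute would need to match; a hypothetical correction, shown only for comparison:

    # class Volume(BASE, NovaBase):
    #     ...
    #     snapshot_id = Column(Integer)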
diff --git a/nova/volume/api.py b/nova/volume/api.py index c1af30de0..7fa80383b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1018a60e3194e7e283cd89af28efd689623058a8 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. 
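A side effect visible in the manager diff is that VolumeManager.create_volume() now requires a snapshot_id argument, so any create_volume message sent by an older volume API will fail until both sides are upgraded. One defensive variant (an assumption, not what this patch does) is to give the new argument a default:

    def create_volume(self, context, volume_id, snapshot_id=None):
        """Same body as in the patch; only the default value differs."""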
--- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6daf299b9..5d4d2ad27 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -639,11 +639,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9abe4d9ae..afc2ea4e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
diff --git a/nova/volume/api.py b/nova/volume/api.py index f5285f31f..bd073964d 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1c7c53a9f40a88eb9def7ab9d706e7399ad5e65b Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. 
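Expanding the new call with example names (the volume-%08x and snapshot-%08x templates come from earlier in this series), SheepdogDriver.create_volume_from_snapshot() builds the new VDI as a copy-on-write image backed by the snapshot:

    snapshot = {'volume_name': 'volume-00000001', 'name': 'snapshot-00000001'}
    volume = {'name': 'volume-00000002'}
    # create_volume_from_snapshot(volume, snapshot) runs, via _try_execute:
    #   qemu-img create -b sheepdog:volume-00000001:snapshot-00000001 \
    #       sheepdog:volume-00000002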
--- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 5b670fe9bca9103642967bce609853704d0d1b88 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. --- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 4e11c04a34b3237853c0b4be90ce6362237bcbe0 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 19 Apr 2011 20:10:57 +0000 Subject: strip output, str() link local --- plugins/xenserver/networking/etc/xensource/scripts/novalib.py | 2 +- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py index 5366c385d..9fc4b2310 100644 --- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -29,7 +29,7 @@ def execute_get_output(*command): proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() - return proc.stdout.read() + return proc.stdout.read().strip() def execute(*command): diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 08d7a3859..d1d646b99 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -86,7 +86,7 @@ def main(dom_id, command, net_type, only_this_vif=None): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) mac64 = netaddr.EUI(mac).eui64() - params.update({'VIF_LOCAL_IPv6': mac64.ipv6_link_local()}) + params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From a46bd9fb6483959e210f25276ff70c76767e509d Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 19 Apr 2011 22:13:40 +0000 Subject: only apply ipv6 if the data exists in xenstore --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index d1d646b99..e1a151476 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -82,10 +82,10 @@ def main(dom_id, command, net_type, only_this_vif=None): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) apply_ovs_ipv4_flows(command, bridge, params) - if net_type in ('ipv6', 'all'): + if net_type in ('ipv6', 'all') and 'ip6s' in data: for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - mac64 = netaddr.EUI(mac).eui64() + mac64 = netaddr.EUI(data['mac']).eui64() params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From 169496af390caa4035db2fefabffd71c95a57fbf Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 20 Apr 2011 14:11:25 -0400 Subject: refactor the way flows are deleted/reset --- .../xensource/scripts/ovs_configure_vif_flows.py | 143 ++++++++++----------- 1 file changed, 70 insertions(+), 73 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index e1a151476..37ff07e33 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -35,22 +35,15 @@ OVS_OFCTL = '/usr/bin/ovs-ofctl' class OvsFlow(object): - def __init__(self, command, bridge, params): - self.command = command + def __init__(self, bridge, params): self.bridge = bridge self.params = params def add(self, rule): - execute(OVS_OFCTL, 'add-flow', self.bridge, rule) + execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params) - def delete(self, rule): - execute(OVS_OFCTL, 'del-flows', self.bridge, rule) - - def apply(self, rule): - if self.command in ('offline', 'reset'): - self.delete(rule % self.params) - if self.command in ('online', 'reset'): - self.add(rule % self.params) + def clear_flows(self, ofport): + execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport) def main(dom_id, command, net_type, only_this_vif=None): @@ -78,94 +71,98 @@ def main(dom_id, command, net_type, only_this_vif=None): VIF_MAC=data['mac'], VIF_OFPORT=vif_ofport) - if net_type in ('ipv4', 'all'): - for ip4 in data['ips']: - params.update({'VIF_IPv4': ip4['ip']}) - apply_ovs_ipv4_flows(command, bridge, params) - if net_type in ('ipv6', 'all') and 'ip6s' in data: - for ip6 in data['ip6s']: - params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - mac64 = netaddr.EUI(data['mac']).eui64() - params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) - apply_ovs_ipv6_flows(command, bridge, params) + ovs = OvsFlow(bridge, params) + + if command in ('offline', 'reset'): + # I haven't found a way to clear only IPv4 or IPv6 rules. 
+ ovs.clear_flows(vif_ofport) + if command in ('online', 'reset'): + if net_type in ('ipv4', 'all') and 'ips' in data: + for ip4 in data['ips']: + ovs.params.update({'VIF_IPv4': ip4['ip']}) + apply_ovs_ipv4_flows(ovs, bridge, params) + if net_type in ('ipv6', 'all') and 'ip6s' in data: + for ip6 in data['ip6s']: + link_local = str(netaddr.EUI(data['mac']).eui64()\ + .ipv6_link_local()) + ovs.params.update({'VIF_LOCAL_IPv6': link_local}) + ovs.params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + apply_ovs_ipv6_flows(ovs, bridge, params) -def apply_ovs_ipv4_flows(command, bridge, params): - flow = OvsFlow(command, bridge, params) +def apply_ovs_ipv4_flows(ovs, command, bridge, params): # allow valid ARP outbound (both request / reply) - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") # allow valid IPv4 outbound - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,action=normal") - + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," + "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(command, bridge, params): - flow = OvsFlow(command, bridge, params) +def apply_ovs_ipv6_flows(ovs, command, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) # Neighbor Solicitation - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + "action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + "action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") # Neighbor Advertisement - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - 
"ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") # Redirect Gateway - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") # Mobile Prefix Solicitation - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") # Mobile Prefix Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") # Multicast Router Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") # Multicast Router Solicitation - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") # Multicast Router Termination - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") # allow valid IPv6 outbound, by type - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + 
"ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") # all else will be dropped ... -- cgit From 7c53dc7a792dfcda0862178725adbe585c4fed21 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 20 Apr 2011 14:24:29 -0400 Subject: bugfix signature --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 37ff07e33..9557eb3e2 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -91,7 +91,7 @@ def main(dom_id, command, net_type, only_this_vif=None): apply_ovs_ipv6_flows(ovs, bridge, params) -def apply_ovs_ipv4_flows(ovs, command, bridge, params): +def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") @@ -104,7 +104,7 @@ def apply_ovs_ipv4_flows(ovs, command, bridge, params): "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(ovs, command, bridge, params): +def apply_ovs_ipv6_flows(ovs, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," -- cgit From bbcc2304167c3331f4c54898200f01fd66c0a20c Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 21 Apr 2011 14:53:03 -0400 Subject: change action= to actions= --- .../xensource/scripts/ovs_configure_base_flows.py | 4 +- .../xensource/scripts/ovs_configure_vif_flows.py | 52 +++++++++++----------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index d036cf517..555dad71a 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -41,10 +41,10 @@ def main(phys_dev_name, bridge_name): # allow all traffic from the physical NIC, as it is trusted (i.e., from a # filtered vif, or from the physical infrastructure ovs_ofctl('add-flow', bridge_name, - "priority=2,in_port=%s,action=normal" % pnic_ofport) + "priority=2,in_port=%s,actions=normal" % pnic_ofport) # default drop - ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') + ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') if __name__ == "__main__": diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 9557eb3e2..aba8487f6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -94,14 +94,14 @@ def main(dom_id, command, net_type, only_this_vif=None): def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,actions=normal") ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,actions=normal") # allow valid IPv4 outbound ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,action=normal") + "nw_src=%(VIF_IPv4)s,actions=normal") def apply_ovs_ipv6_flows(ovs, bridge, params): @@ -109,60 +109,60 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): # Neighbor Solicitation ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") + "actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") + "actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,actions=normal") # Neighbor Advertisement ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + "nd_target=%(VIF_LOCAL_IPv6)s,actions=normal") 
ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + "nd_target=%(VIF_GLOBAL_IPv6)s,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,actions=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,actions=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,actions=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,actions=drop") # Redirect Gateway - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,actions=drop") # Mobile Prefix Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,actions=drop") # Mobile Prefix Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,actions=drop") # Multicast Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,actions=drop") # Multicast Router Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,actions=drop") # Multicast Router Termination - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,actions=drop") # allow valid IPv6 outbound, by type ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,actions=normal") # all else will be dropped ... 
-- cgit From a13616c2deae4ae90bb69ce87bda28576e194426 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 22 Apr 2011 16:35:26 -0400 Subject: removed unused imports and renamed template variables --- .../networking/etc/xensource/scripts/novalib.py | 1 - .../xensource/scripts/ovs_configure_base_flows.py | 1 - .../xensource/scripts/ovs_configure_vif_flows.py | 107 ++++++++++----------- 3 files changed, 53 insertions(+), 56 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py index 9fc4b2310..dcbee3ded 100644 --- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -19,7 +19,6 @@ import os import subprocess -import sys def execute_get_output(*command): diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 555dad71a..82d0b9e31 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -21,7 +21,6 @@ This script is used to configure base openvswitch flows for XenServer hosts. """ import os -import subprocess import sys diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index aba8487f6..f91a5f49d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -21,7 +21,6 @@ This script is used to configure openvswitch flows on XenServer hosts. 
""" import os -import subprocess import sys # This is written to Python 2.4, since that is what is available on XenServer @@ -68,8 +67,8 @@ def main(dom_id, command, net_type, only_this_vif=None): 'Interface', vif, 'ofport') params = dict(VIF_NAME=vif, - VIF_MAC=data['mac'], - VIF_OFPORT=vif_ofport) + MAC=data['mac'], + OF_PORT=vif_ofport) ovs = OvsFlow(bridge, params) @@ -80,95 +79,95 @@ def main(dom_id, command, net_type, only_this_vif=None): if command in ('online', 'reset'): if net_type in ('ipv4', 'all') and 'ips' in data: for ip4 in data['ips']: - ovs.params.update({'VIF_IPv4': ip4['ip']}) + ovs.params.update({'IPV4_ADDR': ip4['ip']}) apply_ovs_ipv4_flows(ovs, bridge, params) if net_type in ('ipv6', 'all') and 'ip6s' in data: for ip6 in data['ip6s']: link_local = str(netaddr.EUI(data['mac']).eui64()\ .ipv6_link_local()) - ovs.params.update({'VIF_LOCAL_IPv6': link_local}) - ovs.params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local}) + ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']}) apply_ovs_ipv6_flows(ovs, bridge, params) def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp," + "arp_sha=%(MAC)s,nw_src=%(IPV4_ADDR)s,actions=normal") - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp," + "arp_sha=%(MAC)s,nw_src=0.0.0.0,actions=normal") # allow valid IPv4 outbound - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,ip," + "nw_src=%(IPV4_ADDR)s,actions=normal") def apply_ovs_ipv6_flows(ovs, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal") # Neighbor Advertisement - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,actions=normal") - - # drop all other neighbor discovery (required because we permit all icmp6 below) - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,actions=drop") - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,actions=drop") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136," + "nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136," + "nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal") + + # drop all other neighbor discovery (req b/c we permit all icmp6 below) + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop") # Redirect Gateway - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop") # Mobile Prefix Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop") # Mobile Prefix Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,actions=drop") + 
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop") # Multicast Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop") # Multicast Router Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop") # Multicast Router Termination - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop") # allow valid IPv6 outbound, by type - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal") # all else will be dropped ... 
if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif_name]" % \ + print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From 10db492376a8bb8409e3fb3c33707865ac0f3ee7 Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 2 May 2011 14:25:21 -0700 Subject: implemented review suggestion EAFP style, and fixed test stub fake_show needs to have image_state = available or other tests will fail --- nova/api/ec2/cloud.py | 14 +++++++++----- nova/tests/test_cloud.py | 35 ++++++++++++++++++----------------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0ea0e3603..5dc608139 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -815,17 +815,21 @@ class CloudController(object): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] image = self._get_image(context, kwargs['image_id']) - if not image: + if not image: raise exception.NotFound(_('Image %s not found') % kwargs['image_id']) - if not 'properties' in image or \ - (not 'image_state' in image['properties']) or \ - (image['properties']['image_state'] is not 'available'): + try: + available = (image['properties']['image_state'] == 'available') + except KeyError: + available = False + + if not available: raise exception.ApiError(_('Image must be available')) + instances = self.compute_api.create(context, instance_type=instance_types.get_by_type( kwargs.get('instance_type', None)), - image_id = image['id'], + image_id=image['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 85f3a8e87..da2fce06b 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -30,13 +30,13 @@ from eventlet import greenthread from nova import context from nova import crypto from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import rpc from nova import service from nova import test from nova import utils -from nova import exception from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -73,7 +73,7 @@ class CloudTestCase(test.TestCase): def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}} + 'type': 'machine', 'image_state': 'available'}} self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) @@ -307,15 +307,16 @@ class CloudTestCase(test.TestCase): self.cloud.delete_key_pair(self.context, 'test') def test_run_instances(self): - allinst = db.instance_get_all(context.get_admin_context()) - self.assertEqual(0, len(allinst)) - def fake_show_decrypt(meh, context, id): + all_instances = db.instance_get_all(context.get_admin_context()) + self.assertEqual(0, len(all_instances)) + + def fake_show_decrypt(self, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'decrypting'}} - def fake_show_avail(meh, context, id): + def fake_show_no_state(self, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine', 'image_state': 'available'}} + 'type': 'machine'}} image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -324,17 +325,7 @@ 
class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} run_instances = self.cloud.run_instances - # when image doesn't have 'image_state' attr at all - self.assertRaises(exception.ApiError, run_instances, - self.context, **kwargs) - # when image has 'image_state' yet not 'available' - self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) - self.assertRaises(exception.ApiError, run_instances, - self.context, **kwargs) # when image has valid image_state - self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_avail) result = run_instances(self.context, **kwargs) instance = result['instancesSet'][0] self.assertEqual(instance['imageId'], 'ami-00000001') @@ -342,6 +333,16 @@ class CloudTestCase(test.TestCase): self.assertEqual(instance['instanceId'], 'i-00000001') self.assertEqual(instance['instanceState']['name'], 'scheduling') self.assertEqual(instance['instanceType'], 'm1.small') + # when image doesn't have 'image_state' attr at all + self.stubs.UnsetAll() + self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state) + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) + # when image has 'image_state' yet not 'available' + self.stubs.UnsetAll() + self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) -- cgit From 3ee0507ddc6bb7e15834144acc47c354396fbc70 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 5 May 2011 23:14:46 -0400 Subject: Publish errors via nova.notifier --- nova/log.py | 9 +++++++++ nova/tests/test_notifier.py | 21 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 096279f7c..3e587891a 100644 --- a/nova/log.py +++ b/nova/log.py @@ -35,6 +35,7 @@ import os import sys import traceback +import nova from nova import flags from nova import version @@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_bool('publish_errors', True, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') @@ -258,12 +260,19 @@ class NovaRootLogger(NovaLogger): else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) + if FLAGS.publish_errors: + self.addHandler(PublishErrorsHandler(ERROR)) if FLAGS.verbose: self.setLevel(DEBUG) else: self.setLevel(INFO) +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + nova.notifier.notify('error', record) + + def handle_exception(type, value, tb): extra = {} if FLAGS.verbose: diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 4d6289e6a..d18d3bc05 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,14 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-import nova +import json + +import stubout +import nova +from nova import log as logging from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -import stubout +LOG = logging.getLogger('nova.compute.api') class NotifierTestCase(test.TestCase): """Test case for notifications""" @@ -58,3 +62,16 @@ class NotifierTestCase(test.TestCase): notifier.notify('derp', Mock()) self.assertEqual(self.mock_cast, True) + + def test_error_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + msgs = [] + def mock_cast(context, topic, msg): + data = json.loads(msg) + msgs.append(data) + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo'); + msg = msgs[0] + self.assertEqual(msg['event_name'], 'error') + self.assertEqual(msg['model']['msg'], 'foo') -- cgit From 65595766706631a5c65193cfc0fa2ac9de1aeffc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 6 May 2011 20:15:06 -0400 Subject: Set publish_errors default to False. --- nova/log.py | 2 +- nova/tests/test_notifier.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 3e587891a..d2ed82c6c 100644 --- a/nova/log.py +++ b/nova/log.py @@ -64,7 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') -flags.DEFINE_bool('publish_errors', True, 'publish error events') +flags.DEFINE_bool('publish_errors', False, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index d18d3bc05..c9c4ddde8 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -18,14 +18,12 @@ import json import stubout import nova -from nova import log as logging +from nova import log from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -LOG = logging.getLogger('nova.compute.api') - class NotifierTestCase(test.TestCase): """Test case for notifications""" def setUp(self): @@ -66,12 +64,17 @@ class NotifierTestCase(test.TestCase): def test_error_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) + LOG = log.getLogger('nova') + LOG.setup_from_flags() + msgs = [] def mock_cast(context, topic, msg): data = json.loads(msg) msgs.append(data) self.stubs.Set(nova.rpc, 'cast', mock_cast) LOG.error('foo'); + self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_name'], 'error') self.assertEqual(msg['model']['msg'], 'foo') -- cgit From 389f7c79199d5ad908a72375a7377a1122f36707 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:52:26 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. 
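Roughly, the helpers factored out below reduce to plain lvcreate/dd/lvremove
invocations. A standalone sketch of the equivalent calls follows; it is
illustrative only, the volume group and volume name are example values, and
the real driver reads the group from FLAGS.volume_group and builds device
paths via local_path().

# Sketch only: the shell-outs behind _sizestr/_create_volume/_delete_volume.
# Error handling and flag plumbing are omitted; names are examples.
import subprocess

VOLUME_GROUP = 'nova-volumes'   # example; normally FLAGS.volume_group

def sizestr(size_in_g):
    # a 0 GB request is created as a minimal 100M logical volume
    return '100M' if int(size_in_g) == 0 else '%sG' % size_in_g

def create_volume(name, size_in_g):
    # sudo lvcreate -L <size> -n <name> <volume group>
    subprocess.call(['sudo', 'lvcreate', '-L', sizestr(size_in_g),
                     '-n', name, VOLUME_GROUP])

def delete_volume(name, size_in_g):
    # zero the volume first so data does not leak between users, then remove
    path = '/dev/%s/%s' % (VOLUME_GROUP, name)
    subprocess.call(['sudo', 'dd', 'if=/dev/zero', 'of=%s' % path,
                     'count=%d' % (int(size_in_g) * 1024), 'bs=1M'])
    subprocess.call(['sudo', 'lvremove', '-f',
                     '%s/%s' % (VOLUME_GROUP, name)])

For example, create_volume('volume-00000001', 1) creates a 1G LV in the
example group, and delete_volume('volume-00000001', 1) wipes and removes it.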
--- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 3f3caf37a..9591c93d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,16 +90,40 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) + def _create_volume(self, volume_name, sizestr): + self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) + try: + self._try_execute('sudo', 'lvdisplay', path_name) + except Exception as e: + # If the volume isn't present + return True + return False + + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" + # zero out old volumes to prevent data leaking between users + # TODO(ja): reclaiming space should be done lazy and low priority + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) + self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) + self._create_volume(volume['name'], self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" @@ -107,24 +131,10 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" - try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) - except Exception as e: + if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True - - # zero out old volumes to prevent data leaking between users - # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') - self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -612,13 +622,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" -- cgit From 03c735bb186a44d80a9d595e00e9c06fd8f709cc Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:53:25 +0900 Subject: volume/driver: implement basic snapshot/clone added basic support for snapshot/clone to VolumeDriver. The implementation is not effective, but works. 
The effective implementation should be done by drived driver class. --- Authors | 1 + nova/exception.py | 6 ++++++ nova/volume/driver.py | 42 +++++++++++++++++++++++++++++++++++++----- nova/volume/manager.py | 6 ++++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/Authors b/Authors index 60e1d2dad..9eae53e9b 100644 --- a/Authors +++ b/Authors @@ -28,6 +28,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews diff --git a/nova/exception.py b/nova/exception.py index 2dffeb795..6748ef265 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -79,6 +79,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class VolumeIsBusy(Error): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(Error, self).__init__(message) + + class SnapshotNotFound(NotFound): def __init__(self, message, snapshot_id): self.snapshot_id = snapshot_id diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 9591c93d0..457a1c9e6 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -127,27 +135,51 @@ class VolumeDriver(object): def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - raise NotImplementedError() + self._create_volume(volume['name'], self._sizestr(volume['size'])) + self._copy_volume(self.local_path(snapshot), self.local_path(volume), + snapshot['volume_size']) def delete_volume(self, volume): """Deletes a logical volume.""" if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? 
+ out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy( + _('deleting volume %s that has snapshot'), volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. + self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7d47fc191..84085fbd8 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -147,6 +147,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From db148f108dfc4829e1302a54fe4f57ab81212786 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 19:25:02 +0900 Subject: fix mismerge by 1059 --- nova/db/sqlalchemy/api.py | 3 +-- nova/exception.py | 65 ++++++----------------------------------------- nova/volume/driver.py | 3 +-- 3 files changed, 10 insertions(+), 61 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ebdb2ad5c..7302f25b0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1798,8 +1798,7 @@ def snapshot_get(context, snapshot_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, - snapshot_id) + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result diff --git a/nova/exception.py b/nova/exception.py index 6748ef265..b16ea6810 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,65 +60,8 @@ class ApiError(Error): class BuildInProgress(Error): - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class InstanceNotFound(NotFound): - def __init__(self, message, instance_id): - self.instance_id = instance_id - super(InstanceNotFound, self).__init__(message) - - -class VolumeNotFound(NotFound): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(VolumeNotFound, self).__init__(message) - - -class 
VolumeIsBusy(Error): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(Error, self).__init__(message) - - -class SnapshotNotFound(NotFound): - def __init__(self, message, snapshot_id): - self.snapshot_id = snapshot_id - super(SnapshotNotFound, self).__init__(message) - - -class Duplicate(Error): pass - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class InvalidInputException(Error): - pass - - -class InvalidContentType(Error): - pass - - -class TimeoutException(Error): - pass - - class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, inner_exception): @@ -319,6 +262,14 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s not found") + + +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 457a1c9e6..e783d3a5a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -153,8 +153,7 @@ class VolumeDriver(object): volume['name'])) out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy( - _('deleting volume %s that has snapshot'), volume['name']) + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From c5dbee818b1a06bf5358c32197c8e15ecf0f660d Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 20:19:35 +0900 Subject: db: fix db versioning --- .../versions/015_add_volume_snapshot_support.py | 71 ---------------------- .../versions/016_add_snapshot_id_to_volumes.py | 48 --------------- .../versions/016_add_volume_snapshot_support.py | 71 ++++++++++++++++++++++ .../versions/017_add_snapshot_id_to_volumes.py | 48 +++++++++++++++ 4 files changed, 119 insertions(+), 119 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py deleted file mode 100644 index 288f63e72..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py +++ /dev/null @@ -1,71 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * -from migrate import * - -from nova import log as logging - -meta = MetaData() - -snapshots = Table('snapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', Integer(), nullable=False), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('progress', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_size', Integer()), - Column('scheduled_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - try: - snapshots.create() - except Exception: - logging.info(repr(snapshots)) - logging.exception('Exception while creating table') - meta.drop_all(tables=[snapshots]) - raise - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py deleted file mode 100644 index 0a50123bf..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py +++ /dev/null @@ -1,48 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Column -# - -snapshot_id = Column('snapshot_id', Integer()) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - # Add columns to existing tables - volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) -- cgit From 6c4059f20c85e9bc013a340de167151e7b5fa8c4 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Wed, 11 May 2011 03:24:02 +0400 Subject: Bugfix #780784. KeyError when creating custom image. --- Authors | 1 + nova/virt/libvirt_conn.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Authors b/Authors index 60e1d2dad..72eb0b6ae 100644 --- a/Authors +++ b/Authors @@ -54,6 +54,7 @@ Mark Washenberger Masanori Itoh Matt Dietz Michael Gundlach +Mike Scherbakov Monsyne Dragon Monty Taylor MORITA Kazutaka diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9780c69a6..71cedae54 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -456,7 +456,8 @@ class LibvirtConnection(driver.ComputeDriver): 'container_format': base['container_format'], 'is_public': False, 'name': '%s.%s' % (base['name'], image_id), - 'properties': {'architecture': base['architecture'], + 'properties': {'architecture': + base['properties']['architecture'], 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', -- cgit From 0a3da155228228d3f0eeac1efdea1e29eef2f3a0 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 12 May 2011 12:04:39 -0700 Subject: changed NotFound exception to ImageNotFound --- nova/api/ec2/cloud.py | 3 +-- nova/tests/test_cloud.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8a54d23f2..ad8c3fe90 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -823,8 +823,7 @@ class CloudController(object): kwargs['ramdisk_id'] = ramdisk['id'] image = self._get_image(context, kwargs['image_id']) if not image: - raise exception.NotFound(_('Image %s not found') % - kwargs['image_id']) + raise exception.ImageNotFound(kwargs['image_id']) try: available = (image['properties']['image_state'] == 'available') except KeyError: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 202dc36bc..ebfb5ee44 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -227,7 +227,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine'}}] def fake_show_none(meh, context, id): - raise exception.NotFound + raise exception.ImageNotFound self.stubs.Set(local.LocalImageService, 'detail', fake_detail) # list 
all @@ -245,7 +245,7 @@ class CloudTestCase(test.TestCase): self.stubs.UnsetAll() self.stubs.Set(local.LocalImageService, 'show', fake_show_none) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none) - self.assertRaises(exception.NotFound, describe_images, + self.assertRaises(exception.ImageNotFound, describe_images, self.context, ['ami-fake']) def test_describe_image_attribute(self): -- cgit From 321d724df5f0c4ca008da2a08ef279ca18df0733 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:07:34 +0900 Subject: Authors: add myself to Authers file add myself to Authers file for later commit. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 8b54240c1..baf8fde4c 100644 --- a/Authors +++ b/Authors @@ -28,6 +28,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews -- cgit From bbbea57cf6ab28c3ad1081041275e0d6d2bbd308 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:08:57 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. --- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..ec7be37bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,37 +90,47 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) - def create_volume(self, volume): - """Creates a logical volume. Can optionally return a Dictionary of - changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] + def _create_volume(self, volume_name, sizestr): self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) - - def delete_volume(self, volume): - """Deletes a logical volume.""" + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) + self._try_execute('sudo', 'lvdisplay', path_name) except Exception as e: - # If the volume isn't present, then don't attempt to delete + # If the volume isn't present return True + return False + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + + def create_volume(self, volume): + """Creates a logical volume. 
Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + if self._volume_not_present(volume['name']): + # If the volume isn't present, then don't attempt to delete + return True + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -608,13 +618,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def delete_volume(self, volume): """Deletes a logical volume""" -- cgit From a7c25a19a9a2fdf89fc9ecd3992ded936923af18 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 14:21:55 +0000 Subject: Add init script and sysconfig file for openvswitch-nova --- .../networking/etc/init.d/openvswitch-nova | 96 ++++++++++++++++++++++ .../networking/etc/sysconfig/openvswitch-nova | 1 + .../xensource/scripts/ovs_configure_base_flows.py | 35 ++++---- 3 files changed, 116 insertions(+), 16 deletions(-) create mode 100755 plugins/xenserver/networking/etc/init.d/openvswitch-nova create mode 100644 plugins/xenserver/networking/etc/sysconfig/openvswitch-nova diff --git a/plugins/xenserver/networking/etc/init.d/openvswitch-nova b/plugins/xenserver/networking/etc/init.d/openvswitch-nova new file mode 100755 index 000000000..e4dbdf4af --- /dev/null +++ b/plugins/xenserver/networking/etc/init.d/openvswitch-nova @@ -0,0 +1,96 @@ +#!/bin/bash +# +# openvswitch-nova +# +# chkconfig: 2345 10 89 +# description: Apply initial OVS flows for Nova + +# Copyright 2011 OpenStack LLC. +# Copyright (C) 2009, 2010, 2011 Nicira Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# source function library +if [ -f /etc/init.d/functions ]; then + . /etc/init.d/functions +elif [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +elif [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +else + echo "$0: missing LSB shell function library" >&2 + exit 1 +fi + +OVS_CONFIGURE_BASE_FLOWS=/etc/xensource/scripts/ovs_configure_base_flows.py + +if test -e /etc/sysconfig/openvswitch-nova; then + . 
/etc/sysconfig/openvswitch-nova +else + echo "$0: missing configuration file: /etc/sysconfig/openvswitch-nova" + exit 1 +fi + +if test -e /etc/xensource/network.conf; then + NETWORK_MODE=$(cat /etc/xensource/network.conf) +fi + +case ${NETWORK_MODE:=openvswitch} in + vswitch|openvswitch) + ;; + bridge) + exit 0 + ;; + *) + echo "Open vSwitch disabled (/etc/xensource/network.conf is invalid)" >&2 + exit 0 + ;; +esac + +function run_ovs_conf_base_flows { + # expected format: DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1" + for pair in $DEVICE_BRIDGES; do + # below in $info, physical device is [0], bridge name is [1] + info=${pair//:/ } + /usr/bin/python $OVS_CONFIGURE_BASE_FLOWS $1 ${info[0]} ${info[1]} + done +} + +function start { + run_ovs_conf_base_flows online +} + +function stop { + run_ovs_conf_base_flows offline +} + +function restart { + run_ovs_conf_base_flows reset +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + restart + ;; + *) + echo "usage: openvswitch-nova [start|stop|restart]" + exit 1 + ;; +esac diff --git a/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova new file mode 100644 index 000000000..829782fb6 --- /dev/null +++ b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova @@ -0,0 +1 @@ +#DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 82d0b9e31..0186a3c8b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -27,32 +27,35 @@ import sys from novalib import execute, execute_get_output -def main(phys_dev_name, bridge_name): - pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport') +def main(command, phys_dev_name, bridge_name): ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) - # clear all flows + # always clear all flows first ovs_ofctl('del-flows', bridge_name) - # these flows are lower priority than all VM-specific flows. + if command in ('online', 'reset'): + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport') - # allow all traffic from the physical NIC, as it is trusted (i.e., from a - # filtered vif, or from the physical infrastructure - ovs_ofctl('add-flow', bridge_name, - "priority=2,in_port=%s,actions=normal" % pnic_ofport) + # these flows are lower priority than all VM-specific flows. - # default drop - ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') + # allow all traffic from the physical NIC, as it is trusted (i.e., from a + # filtered vif, or from the physical infrastructure + ovs_ofctl('add-flow', bridge_name, + "priority=2,in_port=%s,actions=normal" % pnic_ofport) + + # default drop + ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') if __name__ == "__main__": - if len(sys.argv) != 3: + if len(sys.argv) != 4 or sys.argv[1] not in ('online', 'offline', 'reset'): + print sys.argv script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." 
- print "usage: %s phys-dev-name bridge-name" % script_name - print " ex: %s eth0 xenbr0" % script_name + print "usage: %s [online|offline|reset] phys-dev-name bridge-name" % script_name + print " ex: %s online eth0 xenbr0" % script_name sys.exit(1) else: - phys_dev_name, bridge_name = sys.argv[1:3] - main(phys_dev_name, bridge_name) + command, phys_dev_name, bridge_name = sys.argv[1:4] + main(command, phys_dev_name, bridge_name) -- cgit From 4f7cfba4a00f04b7c30c61da2946f183241a7c7f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:27:35 +0900 Subject: volume/driver: implement basic snapshot added basic support for snapshot to VolumeDriver base class. The implementation is not effective, but works. The effective implementation should be done by drived driver class. --- nova/exception.py | 4 ++++ nova/volume/driver.py | 37 +++++++++++++++++++++++++++++++++---- nova/volume/manager.py | 6 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 39620ccc1..bd04435ed 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -271,6 +271,10 @@ class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ec7be37bf..a6cf2cb46 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -130,20 +138,41 @@ class VolumeDriver(object): if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? 
+ out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. + self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..fd889633d 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -142,6 +142,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From aaec8400be701c674bbf89badd59ee9468827ed9 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 14 May 2011 01:42:26 +0900 Subject: volume/driver: make unit test, test_volume, pass fake command executer doesn't return command result. Which return None instead of string. So add None check to make unit test pass. --- nova/volume/driver.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a6cf2cb46..0807ff476 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -145,9 +145,11 @@ class VolumeDriver(object): '-C', '-o', 'Attr', '%s/%s' % (FLAGS.volume_group, volume['name'])) - out = out.strip() - if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy(volume_name=volume['name']) + # fake_execute returns None resulting unit test error + if out: + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From 8b86fb3a4d9ee3e328232c0051b9daff6838d00d Mon Sep 17 00:00:00 2001 From: Josh Durgin Date: Fri, 13 May 2011 10:26:13 -0700 Subject: Add support for rbd snapshots. 
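For reference, the rbd snapshot support added below reduces to two CLI invocations against the pool named by FLAGS.rbd_pool. A minimal standalone sketch of those calls (the check_call wrapper, the helper name, and the example volume/snapshot names are illustrative, not part of the patch):

    import subprocess

    def rbd_snap(action, pool, volume_name, snap_name):
        """Run 'rbd snap create' or 'rbd snap rm' for a volume; raises on non-zero exit."""
        assert action in ('create', 'rm')
        subprocess.check_call(['rbd', '--pool', pool,
                               'snap', action, '--snap', snap_name, volume_name])

    # e.g. rbd_snap('create', 'rbd', 'volume-00000001', 'snapshot-00000001')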
--- nova/volume/driver.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 0807ff476..e0e18b9bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -608,6 +608,18 @@ class RBDDriver(VolumeDriver): self._try_execute('rbd', '--pool', FLAGS.rbd_pool, 'rm', volume['name']) + def create_snapshot(self, snapshot): + """Creates an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'create', '--snap', snapshot['name'], + snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'rm', '--snap', snapshot['name'], + snapshot['volume_name']) + def local_path(self, volume): """Returns the path of the rbd volume.""" # This is the same as the remote path -- cgit From bccbe3f845e9e7661efefbe456bfa56144de8136 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 19:29:10 +0000 Subject: add udev rules and modified ovs_configure_vif_flows.py to work with udev rules --- .../etc/udev/rules.d/openvswitch-nova.rules | 3 +++ .../xensource/scripts/ovs_configure_vif_flows.py | 28 ++++++++++++---------- 2 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules new file mode 100644 index 000000000..0dfb029eb --- /dev/null +++ b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +# is this one needed? 
+#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index f91a5f49d..95a944a2b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -45,7 +45,14 @@ class OvsFlow(object): execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport) -def main(dom_id, command, net_type, only_this_vif=None): +def main(command, vif_raw, net_type): + if command not in ('online', 'offline'): + return + + vif_name, dom_id, vif_index = vif_raw.split('-') + vif = "%s%s.%s" % (vif_name, dom_id, vif_index) + bridge = "xenbr%s" % vif_index + xsls = execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] @@ -56,13 +63,11 @@ def main(dom_id, command, net_type, only_this_vif=None): (dom_id, mac)) data = json.loads(xsread) if data["label"] == "public": - vif = "vif%s.0" % dom_id - bridge = "xenbr0" + this_vif = "vif%s.0" % dom_id else: - vif = "vif%s.1" % dom_id - bridge = "xenbr1" + this_vif = "vif%s.1" % dom_id - if (only_this_vif is None) or (vif == only_this_vif): + if vif == this_vif: vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', vif, 'ofport') @@ -72,11 +77,11 @@ def main(dom_id, command, net_type, only_this_vif=None): ovs = OvsFlow(bridge, params) - if command in ('offline', 'reset'): + if command == 'offline': # I haven't found a way to clear only IPv4 or IPv6 rules. ovs.clear_flows(vif_ofport) - if command in ('online', 'reset'): + if command == 'online': if net_type in ('ipv4', 'all') and 'ips' in data: for ip4 in data['ips']: ovs.params.update({'IPV4_ADDR': ip4['ip']}) @@ -167,10 +172,9 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif]" % \ + print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ os.path.basename(sys.argv[0]) sys.exit(1) else: - dom_id, command, net_type = sys.argv[1:4] - vif_name = len(sys.argv) == 5 and sys.argv[4] or None - main(dom_id, command, net_type, vif_name) + command, vif_raw, net_type = sys.argv[1:4] + main(command, vif_raw, net_type) -- cgit From 68e34c790612f3250bd902cc87a0ab9d3d69abfb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 13 May 2011 15:36:42 -0500 Subject: first cut at weighted-sum tests --- nova/scheduler/zone_aware_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..38b395d52 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -89,6 +89,8 @@ class ZoneAwareScheduler(driver.Scheduler): # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... 
-- cgit From fd2861fdcdae0d7a3f13dac7b54d4d8f106f2b3e Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 21:05:12 +0000 Subject: fix sys.argv requirement --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 95a944a2b..2ebc4dd8c 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -171,7 +171,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": - if len(sys.argv) < 3: + if len(sys.argv) != 4: print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ os.path.basename(sys.argv[0]) sys.exit(1) -- cgit From f889f6c8a430d6411a81270a68025a27781b09a2 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 14:15:37 +0400 Subject: Unit test for snapshotting (creating custom image). --- nova/image/fake.py | 33 +++++++++++++++++++++------- nova/tests/test_virt.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 8 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..4bf25d9af 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -19,6 +19,7 @@ import copy import datetime +import random from nova import exception from nova import flags @@ -32,7 +33,7 @@ LOG = logging.getLogger('nova.image.fake') FLAGS = flags.FLAGS -class FakeImageService(service.BaseImageService): +class _FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" def __init__(self): @@ -48,9 +49,10 @@ class FakeImageService(service.BaseImageService): 'container_format': 'ami', 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, - 'ramdisk_id': FLAGS.null_kernel}} + 'ramdisk_id': FLAGS.null_kernel, + 'architecture': 'x86_64'}} self.create(None, image) - super(FakeImageService, self).__init__() + super(_FakeImageService, self).__init__() def index(self, context): """Returns list of images.""" @@ -74,19 +76,28 @@ class FakeImageService(service.BaseImageService): image_id, self.images) raise exception.ImageNotFound(image_id=image_id) - def create(self, context, data): + def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ - image_id = int(data['id']) + #image_id = int(metadata['id']) + # metadata['id'] may not exists, and since image_id is + # randomly generated in local.py, let us do the same here + try: + image_id = int(metadata['id']) + except: + image_id = random.randint(0, 2 ** 31 - 1) + if self.images.get(image_id): raise exception.Duplicate() - self.images[image_id] = copy.deepcopy(data) + metadata['id'] = image_id + self.images[image_id] = copy.deepcopy(metadata) + return self.images[image_id] - def update(self, context, image_id, data): + def update(self, context, image_id, metadata, data=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. 
@@ -95,7 +106,7 @@ class FakeImageService(service.BaseImageService): image_id = int(image_id) if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) - self.images[image_id] = copy.deepcopy(data) + self.images[image_id] = copy.deepcopy(metadata) def delete(self, context, image_id): """Delete the given image. @@ -111,3 +122,9 @@ class FakeImageService(service.BaseImageService): def delete_all(self): """Clears out all images.""" self.images.clear() + +_fakeImageService = _FakeImageService() + + +def FakeImageService(): + return _fakeImageService diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..eb238e871 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -159,6 +159,7 @@ class LibvirtConnTestCase(test.TestCase): 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', + 'image_id': '123456', 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -279,6 +280,62 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) + def test_snapshot(self): + FLAGS.image_service = 'nova.image.fake.FakeImageService' + + # Only file-based instance storages are supported at the moment + test_xml = """ + + + + + + + + """ + + class FakeVirtDomain(object): + + def __init__(self): + pass + + def snapshotCreateXML(self, *args): + return None + + def XMLDesc(self, *args): + return test_xml + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return FakeVirtDomain() + + def fake_execute(*args): + # Touch filename to pass 'with open(out_path)' + open(args[-1], "a").close() + + # Start test + image_service = utils.import_object(FLAGS.image_service) + + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + sent_meta = {'name': 'test-snap', 'is_public': False, + 'properties': properties} + # Create new image. It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') + libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') + libvirt_conn.utils.execute = fake_execute + + self.mox.ReplayAll() + + conn = libvirt_conn.LibvirtConnection(False) + conn.snapshot(instance_ref, recv_meta['id']) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) -- cgit From 818c2424a0547882fe6bdfe6613ee66a248d91db Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 15:11:54 +0400 Subject: Define image state during snapshotting. Name snapshot to the name provided, not generate. 
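Concretely, the snapshot image carries two sets of Glance metadata over its lifetime: the compute API registers it as in-progress, and the libvirt driver marks it usable once the upload completes. The two states, condensed from the hunks below (the name and property values here are illustrative):

    # As registered by the compute API's snapshot() call:
    sent_meta = {'name': 'test-snap', 'is_public': False, 'status': 'creating',
                 'properties': {'instance_id': '42', 'user_id': 'fake',
                                'image_state': 'creating'}}

    # What the updated libvirt test asserts after conn.snapshot() finishes:
    final_meta = {'name': 'test-snap', 'status': 'active',
                  'properties': {'image_state': 'available'}}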
--- nova/compute/api.py | 5 +++-- nova/tests/test_virt.py | 10 ++++++++-- nova/virt/libvirt_conn.py | 4 +++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 63884be97..971c0732f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -500,9 +500,10 @@ class API(base.Base): """ properties = {'instance_id': str(instance_id), - 'user_id': str(context.user_id)} + 'user_id': str(context.user_id), + 'image_state': 'creating'} sent_meta = {'name': name, 'is_public': False, - 'properties': properties} + 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index eb238e871..c4fcc21cc 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -320,8 +320,9 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) properties = {'instance_id': instance_ref['id'], 'user_id': str(self.context.user_id)} - sent_meta = {'name': 'test-snap', 'is_public': False, - 'properties': properties} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} # Create new image. It will be updated in snapshot method # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) @@ -336,6 +337,11 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['name'], snapshot_name) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 71cedae54..92d580314 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -451,11 +451,13 @@ class LibvirtConnection(driver.ComputeDriver): elevated = context.get_admin_context() base = image_service.show(elevated, instance['image_id']) + snapshot = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), + 'status': 'active', + 'name': snapshot['name'], 'properties': {'architecture': base['properties']['architecture'], 'kernel_id': instance['kernel_id'], -- cgit From 5eb57c6191ac7c8d98539eb3967ceb00f7c55daf Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Mon, 16 May 2011 16:29:21 +0900 Subject: Add a unit test for snapshot_volume. 
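The flow under test, stripped of the assertions, is just: insert a snapshot row in the volume DB, ask the volume manager to build it, then delete it again. A condensed sketch (assumes a configured nova test environment; volume_manager stands in for the self.volume service object used in the test):

    from nova import context
    from nova import db

    def snapshot_roundtrip(volume_manager, volume_id, size='0'):
        """Create a snapshot record, build it via the driver, then remove it."""
        ctxt = context.get_admin_context()
        snap = {'volume_size': size, 'user_id': 'fake', 'project_id': 'fake',
                'volume_id': volume_id, 'status': 'creating'}
        snapshot_id = db.snapshot_create(ctxt, snap)['id']
        volume_manager.create_snapshot(ctxt, volume_id, snapshot_id)
        volume_manager.delete_snapshot(ctxt, snapshot_id)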
--- nova/tests/test_volume.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 236d12434..c66b66959 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -176,6 +176,33 @@ class VolumeTestCase(test.TestCase): # This will allow us to test cross-node interactions pass + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap)['id'] + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id) + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume_id) + class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" -- cgit From d44299be90bbfcac5f8de1e1264b81fbb0bfa5e2 Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Tue, 17 May 2011 01:00:16 +0900 Subject: Add vnc_keymap flag and enable setting keymap for vnc console. --- nova/virt/libvirt.xml.template | 2 +- nova/virt/libvirt_conn.py | 1 + nova/vnc/__init__.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de2497a76..20986d4d5 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -116,7 +116,7 @@ #if $getVar('vncserver_host', False) - + #end if diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..7552c9488 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1022,6 +1022,7 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.vnc_enabled: if FLAGS.libvirt_type != 'lxc': xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py index b5b00e44e..859bfd65f 100644 --- a/nova/vnc/__init__.py +++ b/nova/vnc/__init__.py @@ -32,3 +32,5 @@ flags.DEFINE_string('vncserver_host', '0.0.0.0', 'the host interface on which vnc server should listen') flags.DEFINE_bool('vnc_enabled', True, 'enable vnc related features') +flags.DEFINE_string('vnc_keymap', 'en-us', + 'keymap for vnc') -- cgit From a4ea9ac61568ce5f8300a5ba138f0ac10c79b43c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 16 May 2011 15:59:01 -0700 Subject: fix for lp783705 - remove nwfilters when instance is terminated --- nova/tests/test_virt.py | 42 ++++++++++++++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 27 +++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..babb5de9b 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -1045,3 +1045,45 @@ class NWFilterTestCase(test.TestCase): network_info, "fake") self.assertEquals(len(result), 3) + + def 
test_unfilter_instance_undefines_nwfilters(self): + class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + fakefilter = FakeNWFilter() + + def _nwfilterLookupByName(ignore): + return fakefilter + + def _filterDefineXMLMock(xml): + return True + + admin_ctxt = context.get_admin_context() + + self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + + instance_ref = self._create_instance() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine 2 filters: instance and instance-secgroup + self.assertEquals(fakefilter.undefine_call_count, 2) + + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..706973176 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1835,8 +1835,30 @@ class NWFilterFirewall(FirewallDriver): tpool.execute(self._conn.nwfilterDefineXML, xml) def unfilter_instance(self, instance): - # Nothing to do - pass + """Clear out the nwfilter rules.""" + network_info = _get_network_info(instance) + instance_name = instance.name + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + + try: + self._conn.nwfilterLookupByName(instance_filter_name).\ + undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_filter_name)s) for ' + '%(instance_name)s is not found.') % locals()) + + instance_secgroup_filter_name =\ + '%s-secgroup' % (self._instance_filter_name(instance)) + + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\ + undefine() + except libvirt.libvirtError: + # This will happen if called by IptablesFirewallDriver + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for ' + '%(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2000,6 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() + self.nwfilter.unfilter_instance(instance) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 0e2aba9e9869a66a1c3a6ece0fb08be631daa5bf Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:38:44 +0400 Subject: Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. 
(should be per-api-request) --- nova/auth/ldapdriver.py | 93 ++++++++++++++++++++++++++++++++++++++++++++----- nova/auth/manager.py | 20 +++++++---- 2 files changed, 98 insertions(+), 15 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3f8432851..7849d941e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,7 +24,9 @@ other backends by creating another class that exposes the same public methods. """ +import functools import sys +import threading from nova import exception from nova import flags @@ -85,6 +87,7 @@ def _clean(attr): def sanitize(fn): """Decorator to sanitize all args""" + @functools.wraps(fn) def _wrapped(self, *args, **kwargs): args = [_clean(x) for x in args] kwargs = dict((k, _clean(v)) for (k, v) in kwargs) @@ -103,29 +106,74 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' + __local = threading.local() def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') - self.conn = None if FLAGS.ldap_schema_version == 1: LdapDriver.project_pattern = '(objectclass=novaProject)' LdapDriver.isadmin_attribute = 'isAdmin' LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' + self.__cache = None def __enter__(self): """Creates the connection to LDAP""" - self.conn = self.ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + # TODO(yorik-sar): Should be per-request cache, not per-driver-request + self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" - self.conn.unbind_s() + self.__cache = None return False + def __local_cache(key_fmt): + """Wrap function to cache it's result in self.__cache. + Works only with functions with one fixed argument. 
+ """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + + @property + def conn(self): + try: + return self.__local.conn + except AttributeError: + conn = self.ldap.initialize(FLAGS.ldap_url) + conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + self.__local.conn = conn + return conn + + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +182,30 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s'%(access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +458,7 @@ class LdapDriver(object): """Check if project exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +490,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! 
scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +636,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +649,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,10 +677,11 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a7..c71f0f161 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,6 +23,7 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 +import threading import tempfile import uuid import zipfile @@ -206,6 +207,7 @@ class AuthManager(object): """ _instance = None + __local = threading.local() def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -223,12 +225,18 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From e4f8ef67065f1de36ceadf9dd97e07fbe9fc9d83 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:39:19 +0400 Subject: Fixed mistyped key, caused huge performance leak. --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 519793643..5c536f6d8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] -- cgit From 2fcc10656222bea6056742ef943c1b82724c0b56 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:45:48 +0400 Subject: PEP8 fixes. 
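Beyond the cleanups, the pattern the ldapdriver and AuthManager changes above share is worth spelling out: a class-level threading.local() gives every thread its own lazily-built connection, so one driver instance can be used concurrently without sharing an LDAP or memcache handle across threads. A stripped-down sketch of just that pattern (_connect() is a placeholder for the real ldap.initialize/simple_bind_s or memcache.Client setup):

    import threading

    class ThreadLocalConn(object):
        _local = threading.local()      # one attribute namespace per thread

        def _connect(self):
            return object()             # placeholder for the real connection object

        @property
        def conn(self):
            # Build the connection on first use in this thread, then reuse it.
            try:
                return self._local.conn
            except AttributeError:
                self._local.conn = self._connect()
                return self._local.conn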
--- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7849d941e..9fe0165a1 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -182,7 +182,7 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" - cache_key = 'uak_dn_%s'%(access,) + cache_key = 'uak_dn_%s' % (access,) user_dn = self.mc.get(cache_key) if user_dn: user = self.__to_user( @@ -205,7 +205,8 @@ class LdapDriver(object): def get_project(self, pid): """Retrieve project by id""" dn = self.__project_to_dn(pid, search=False) - attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) + attr = self.__find_object(dn, LdapDriver.project_pattern, + scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize -- cgit From 23bbbfcd3317859d44dba7da7996a978ad922543 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 10:45:19 -0500 Subject: First cut at least cost scheduler --- nova/scheduler/least_cost.py | 79 +++++++++++++++++++++++++++++++++ nova/tests/test_least_cost_scheduler.py | 39 ++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 nova/scheduler/least_cost.py create mode 100644 nova/tests/test_least_cost_scheduler.py diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py new file mode 100644 index 000000000..75dde81ca --- /dev/null +++ b/nova/scheduler/least_cost.py @@ -0,0 +1,79 @@ +import collections + +# TODO(sirp): this should be just `zone_aware` to match naming scheme +# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` +# module +from nova.scheduler import zone_aware_scheduler + +class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def get_cost_fns(self): + """Returns a list of tuples containing weights and cost functions to + use for weighing hosts + """ + cost_fns = [] + + return cost_fns + + def weigh_hosts(self, num, specs, hosts): + """ + Returns a list of dictionaries of form: + [ {weight: weight, hostname: hostname} ] + """ + # FIXME(sirp): weigh_hosts should handle more than just instances + cost_fns = [] + hosts = [] + cost_hosts = weighted_sum(domain=hosts, weighted_fns=self.get_cost_fns()) + + # TODO convert hosts back to hostnames + weight_hostnames = [] + return weight_hostnames + +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + +def weighted_sum(domain, weighted_fns, normalize=True): + """ + Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list like: [(score, elem)] + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... 
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + elem = domain[idx] + domain_scores.append(elem_score) + + return domain_scores diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py new file mode 100644 index 000000000..a3a18a09f --- /dev/null +++ b/nova/tests/test_least_cost_scheduler.py @@ -0,0 +1,39 @@ +from nova import test +from nova.scheduler import least_cost + +MB = 1024 * 1024 + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + +class WeightedSumTest(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) -- cgit From e6fc2fc58d2c98f4322e92b26b1031ca362c8724 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 15:14:17 -0700 Subject: add more statuses for ec2 image registration --- nova/image/s3.py | 84 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 26 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index c38c58d95..673cbf56f 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -161,43 +161,75 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" - parts = [] - for fn_element in manifest.find('image').getiterator('filename'): - part = self._download_file(bucket, fn_element.text, image_path) - parts.append(part) - - # NOTE(vish): this may be suboptimal, should we use cat? 
- encrypted_filename = os.path.join(image_path, 'image.encrypted') - with open(encrypted_filename, 'w') as combined: - for filename in parts: - with open(filename) as part: - shutil.copyfileobj(part, combined) - - metadata['properties']['image_state'] = 'decrypting' + metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) - hex_key = manifest.find('image/ec2_encrypted_key').text - encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find('image/ec2_encrypted_iv').text - encrypted_iv = binascii.a2b_hex(hex_iv) + try: + parts = [] + elements = manifest.find('image').getiterator('filename') + for fn_element in elements: + part = self._download_file(bucket, + fn_element.text, + image_path) + parts.append(part) + + # NOTE(vish): this may be suboptimal, should we use cat? + enc_filename = os.path.join(image_path, 'image.encrypted') + with open(enc_filename, 'w') as combined: + for filename in parts: + with open(filename) as part: + shutil.copyfileobj(part, combined) + + except Exception: + metadata['properties']['image_state'] = 'failed_download' + self.service.update(context, image_id, metadata) + raise - # FIXME(vish): grab key from common service so this can run on - # any host. - cloud_pk = crypto.key_path(context.project_id) + metadata['properties']['image_state'] = 'decrypting' + self.service.update(context, image_id, metadata) - decrypted_filename = os.path.join(image_path, 'image.tar.gz') - self._decrypt_image(encrypted_filename, encrypted_key, - encrypted_iv, cloud_pk, decrypted_filename) + try: + hex_key = manifest.find('image/ec2_encrypted_key').text + encrypted_key = binascii.a2b_hex(hex_key) + hex_iv = manifest.find('image/ec2_encrypted_iv').text + encrypted_iv = binascii.a2b_hex(hex_iv) + + # FIXME(vish): grab key from common service so this can run on + # any host. 
+ cloud_pk = crypto.key_path(context.project_id) + + dec_filename = os.path.join(image_path, 'image.tar.gz') + self._decrypt_image(enc_filename, encrypted_key, + encrypted_iv, cloud_pk, + dec_filename) + except Exception: + metadata['properties']['image_state'] = 'failed_decrypt' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) - unz_filename = self._untarzip_image(image_path, decrypted_filename) + try: + unz_filename = self._untarzip_image(image_path, dec_filename) + except Exception: + metadata['properties']['image_state'] = 'failed_untar' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'uploading' - with open(unz_filename) as image_file: - self.service.update(context, image_id, metadata, image_file) + self.service.update(context, image_id, metadata) + try: + with open(unz_filename) as image_file: + self.service.update(context, image_id, + metadata, image_file) + except Exception: + metadata['properties']['image_state'] = 'failed_upload' + self.service.update(context, image_id, metadata) + raise + metadata['properties']['image_state'] = 'available' + metadata['status'] = 'active' self.service.update(context, image_id, metadata) shutil.rmtree(image_path) -- cgit From a4035df4d031d3d90f3f7ce938ff0b8305be6773 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:27:04 -0500 Subject: Adding fill first cost function --- nova/scheduler/least_cost.py | 12 ++++++++++++ nova/test.py | 16 +++++++++++++--- nova/tests/test_least_cost_scheduler.py | 33 +++++++++++++++++++++++++++++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index e47951f17..79376c358 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -34,6 +34,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', 'Which cost functions the LeastCostScheduler should use.') +# TODO(sirp): Once we have enough of these rules, we can break them out into a +# cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') def noop_cost_fn(host): @@ -41,6 +43,16 @@ def noop_cost_fn(host): return 1 +flags.DEFINE_integer('fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') +def fill_first_cost_fn(host): + """Prefer hosts that have less ram available, filter_hosts will exclude + hosts that don't have enough ram""" + hostname, caps = host + free_mem = caps['compute']['host_memory_free'] + return free_mem + + class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def get_cost_fns(self): """Returns a list of tuples containing weights and cost functions to diff --git a/nova/test.py b/nova/test.py index 4deb2a175..401f82d38 100644 --- a/nova/test.py +++ b/nova/test.py @@ -181,7 +181,7 @@ class TestCase(unittest.TestCase): wsgi.Server.start = _wrapped_start # Useful assertions - def assertDictMatch(self, d1, d2): + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. 
This is a 'deep' match in the sense that it handles nested @@ -212,15 +212,24 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] + + try: + within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + except ValueError: + # If both values aren't convertable to float, just ignore + within_tolerance = False + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue + elif approx_equal and within_tolerance: + continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % locals()) - def assertDictListMatch(self, L1, L2): + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) @@ -236,4 +245,5 @@ class TestCase(unittest.TestCase): 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): - self.assertDictMatch(d1, d2) + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b2318a3bf..b7bcd2f02 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -119,7 +119,7 @@ class LeastCostSchedulerTestCase(test.TestCase): def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected) + self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): num = 1 @@ -137,12 +137,41 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. 
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) -- cgit From d6fbe417d7f8f7540cffe8c941c0591a22483978 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:44:08 -0500 Subject: Using import_class to import filter_host driver --- nova/scheduler/host_filter.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 7cb41a433..117f08242 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler +from nova import utils LOG = logging.getLogger('nova.scheduler.host_filter') @@ -283,11 +284,13 @@ def choose_driver(driver_name=None): if not driver_name: driver_name = FLAGS.default_host_filter_driver - # FIXME(sirp): use utils.import_class here - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + try: + driver = utils.import_object(driver_name) + return driver + except exception.ClassNotFound: + raise exception.SchedulerHostFilterDriverNotFound( + driver_name=driver_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): -- cgit From 41ea2f4babc474cad64d81c9c95cf02e399a0a64 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 18:57:00 -0400 Subject: added util functions to get image service --- nova/utils.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..e7ce0a79b 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -724,3 +724,51 @@ def parse_server_string(server_str): except: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') + + +def parse_image_ref(image_ref): + """ + Parse an imageRef and return (id, host, port) + + If the image_ref passed in is an integer, it will + return (image_ref, None, None), otherwise it will + return (id, host, port) + + image_ref - imageRef for an image + + """ + + if is_int(image_ref): + return (image_ref, None, None) + + o = urlparse(image_ref) + # Default to port 80 if not passed, should this be 9292? 
+ port = o.port or 80 + host = o.netloc.split(':', 1)[0] + id = o.path.split('/')[-1] + + return (id, host, port) + + +def get_image_service(image_ref): + """ + Get the proper image_service for an image_id + + image_ref - image ref/id for an image + """ + + (image_id, host, port) = parse_image_ref(image_ref) + + image_service = None + + if host: + GlanceImageService = utils.import_class(FLAGS.glance_image_service) + GlanceClient = utils.import_class('glance.client.Client') + + glance_client = GlanceClient(host, port) + image_service = GlanceImageService(glance_client) + else: + ImageService = utils.import_class(FLAGS.image_service) + image_service = ImageService() + + return (image_id, image_service) -- cgit From 5d35b548316eccd5a8454ccf7424ebe60aaf54e6 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:07:44 -0400 Subject: updates to utils methods, initial usage in images.py --- nova/api/openstack/images.py | 14 ++++++-------- nova/utils.py | 10 +++++++--- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..8d796c284 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -82,15 +82,12 @@ class Controller(common.OpenstackController): :param id: Image identifier (integer) """ context = req.environ['nova.context'] + image_id = id try: - image_id = int(id) - except ValueError: - explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - - try: - image = self._image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service( + image_id) + image = image_service.show(context, service_image_id) except exception.NotFound: explanation = _("Image '%d' not found.") % (image_id) raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -105,7 +102,8 @@ class Controller(common.OpenstackController): """ image_id = id context = req.environ['nova.context'] - self._image_service.delete(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image_service.delete(context, service_image_id) return webob.exc.HTTPNoContent() def create(self, req): diff --git a/nova/utils.py b/nova/utils.py index e7ce0a79b..c7da95a97 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -750,12 +750,17 @@ def parse_image_ref(image_ref): return (id, host, port) -def get_image_service(image_ref): +def get_image_service(image_ref=None): """ Get the proper image_service for an image_id + Returns (image_service, image_id) image_ref - image ref/id for an image """ + ImageService = utils.import_class(FLAGS.image_service) + + if not image_ref: + return (ImageService(), -1) (image_id, host, port) = parse_image_ref(image_ref) @@ -768,7 +773,6 @@ def get_image_service(image_ref): glance_client = GlanceClient(host, port) image_service = GlanceImageService(glance_client) else: - ImageService = utils.import_class(FLAGS.image_service) image_service = ImageService() - return (image_id, image_service) + return (image_service, id) -- cgit From dacb4899ea631840fd95ee0bd25d999fbb16b8b4 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:10:11 -0400 Subject: use utils.get_image_service in compute_api --- nova/compute/api.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index a12f8d515..930e4efaa 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -156,7 +156,8 @@ class API(base.Base): 
self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - image = self.image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image = image_service.show(context, service_image_id) os_type = None if 'properties' in image and 'os_type' in image['properties']: @@ -176,9 +177,9 @@ class API(base.Base): logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id)) if kernel_id: - self.image_service.show(context, kernel_id) + image_service.show(context, kernel_id) if ramdisk_id: - self.image_service.show(context, ramdisk_id) + image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -515,6 +516,8 @@ class API(base.Base): 'user_id': str(context.user_id)} sent_meta = {'name': name, 'is_public': False, 'properties': properties} + # TODO(wwolf): not sure if we need to use + # utils.get_image_service() here ? recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, -- cgit From eacb354c159aeb8f428232eb7d678ffb60bb73cd Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:14:35 -0400 Subject: made get_image_service calls in servers.py --- nova/api/openstack/servers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8f2de2afe..bf0f56373 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -142,7 +142,10 @@ class Controller(common.OpenstackController): requested_image_id = self._image_id_from_req_data(env) try: - image_id = common.get_image_id_from_image_hash(self._image_service, + (image_service, service_image_id) = utils.get_image_service( + requested_image_id) + + image_id = common.get_image_id_from_image_hash(image_service, context, requested_image_id) except: msg = _("Can not find requested image") @@ -556,7 +559,8 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image_meta = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( -- cgit From 439787e7588b2409f319f2d86a41a3581cff8861 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:15:31 -0500 Subject: Pep8 fixes --- nova/exception.py | 3 +++ nova/scheduler/least_cost.py | 30 +++++++++++++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 11 ++++++----- nova/test.py | 5 +++-- nova/tests/test_least_cost_scheduler.py | 18 +++++++++++------- 5 files changed, 44 insertions(+), 23 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 63ed6dd5e..16c443c61 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -460,15 +460,18 @@ class FlavorNotFound(NotFound): class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") + # TODO(sirp): move these into the schedule classes since they are internal? 
class SchedulerHostFilterDriverNotFound(NotFound): message = _("Scheduler Host Filter Driver %(driver_name)s could" " not be found.") + class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") + class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 79376c358..629fe2e42 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -13,16 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. """ -Helpful docstring here +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. """ import collections from nova import flags from nova import log as logging -# TODO(sirp): this should be just `zone_aware` to match naming scheme -# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` -# module from nova.scheduler import zone_aware_scheduler from nova import utils @@ -38,6 +41,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') + + def noop_cost_fn(host): """Return a pre-weight cost of 1 for each host""" return 1 @@ -45,6 +50,8 @@ def noop_cost_fn(host): flags.DEFINE_integer('fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') + + def fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" @@ -68,7 +75,7 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( cost_fn_str=cost_fn_str) - + try: weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) except AttributeError: @@ -82,17 +89,22 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname} ]""" + # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, _ in hosts] + hostnames = [hostname for hostname, caps in hosts] cost_fns = self.get_cost_fns() costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - + weighted = [] + weight_log = [] for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname) weighted.append(weight_dict) - return weighted + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted def normalize_list(L): @@ -110,7 +122,7 @@ def weighted_sum(domain, weighted_fns, normalize=True): """Use the weighted-sum method to compute a score for an array of objects. Normalize the results of the objective-functions so that the weights are meaningful regardless of objective-function's range. 
- + domain - input to be scored weighted_fns - list of weights and functions like: [(weight, objective-functions)] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fa5b3b1b6..a1a68ce5e 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -80,7 +80,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass @@ -117,11 +117,11 @@ class ZoneAwareScheduler(driver.Scheduler): # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, request_spec) - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or # resources, so that we can apply different objective functions to it + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... @@ -145,8 +145,9 @@ class ZoneAwareScheduler(driver.Scheduler): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states return [(host, services) - for host, services in self.zone_manager.service_states.iteritems()] + for host, services in service_states.iteritems()] def weigh_hosts(self, num, request_spec, hosts): """Derived classes may override this to provide more sophisticated diff --git a/nova/test.py b/nova/test.py index 401f82d38..00a16dd68 100644 --- a/nova/test.py +++ b/nova/test.py @@ -212,9 +212,10 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] - + try: - within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance except ValueError: # If both values aren't convertable to float, just ignore within_tolerance = False diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b7bcd2f02..c8ce7892f 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -30,6 +30,7 @@ class FakeHost(object): self.free_ram = free_ram self.io = io + class WeightedSumTestCase(test.TestCase): def test_empty_domain(self): domain = [] @@ -50,14 +51,15 @@ class WeightedSumTestCase(test.TestCase): (2, lambda h: h.io), # Avoid high I/O ] - costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) # Each 256 MB unit of free-ram contributes 0.5 points by way of: # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 # Each 100 iops of IO adds 0.5 points by way of: # cost = 2 * (100/400) = 2 * 0.25 = 0.5 expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) + self.assertEqual(expected, costs) # TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing @@ -65,6 +67,7 @@ class WeightedSumTestCase(test.TestCase): class FakeZoneManager: pass + class LeastCostSchedulerTestCase(test.TestCase): def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -116,7 +119,6 @@ class LeastCostSchedulerTestCase(test.TestCase): #FLAGS.default_host_filter_driver = self.old_flag super(LeastCostSchedulerTestCase, self).tearDown() - def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) @@ -138,8 +140,9 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - - expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): @@ -152,7 +155,8 @@ class LeastCostSchedulerTestCase(test.TestCase): request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_fill_first_cost_fn(self): @@ -164,7 +168,7 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - + expected = [] for idx, (hostname, caps) in enumerate(hosts): # Costs are normalized so over 10 hosts, each host with increasing -- cgit From d24f59a251173826817e5f5c53a4f54dfe927f2d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:30:29 -0400 Subject: added is_int function to utils --- nova/utils.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index c7da95a97..fff916527 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -726,6 +726,11 @@ def parse_server_string(server_str): return ('', '') +def is_int(x): + """ Return if passed in variable is integer or not """ + return re.match(r'\d+$', str(x)) + + def parse_image_ref(image_ref): """ Parse an imageRef and return (id, host, port) @@ -757,18 +762,18 @@ def get_image_service(image_ref=None): image_ref - image ref/id for an image """ - ImageService = utils.import_class(FLAGS.image_service) + ImageService = import_class(FLAGS.image_service) if not image_ref: - return (ImageService(), -1) + return (ImageService(), None) (image_id, host, port) = parse_image_ref(image_ref) image_service = None if host: - GlanceImageService = utils.import_class(FLAGS.glance_image_service) - GlanceClient = utils.import_class('glance.client.Client') + GlanceImageService = import_class(FLAGS.glance_image_service) + GlanceClient = import_class('glance.client.Client') glance_client = GlanceClient(host, port) image_service = GlanceImageService(glance_client) -- cgit From 4ba215224e6c75037fd4f20be57d632da5d07469 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:32:56 -0500 Subject: Moving tests into scheduler subdirectory --- nova/scheduler/host_filter.py | 5 - nova/tests/scheduler/test_scheduler.py | 1118 +++++++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 121 +++ nova/tests/test_host_filter.py | 211 ---- nova/tests/test_least_cost_scheduler.py | 181 ---- nova/tests/test_scheduler.py | 1118 --------------------- 
nova/tests/test_zone_aware_scheduler.py | 121 --- 7 files changed, 1239 insertions(+), 1636 deletions(-) create mode 100644 nova/tests/scheduler/test_scheduler.py create mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py delete mode 100644 nova/tests/test_host_filter.py delete mode 100644 nova/tests/test_least_cost_scheduler.py delete mode 100644 nova/tests/test_scheduler.py delete mode 100644 nova/tests/test_zone_aware_scheduler.py diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 117f08242..79e9f3159 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -313,8 +313,3 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): instance_type = request_spec['instance_type'] name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py new file mode 100644 index 000000000..54b3f80fb --- /dev/null +++ b/nova/tests/scheduler/test_scheduler.py @@ -0,0 +1,1118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
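The relocated test module that follows leans heavily on mox's record/replay/verify cycle: stub out rpc.cast, record the expected call, replay, drive the scheduler, and let the harness verify. A minimal standalone sketch of that cycle using only the plain mox API and a hypothetical FakeRpc stand-in; it is illustrative only and not part of the patch:

import mox


class FakeRpc(object):
    def cast(self, context, topic, msg):
        raise AssertionError('the real cast should never run under test')


m = mox.Mox()
rpc = FakeRpc()
m.StubOutWithMock(rpc, 'cast')
# Record phase: state the call the code under test is expected to make.
rpc.cast('ctx', 'compute.host1', {'method': 'run_instance'})
m.ReplayAll()
# Replay phase: the code under test would normally issue this call.
rpc.cast('ctx', 'compute.host1', {'method': 'run_instance'})
# Check that every recorded expectation was satisfied, then restore.
m.VerifyAll()
m.UnsetStubs()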
+""" +Tests For Scheduler +""" + +import datetime +import mox +import novaclient.exceptions +import stubout +import webob + +from mox import IgnoreArg +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import api +from nova.scheduler import manager +from nova.scheduler import driver +from nova.compute import power_state +from nova.db.sqlalchemy import models + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') +flags.DECLARE('stub_network', 'nova.compute.manager') +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def _create_compute_service(self): + """Create compute-manager(ComputeNode and Service record).""" + ctxt = context.get_admin_context() + dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + s_ref = db.service_create(ctxt, dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + db.compute_node_create(ctxt, dic) + + return db.service_get(ctxt, s_ref['id']) + + def _create_instance(self, **kwargs): + """Create a test instance""" + ctxt = context.get_admin_context() + inst = {} + inst['user_id'] = 'admin' + inst['project_id'] = kwargs.get('project_id', 'fake') + inst['host'] = kwargs.get('host', 'dummy') + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['memory_mb'] = kwargs.get('memory_mb', 10) + inst['local_gb'] = kwargs.get('local_gb', 20) + return db.instance_create(ctxt, inst) + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.named_method(ctxt, 'topic', num=7) + + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + + self.assertRaises(exception.NotFound, scheduler.show_host_resources, + ctxt, dest) + #TODO(bcwaldon): reimplement this functionality + #c1 = (e.message.find(_("does not exist or is not a " + # "compute node.")) >= 0) + + def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" + if not keys: + keys = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used'] + + for key 
in keys: + if not (dic1[key] == dic2[key]): + return False + return True + + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + # result checking + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'] == {} + self.assertTrue(c1 and c2 and c3) + db.service_destroy(ctxt, s_ref['id']) + + def test_show_host_resources_works_correctly(self): + """Show_host_resources() works correctly as expected.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) + i_ref2 = self._create_instance(project_id='p-02', vcpus=3, + host=s_ref['host']) + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'].keys() == ['p-01', 'p-02'] + keys = ['vcpus', 'memory_mb', 'local_gb'] + c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) + c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) + self.assertTrue(c1 and c2 and c3 and c4 and c5) + + db.service_destroy(ctxt, s_ref['id']) + db.instance_destroy(ctxt, i_ref1['id']) + db.instance_destroy(ctxt, i_ref2['id']) + + +class ZoneSchedulerTestCase(test.TestCase): + """Test case for zone scheduler""" + def setUp(self): + super(ZoneSchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') + + def _create_service_model(self, **kwargs): + service = db.sqlalchemy.models.Service() + service.host = kwargs['host'] + service.disabled = False + service.deleted = False + service.report_count = 0 + service.binary = 'nova-compute' + service.topic = 'compute' + service.id = kwargs['id'] + service.availability_zone = kwargs['zone'] + service.created_at = datetime.datetime.utcnow() + return service + + def test_with_two_zones(self): + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + service_list = [self._create_service_model(id=1, + host='host1', + zone='zone1'), + self._create_service_model(id=2, + host='host2', + zone='zone2'), + self._create_service_model(id=3, + host='host3', + zone='zone2'), + self._create_service_model(id=4, + host='host4', + zone='zone2'), + self._create_service_model(id=5, + host='host5', + zone='zone2')] + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + arg = IgnoreArg() + db.service_get_all_by_topic(arg, arg).AndReturn(service_list) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + rpc.cast(ctxt, + 'compute.host1', + {'method': 'run_instance', + 'args': {'instance_id': 'i-ffffffff', + 'availability_zone': 'zone1'}}) + self.mox.ReplayAll() + scheduler.run_instance(ctxt, + 'compute', + instance_id='i-ffffffff', + availability_zone='zone1') + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True, + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + 
volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() + + def _create_instance(self, **kwargs): + """Create a test instance""" + inst = {} + inst['image_id'] = 1 + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type_id'] = '1' + inst['mac_address'] = utils.generate_mac() + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['ami_launch_index'] = 0 + inst['availability_zone'] = kwargs.get('availability_zone', None) + inst['host'] = kwargs.get('host', 'dummy') + inst['memory_mb'] = kwargs.get('memory_mb', 20) + inst['local_gb'] = kwargs.get('local_gb', 30) + inst['launched_on'] = kwargs.get('launghed_on', 'dummy') + inst['state_description'] = kwargs.get('state_description', 'running') + inst['state'] = kwargs.get('state', power_state.RUNNING) + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['size'] = 1 + vol['availability_zone'] = 'test' + return db.volume_create(self.context, vol)['id'] + + def _create_compute_service(self, **kwargs): + """Create a compute service.""" + + dic = {'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + dic['host'] = kwargs.get('host', 'dummy') + s_ref = db.service_create(self.context, dic) + if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): + t = datetime.datetime.utcnow() - datetime.timedelta(0) + dic['created_at'] = kwargs.get('created_at', t) + dic['updated_at'] = kwargs.get('updated_at', t) + db.service_update(self.context, s_ref['id'], dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) + dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') + dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) + db.compute_node_create(self.context, dic) + return db.service_get(self.context, s_ref['id']) + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + # 
NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + 
instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = 
self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + db.instance_destroy(self.context, instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = 
self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() + + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. + + """ + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, dic) + + # cannot check 2nd argument b/c the addresses of instance object + # is different. + driver_i = self.scheduler.driver + nocare = mox.IgnoreArg() + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_src_check(nocare, nocare) + driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + rpc.cast(self.context, + db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), + {"method": 'live_migration', "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(self.context, FLAGS.compute_topic, + instance_id=instance_id, + dest=i_ref['host']) + + i_ref = db.instance_get(self.context, instance_id) + self.assertTrue(i_ref['state_description'] == 'migrating') + db.instance_destroy(self.context, instance_id) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + + instance_id = self._create_instance(state_description='migrating') + i_ref = db.instance_get(self.context, instance_id) + + try: + self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + except exception.Invalid, e: + c = (e.message.find('is not running') > 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, {'instance_id': instance_id, + 'size': 1}) + t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', + 'topic': 'volume', 'report_count': 0} + s_ref = db.service_create(self.context, dic) + + self.assertRaises(exception.VolumeServiceUnavailable, + self.scheduler.driver.schedule_live_migration, + self.context, instance_id, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_compute_node_not_alive(self): + """Confirms src-compute node is alive.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t, updated_at=t, 
+ host=i_ref['host']) + + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_src_check, + self.context, i_ref) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_src_check_works_correctly(self): + """Confirms this method finishes with no error.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host=i_ref['host']) + + ret = self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + + self.assertTrue(ret is None) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_not_alive(self): + """Confirms exception raises in case dest host does not exist.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t, updated_at=t, + host=i_ref['host']) + + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_same_host(self): + """Confirms exceptioin raises in case dest and src is same host.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host=i_ref['host']) + + self.assertRaises(exception.UnableToMigrateToSelf, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_lack_memory(self): + """Confirms exception raises when dest doesn't have enough memory.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere', + memory_mb_used=12) + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere') + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_dest_check_service_works_correctly(self): + """Confirms method finishes with no error.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere', + memory_mb_used=5) + + ret = self.scheduler.driver._live_migration_dest_check(self.context, + i_ref, + 'somewhere') + self.assertTrue(ret is None) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_common_check_service_orig_not_exists(self): + """Destination host does not exist.""" + + dest = 'dummydest' + # mocks for live_migration_common_check() + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t1, updated_at=t1, + host=dest) + + # mocks for mounted_on_same_shared_storage() + fpath = '/test/20110127120000' + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + topic = FLAGS.compute_topic + 
driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(self.context, topic, dest), + {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), + {"method": 'check_shared_storage_test_file', + "args": {'filename': fpath}}) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), topic, dest), + {"method": 'cleanup_shared_storage_test_file', + "args": {'filename': fpath}}) + + self.mox.ReplayAll() + self.assertRaises(exception.SourceHostUnavailable, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + + def test_live_migration_common_check_service_different_hypervisor(self): + """Original host and dest host has different hypervisor type.""" + dest = 'dummydest' + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for destination + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for original host + s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + self.assertRaises(exception.InvalidHypervisorType, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + def test_live_migration_common_check_service_different_version(self): + """Original host and dest host has different hypervisor version.""" + dest = 'dummydest' + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for destination + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for original host + s_ref2 = self._create_compute_service(host=dest, + hypervisor_version=12002) + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + self.assertRaises(exception.DestinationHypervisorTooOld, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + def test_live_migration_common_check_checking_cpuinfo_fail(self): + """Raise excetion when original host doen't have compatible cpu.""" + + dest = 'dummydest' + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + + # compute service for destination + s_ref = self._create_compute_service(host=i_ref['host']) + # compute service for original host + s_ref2 = self._create_compute_service(host=dest) + + # mocks + driver = self.scheduler.driver + self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') + driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) + rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ + AndRaise(rpc.RemoteError("doesn't have 
compatibility to", "", "")) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(self.context, + i_ref, + dest) + except rpc.RemoteError, e: + c = (e.message.find(_("doesn't have compatibility to")) >= 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + +class FakeZone(object): + def __init__(self, id, api_url, username, password): + self.id = id + self.api_url = api_url + self.username = username + self.password = password + + +def zone_get_all(context): + return [ + FakeZone(1, 'http://example.com', 'bob', 'xxx'), + ] + + +class FakeRerouteCompute(api.reroute_compute): + def _call_child_zones(self, zones, function): + return [] + + def get_collection_context_and_id(self, args, kwargs): + return ("servers", None, 1) + + def unmarshall_result(self, zone_responses): + return dict(magic="found me") + + +def go_boom(self, context, instance): + raise exception.InstanceNotFound(instance_id=instance) + + +def found_instance(self, context, instance): + return dict(name='myserver') + + +class FakeResource(object): + def __init__(self, attribute_dict): + for k, v in attribute_dict.iteritems(): + setattr(self, k, v) + + def pause(self): + pass + + +class ZoneRedirectTest(test.TestCase): + def setUp(self): + super(ZoneRedirectTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + + self.stubs.Set(db, 'zone_get_all', zone_get_all) + + self.enable_zone_routing = FLAGS.enable_zone_routing + FLAGS.enable_zone_routing = True + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.enable_zone_routing = self.enable_zone_routing + super(ZoneRedirectTest, self).tearDown() + + def test_trap_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(found_instance)(None, None, 1) + except api.RedirectResult, e: + self.fail(_("Successful database hit should succeed")) + + def test_trap_not_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(go_boom)(None, None, 1) + self.assertFail(_("Should have rerouted.")) + except api.RedirectResult, e: + self.assertEquals(e.results['magic'], 'found me') + + def test_routing_flags(self): + FLAGS.enable_zone_routing = False + decorator = FakeRerouteCompute("foo") + self.assertRaises(exception.InstanceNotFound, decorator(go_boom), + None, None, 1) + + def test_get_collection_context_and_id(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.get_collection_context_and_id( + (None, 10, 20), {}), ("servers", 10, 20)) + self.assertEquals(decorator.get_collection_context_and_id( + (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) + self.assertEquals(decorator.get_collection_context_and_id( + (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) + + def test_unmarshal_single_server(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.unmarshall_result([]), {}) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, b=2)), ]), + dict(server=dict(a=1, b=2))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, _b=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, manager=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(_a=1, manager=2)), ]), + dict(server={})) + + +class FakeServerCollection(object): + def get(self, instance_id): + 
return FakeResource(dict(a=10, b=20)) + + def find(self, name): + return FakeResource(dict(a=11, b=22)) + + +class FakeEmptyServerCollection(object): + def get(self, f): + raise novaclient.NotFound(1) + + def find(self, name): + raise novaclient.NotFound(2) + + +class FakeNovaClient(object): + def __init__(self, collection): + self.servers = collection + + +class DynamicNovaClientTest(test.TestCase): + def test_issue_novaclient_command_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "get", 100).a, 10) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "find", "name").b, 22) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "pause", 100), None) + + def test_issue_novaclient_command_not_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "get", 100), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "find", "name"), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. + expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py new file mode 100644 index 000000000..37169fb97 --- /dev/null +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -0,0 +1,121 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 1a2a86a79..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filter Drivers. -""" - -import json - -from nova import exception -from nova import flags -from nova import test -from nova.scheduler import host_filter - -FLAGS = flags.FLAGS - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ - 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag - super(HostFilterTestCase, self).tearDown() - - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... 
- try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: - pass - - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_driver(self): - driver = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - driver.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) - - try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py deleted file mode 100644 index c8ce7892f..000000000 --- a/nova/tests/test_least_cost_scheduler.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Least Cost Scheduler -""" - -from nova import flags -from nova import test -from nova.scheduler import least_cost - -MB = 1024 * 1024 -FLAGS = flags.FLAGS - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100) - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - -class LeastCostSchedulerTestCase(test.TestCase): - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... 
don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - #self.old_flag = FLAGS.default_host_filter_driver - #FLAGS.default_host_filter_driver = \ - # 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - #FLAGS.default_host_filter_driver = self.old_flag - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, request_spec, hosts) - - def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=1, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 2 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=2, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn' - ] - FLAGS.fill_first_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [] - for idx, (hostname, caps) in enumerate(hosts): - # Costs are normalized so over 10 hosts, each host with increasing - # free ram will cost 1/N more. Since the lowest cost host has some - # free ram, we add in the 1/N for the base_cost - weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) - - self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py deleted file mode 100644 index 54b3f80fb..000000000 --- a/nova/tests/test_scheduler.py +++ /dev/null @@ -1,1118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler -""" - -import datetime -import mox -import novaclient.exceptions -import stubout -import webob - -from mox import IgnoreArg -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import api -from nova.scheduler import manager -from nova.scheduler import driver -from nova.compute import power_state -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') -flags.DECLARE('stub_network', 'nova.compute.manager') -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') - - def _create_compute_service(self): - """Create compute-manager(ComputeNode and Service record).""" - ctxt = context.get_admin_context() - dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - s_ref = db.service_create(ctxt, dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - db.compute_node_create(ctxt, dic) - - return db.service_get(ctxt, s_ref['id']) - - def _create_instance(self, **kwargs): - """Create a test instance""" - ctxt = context.get_admin_context() - inst = {} - inst['user_id'] = 'admin' - inst['project_id'] = kwargs.get('project_id', 'fake') - inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['memory_mb'] = kwargs.get('memory_mb', 10) - inst['local_gb'] = kwargs.get('local_gb', 20) - return db.instance_create(ctxt, inst) - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - def test_show_host_resources_host_not_exit(self): - """A host given as an argument does not 
exists.""" - - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - - self.assertRaises(exception.NotFound, scheduler.show_host_resources, - ctxt, dest) - #TODO(bcwaldon): reimplement this functionality - #c1 = (e.message.find(_("does not exist or is not a " - # "compute node.")) >= 0) - - def _dic_is_equal(self, dic1, dic2, keys=None): - """Compares 2 dictionary contents(Helper method)""" - if not keys: - keys = ['vcpus', 'memory_mb', 'local_gb', - 'vcpus_used', 'memory_mb_used', 'local_gb_used'] - - for key in keys: - if not (dic1[key] == dic2[key]): - return False - return True - - def test_show_host_resources_no_project(self): - """No instance are running on the given host.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - # result checking - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'] == {} - self.assertTrue(c1 and c2 and c3) - db.service_destroy(ctxt, s_ref['id']) - - def test_show_host_resources_works_correctly(self): - """Show_host_resources() works correctly as expected.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) - i_ref2 = self._create_instance(project_id='p-02', vcpus=3, - host=s_ref['host']) - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'].keys() == ['p-01', 'p-02'] - keys = ['vcpus', 'memory_mb', 'local_gb'] - c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) - c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) - self.assertTrue(c1 and c2 and c3 and c4 and c5) - - db.service_destroy(ctxt, s_ref['id']) - db.instance_destroy(ctxt, i_ref1['id']) - db.instance_destroy(ctxt, i_ref2['id']) - - -class ZoneSchedulerTestCase(test.TestCase): - """Test case for zone scheduler""" - def setUp(self): - super(ZoneSchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') - - def _create_service_model(self, **kwargs): - service = db.sqlalchemy.models.Service() - service.host = kwargs['host'] - service.disabled = False - service.deleted = False - service.report_count = 0 - service.binary = 'nova-compute' - service.topic = 'compute' - service.id = kwargs['id'] - service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() - return service - - def test_with_two_zones(self): - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - service_list = [self._create_service_model(id=1, - host='host1', - zone='zone1'), - self._create_service_model(id=2, - host='host2', - zone='zone2'), - self._create_service_model(id=3, - host='host3', - zone='zone2'), - self._create_service_model(id=4, - host='host4', - zone='zone2'), - self._create_service_model(id=5, - host='host5', - zone='zone2')] - self.mox.StubOutWithMock(db, 'service_get_all_by_topic') - arg = IgnoreArg() - db.service_get_all_by_topic(arg, arg).AndReturn(service_list) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast(ctxt, - 'compute.host1', - 
{'method': 'run_instance', - 'args': {'instance_id': 'i-ffffffff', - 'availability_zone': 'zone1'}}) - self.mox.ReplayAll() - scheduler.run_instance(ctxt, - 'compute', - instance_id='i-ffffffff', - availability_zone='zone1') - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True, - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() - - def _create_instance(self, **kwargs): - """Create a test instance""" - inst = {} - inst['image_id'] = 1 - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['ami_launch_index'] = 0 - inst['availability_zone'] = kwargs.get('availability_zone', None) - inst['host'] = kwargs.get('host', 'dummy') - inst['memory_mb'] = kwargs.get('memory_mb', 20) - inst['local_gb'] = kwargs.get('local_gb', 30) - inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['size'] = 1 - vol['availability_zone'] = 'test' - return db.volume_create(self.context, vol)['id'] - - def _create_compute_service(self, **kwargs): - """Create a compute service.""" - - dic = {'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - dic['host'] = kwargs.get('host', 'dummy') - s_ref = db.service_create(self.context, dic) - if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) - dic['created_at'] = kwargs.get('created_at', t) - dic['updated_at'] = kwargs.get('updated_at', t) - db.service_update(self.context, s_ref['id'], dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) - dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') - dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_node_create(self.context, dic) - return db.service_get(self.context, s_ref['id']) - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - 
compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = 
self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = 
self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - db.instance_destroy(self.context, instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - 
db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() - - def test_scheduler_live_migration_with_volume(self): - """scheduler_live_migration() works correctly as expected. - - Also, checks instance state is changed from 'running' -> 'migrating'. - - """ - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, dic) - - # cannot check 2nd argument b/c the addresses of instance object - # is different. - driver_i = self.scheduler.driver - nocare = mox.IgnoreArg() - self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') - driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} - rpc.cast(self.context, - db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), - {"method": 'live_migration', "args": kwargs}) - - self.mox.ReplayAll() - self.scheduler.live_migration(self.context, FLAGS.compute_topic, - instance_id=instance_id, - dest=i_ref['host']) - - i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') - db.instance_destroy(self.context, instance_id) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_instance_not_running(self): - """The instance given by instance_id is not running.""" - - instance_id = self._create_instance(state_description='migrating') - i_ref = db.instance_get(self.context, instance_id) - - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not running') > 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - - def test_live_migration_src_check_volume_node_not_alive(self): - """Raise exception when volume node is not alive.""" - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, {'instance_id': instance_id, - 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) - dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', - 'topic': 'volume', 'report_count': 0} - s_ref = db.service_create(self.context, dic) - - 
self.assertRaises(exception.VolumeServiceUnavailable, - self.scheduler.driver.schedule_live_migration, - self.context, instance_id, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_compute_node_not_alive(self): - """Confirms src-compute node is alive.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_src_check, - self.context, i_ref) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_src_check_works_correctly(self): - """Confirms this method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - ret = self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_not_alive(self): - """Confirms exception raises in case dest host does not exist.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_same_host(self): - """Confirms exceptioin raises in case dest and src is same host.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - self.assertRaises(exception.UnableToMigrateToSelf, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_lack_memory(self): - """Confirms exception raises when dest doesn't have enough memory.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) - - self.assertRaises(exception.MigrationError, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_works_correctly(self): - """Confirms method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=5) - - ret = self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - 
- def test_live_migration_common_check_service_orig_not_exists(self): - """Destination host does not exist.""" - - dest = 'dummydest' - # mocks for live_migration_common_check() - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t1, updated_at=t1, - host=dest) - - # mocks for mounted_on_same_shared_storage() - fpath = '/test/20110127120000' - self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) - topic = FLAGS.compute_topic - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(self.context, topic, dest), - {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'check_shared_storage_test_file', - "args": {'filename': fpath}}) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, dest), - {"method": 'cleanup_shared_storage_test_file', - "args": {'filename': fpath}}) - - self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_different_hypervisor(self): - """Original host and dest host has different hypervisor type.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_service_different_version(self): - """Original host and dest host has different hypervisor version.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, - hypervisor_version=12002) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_checking_cpuinfo_fail(self): - """Raise excetion when original host doen't have compatible cpu.""" - - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for 
destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) - rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), - {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) - - self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except rpc.RemoteError, e: - c = (e.message.find(_("doesn't have compatibility to")) >= 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - -class FakeZone(object): - def __init__(self, id, api_url, username, password): - self.id = id - self.api_url = api_url - self.username = username - self.password = password - - -def zone_get_all(context): - return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), - ] - - -class FakeRerouteCompute(api.reroute_compute): - def _call_child_zones(self, zones, function): - return [] - - def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) - - def unmarshall_result(self, zone_responses): - return dict(magic="found me") - - -def go_boom(self, context, instance): - raise exception.InstanceNotFound(instance_id=instance) - - -def found_instance(self, context, instance): - return dict(name='myserver') - - -class FakeResource(object): - def __init__(self, attribute_dict): - for k, v in attribute_dict.iteritems(): - setattr(self, k, v) - - def pause(self): - pass - - -class ZoneRedirectTest(test.TestCase): - def setUp(self): - super(ZoneRedirectTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - - self.stubs.Set(db, 'zone_get_all', zone_get_all) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing - super(ZoneRedirectTest, self).tearDown() - - def test_trap_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(found_instance)(None, None, 1) - except api.RedirectResult, e: - self.fail(_("Successful database hit should succeed")) - - def test_trap_not_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) - except api.RedirectResult, e: - self.assertEquals(e.results['magic'], 'found me') - - def test_routing_flags(self): - FLAGS.enable_zone_routing = False - decorator = FakeRerouteCompute("foo") - self.assertRaises(exception.InstanceNotFound, decorator(go_boom), - None, None, 1) - - def test_get_collection_context_and_id(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.get_collection_context_and_id( - (None, 10, 20), {}), ("servers", 10, 20)) - self.assertEquals(decorator.get_collection_context_and_id( - (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) - self.assertEquals(decorator.get_collection_context_and_id( - (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) - - def test_unmarshal_single_server(self): - decorator = api.reroute_compute("foo") - 
self.assertEquals(decorator.unmarshall_result([]), {}) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, b=2)), ]), - dict(server=dict(a=1, b=2))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, _b=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, manager=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(_a=1, manager=2)), ]), - dict(server={})) - - -class FakeServerCollection(object): - def get(self, instance_id): - return FakeResource(dict(a=10, b=20)) - - def find(self, name): - return FakeResource(dict(a=11, b=22)) - - -class FakeEmptyServerCollection(object): - def get(self, f): - raise novaclient.NotFound(1) - - def find(self, name): - raise novaclient.NotFound(2) - - -class FakeNovaClient(object): - def __init__(self, collection): - self.servers = collection - - -class DynamicNovaClientTest(test.TestCase): - def test_issue_novaclient_command_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "get", 100).a, 10) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "pause", 100), None) - - def test_issue_novaclient_command_not_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "get", 100), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "any", "name"), None) - - -class FakeZonesProxy(object): - def do_something(*args, **kwargs): - return 42 - - def raises_exception(*args, **kwargs): - raise Exception('testing') - - -class FakeNovaClientOpenStack(object): - def __init__(self, *args, **kwargs): - self.zones = FakeZonesProxy() - - def authenticate(self): - pass - - -class CallZoneMethodTest(test.TestCase): - def setUp(self): - super(CallZoneMethodTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) - - def tearDown(self): - self.stubs.UnsetAll() - super(CallZoneMethodTest, self).tearDown() - - def test_call_zone_method(self): - context = {} - method = 'do_something' - results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) - - def test_call_zone_method_not_present(self): - context = {} - method = 'not_present' - self.assertRaises(AttributeError, api.call_zone_method, - context, method) - - def test_call_zone_method_generates_exception(self): - context = {} - method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. 
- expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py deleted file mode 100644 index 37169fb97..000000000 --- a/nova/tests/test_zone_aware_scheduler.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -from nova import test -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'ram': 1000} - }, - 'host2': { - 'compute': {'ram': 2000} - }, - 'host3': { - 'compute': {'ram': 3000} - } - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs): - return [] - - -def fake_call_zone_method(context, method, specs): - return [ - ('zone1', [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - ('zone2', [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - ('zone3', [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) -- cgit From 967d82669ae07b2add3289e3decad60aea2657d8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:49:21 -0500 Subject: Moving into scheduler subdir and refactoring out common code --- nova/tests/scheduler/__init__.py | 0 nova/tests/scheduler/test_host_filter.py | 189 ++++++++++++++++++++++ nova/tests/scheduler/test_least_cost_scheduler.py | 146 +++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 31 ++++ 4 files changed, 366 insertions(+) create mode 100644 nova/tests/scheduler/__init__.py create mode 100644 nova/tests/scheduler/test_host_filter.py create mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 000000000..c3af50a6e --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,189 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter +from nova.tests.scheduler import test_zone_aware_scheduler + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def setUp(self): + super(HostFilterTestCase, self).setUp() + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + super(HostFilterTestCase, self).tearDown() + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 000000000..e0ed61417 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +# TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing +# test setup code +class FakeZoneManager: + pass + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37169fb97..b2cc4fe23 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] -- cgit From 1b610e28e40c77271191349b6bfaa56c8f522c24 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:53:00 -0500 Subject: Small cleanups --- nova/tests/scheduler/test_host_filter.py | 7 +++---- nova/tests/scheduler/test_least_cost_scheduler.py | 10 ++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index c3af50a6e..edbab7ab4 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -27,10 +27,6 @@ from nova.tests.scheduler import test_zone_aware_scheduler FLAGS = flags.FLAGS -class FakeZoneManager: - pass - - class HostFilterTestCase(test.TestCase): """Test case for host filter drivers.""" @@ -48,6 +44,9 @@ class HostFilterTestCase(test.TestCase): rxtx_quota=30000, rxtx_cap=200) + class FakeZoneManager: + pass + self.zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index e0ed61417..506fa62fb 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -63,15 +63,13 @@ class WeightedSumTestCase(test.TestCase): self.assertEqual(expected, costs) -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - class LeastCostSchedulerTestCase(test.TestCase): def setUp(self): super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( -- cgit From 6c151bfbfeb728d6e38f777640d483c1e344113d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 18 May 2011 03:51:25 -0400 Subject: Removed all utils.import_object(FLAGS.image_service) and replaced with utils.get_default_image_service(). 
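For readers skimming the series: the nova/utils.py hunk below introduces the two helpers this subject line refers to. get_default_image_service() instantiates whatever class FLAGS.image_service names, and get_image_service(image_ref) accepts either a plain integer id or a Glance href and returns an (image_service, image_id) pair. A minimal usage sketch, illustrative only; the Glance URL is the example taken from the new docstring:

    from nova import utils

    # Plain integer id: handled by the default image service, i.e. whatever
    # class FLAGS.image_service points at.
    service, image_id = utils.get_image_service(42)

    # Glance-style href: host and port are parsed out of the URL and a
    # GlanceImageService talking to that server is returned instead.
    service, image_id = utils.get_image_service(
        'http://myglanceserver:9292/images/42')

    # Callers that only ever want the flag-configured default:
    default_service = utils.get_default_image_service()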
--- bin/nova-manage | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 5 ++- nova/api/openstack/servers.py | 22 ++++++------ nova/api/openstack/views/servers.py | 10 +++--- nova/compute/api.py | 4 +-- nova/image/s3.py | 4 +-- nova/tests/api/openstack/test_servers.py | 11 +++--- nova/utils.py | 60 +++++++++++++++----------------- nova/virt/images.py | 2 +- nova/virt/libvirt_conn.py | 2 +- 11 files changed, 58 insertions(+), 66 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index db964064d..3f3fd72a6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -905,7 +905,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = utils.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 1eccc0174..f6913ffc6 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -32,7 +32,7 @@ class Controller(common.OpenstackController): """The image metadata API controller for the Openstack API""" def __init__(self): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = utils.get_default_image_service() super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 8d796c284..8a90b4c4d 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -47,11 +47,10 @@ class Controller(common.OpenstackController): :param compute_service: `nova.compute.api:API` :param image_service: `nova.image.service:BaseImageService` - """ - _default_service = utils.import_object(flags.FLAGS.image_service) + """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or _default_service + self._image_service = image_service or utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. 
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bf0f56373..4e8574994 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -64,7 +64,6 @@ class Controller(common.OpenstackController): def __init__(self): self.compute_api = compute.API() - self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() def index(self, req): @@ -75,7 +74,7 @@ class Controller(common.OpenstackController): """ Returns a list of server details for a given user """ return self._items(req, is_detail=True) - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): raise NotImplementedError() def _flavor_id_from_req_data(self, data): @@ -140,13 +139,13 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - requested_image_id = self._image_id_from_req_data(env) + image_ref = self._image_ref_from_req_data(env) try: - (image_service, service_image_id) = utils.get_image_service( - requested_image_id) + (image_service, image_id) = utils.get_image_service( image_ref) - image_id = common.get_image_id_from_image_hash(image_service, - context, requested_image_id) + #TODO: need to assert image exists a better way + #image_id = common.get_image_id_from_image_hash(image_service, + #context, image_ref) except: msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) @@ -188,7 +187,7 @@ class Controller(common.OpenstackController): self._handle_quota_error(error) inst['instance_type'] = inst_type - inst['image_id'] = requested_image_id + inst['image_id'] = image_ref builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) @@ -596,7 +595,7 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): return data['server']['imageId'] def _flavor_id_from_req_data(self, data): @@ -639,9 +638,8 @@ class ControllerV10(Controller): class ControllerV11(Controller): - def _image_id_from_req_data(self, data): - href = data['server']['imageRef'] - return common.get_id_from_href(href) + def _image_ref_from_req_data(self, data): + return data['server']['imageRef'] def _flavor_id_from_req_data(self, data): href = data['server']['flavorRef'] diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0be468edc..70a942594 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -113,7 +113,7 @@ class ViewBuilderV10(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - response['imageId'] = inst['image_id'] + response['imageId'] = int(inst['image_id']) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): @@ -130,9 +130,11 @@ class ViewBuilderV11(ViewBuilder): self.base_url = base_url def _build_image(self, response, inst): - if "image_id" in dict(inst): - image_id = inst.get("image_id") - response["imageRef"] = self.image_builder.generate_href(image_id) + if 'image_id' in dict(inst): + image_id = inst['image_id'] + if utils.is_int(image_id): + image_id = int(image_id) + response['imageRef'] = image_id def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index 930e4efaa..4e7af7421 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -58,9 +58,7 @@ class API(base.Base): def __init__(self, 
image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service + self.image_service = image_service or utils.get_default_image_service() if not network_api: network_api = network.API() self.network_api = network_api diff --git a/nova/image/s3.py b/nova/image/s3.py index c38c58d95..ed685ea51 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -46,9 +46,7 @@ class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): - if service is None: - service = utils.import_object(FLAGS.image_service) - self.service = service + self.service = service or utils.get_default_image_service() self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e8182b6a9..cfa8d2556 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -628,13 +628,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' - image_ref_local = '2' + image_ref = 2 flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref_local, + 'imageRef': image_ref, 'flavorRef': flavor_ref, }, } @@ -852,7 +851,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -866,7 +865,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10') + self.assertEqual(s['imageRef'], 10) self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1') self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -898,7 +897,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) def test_server_pause(self): diff --git a/nova/utils.py b/nova/utils.py index fff916527..3c8c82281 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,6 +35,7 @@ import struct import sys import time import types +from urlparse import urlparse from xml.sax import saxutils from eventlet import event @@ -42,6 +43,7 @@ from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess +import nova from nova import exception from nova import flags from nova import log as logging @@ -727,57 +729,53 @@ def parse_server_string(server_str): def is_int(x): - """ Return if passed in variable is integer or not """ return re.match(r'\d+$', str(x)) def parse_image_ref(image_ref): - """ - Parse an imageRef and return (id, host, port) + """Parse an image href into composite parts. 
If the image_ref passed in is an integer, it will return (image_ref, None, None), otherwise it will - return (id, host, port) + return (image_id, host, port) - image_ref - imageRef for an image + :param image_ref: href or id of an image """ - if is_int(image_ref): - return (image_ref, None, None) + return (int(image_ref), None, None) o = urlparse(image_ref) - # Default to port 80 if not passed, should this be 9292? - port = o.port or 80 + port = o.port host = o.netloc.split(':', 1)[0] - id = o.path.split('/')[-1] - - return (id, host, port) + image_id = o.path.split('/')[-1] + if is_int(image_id): + image_id = int(image_id) + else: + raise Exception(_('image_ref [%s] is missing a proper id') % image_ref) + return (image_id, host, port) -def get_image_service(image_ref=None): - """ - Get the proper image_service for an image_id - Returns (image_service, image_id) - image_ref - image ref/id for an image - """ +def get_default_image_service(): ImageService = import_class(FLAGS.image_service) + return ImageService() - if not image_ref: - return (ImageService(), None) - - (image_id, host, port) = parse_image_ref(image_ref) +def get_image_service(image_ref): + """Get the proper image_service and id for the given image_ref. - image_service = None + The image_ref param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_ref is an int, then the default image service is returned. - if host: - GlanceImageService = import_class(FLAGS.glance_image_service) - GlanceClient = import_class('glance.client.Client') + :param image_ref: image ref/id for an image + :returns: a tuple of the form (image_service, image_id) - glance_client = GlanceClient(host, port) - image_service = GlanceImageService(glance_client) - else: - image_service = ImageService() + """ + if is_int(image_ref): + return (get_default_image_service(), int(image_ref)) - return (image_service, id) + (image_id, host, port) = parse_image_ref(image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) + return (image_service, image_id) diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..0828a1fd0 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -45,7 +45,7 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - image_service = utils.import_object(FLAGS.image_service) + image_service = utils.get_default_image_service() with open(path, "wb") as image_file: elevated = context.get_admin_context() metadata = image_service.get(elevated, image_id, image_file) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index fa918b0a3..23fa5bdfc 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -448,7 +448,7 @@ class LibvirtConnection(driver.ComputeDriver): to support this command. 
""" - image_service = utils.import_object(FLAGS.image_service) + image_service = utils.get_default_image_service() virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() -- cgit From 375fdc745fc5915098f11585ccd6a91e86747086 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 09:50:18 -0400 Subject: get integrated server_tests passing --- nova/flags.py | 3 +++ nova/image/fake.py | 2 +- nova/tests/integrated/integrated_helpers.py | 4 +--- nova/tests/integrated/test_servers.py | 17 +++++++++-------- nova/utils.py | 5 +++-- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 519793643..2481a10af 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,6 +362,9 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') +DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', + 'The service to use for retrieving and searching for ' + + 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..8918c0c14 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -35,7 +35,7 @@ FLAGS = flags.FLAGS class FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" - def __init__(self): + def __init__(self, client=None): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 2e5d67017..e6efc16c5 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -185,6 +185,7 @@ class _IntegratedTestBase(test.TestCase): """An opportunity to setup flags, before the services are started.""" f = {} f['image_service'] = 'nova.image.fake.FakeImageService' + f['glance_image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f @@ -201,9 +202,6 @@ class _IntegratedTestBase(test.TestCase): LOG.warning("imageRef not yet in images output") image_ref = image['id'] - # TODO(justinsb): This is FUBAR - image_ref = abs(hash(image_ref)) - image_ref = 'http://fake.server/%s' % image_ref # We now have a valid imageId diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..ba764907a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -51,14 +51,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without an imageRef, this throws 500. # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # With an invalid imageRef, this throws 500. 
server['imageRef'] = self.user.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Add a valid imageId/imageRef server['imageId'] = good_server.get('imageId') @@ -66,8 +66,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without flavorId, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Set a valid flavorId/flavorRef server['flavorRef'] = good_server.get('flavorRef') @@ -75,8 +75,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without a name, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Set a valid server name server['name'] = good_server['name'] @@ -85,6 +85,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] + return # Check it's there found_server = self.api.get_server(created_server_id) diff --git a/nova/utils.py b/nova/utils.py index 3c8c82281..46dfc82e9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -776,6 +776,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) (image_id, host, port) = parse_image_ref(image_ref) - glance_client = nova.image.glance.GlanceClient(host, port) - image_service = nova.image.glance.GlanceImageService(glance_client) + glance_client = import_class('nova.image.glance.GlanceClient')(host, + port) + image_service = import_class(FLAGS.glance_image_service)(glance_client) return (image_service, image_id) -- cgit From d3f67f97d81185158f611c3bc9bd5542a7fed788 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 09:52:37 -0400 Subject: fixed test_servers small tests as well --- nova/tests/api/openstack/test_servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index cfa8d2556..6982f87a8 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -464,6 +464,8 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 + + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) -- cgit From a9738fe5196cc1ed0715c3d96c692e782e77fec6 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:10:10 -0400 Subject: made ImageControllerWithGlanceServiceTests pass --- nova/api/openstack/images.py | 2 +- nova/api/openstack/servers.py | 7 +++---- nova/utils.py | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 8a90b4c4d..6d3e50b56 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -88,7 +88,7 @@ class Controller(common.OpenstackController): image_id) image = image_service.show(context, 
service_image_id) except exception.NotFound: - explanation = _("Image '%d' not found.") % (image_id) + explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) return dict(image=self.get_builder(req).build(image, detail=True)) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4e8574994..ca13a8669 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -141,7 +141,9 @@ class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - (image_service, image_id) = utils.get_image_service( image_ref) + (image_service, image_id) = utils.get_image_service(image_ref) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( + req, image_id) #TODO: need to assert image exists a better way #image_id = common.get_image_id_from_image_hash(image_service, @@ -150,9 +152,6 @@ class Controller(common.OpenstackController): msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) - kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) - personality = env['server'].get('personality') injected_files = [] if personality: diff --git a/nova/utils.py b/nova/utils.py index 46dfc82e9..04acfc417 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -752,7 +752,8 @@ def parse_image_ref(image_ref): if is_int(image_id): image_id = int(image_id) else: - raise Exception(_('image_ref [%s] is missing a proper id') % image_ref) + raise exception.ImageNotFound( + _('image_ref [%s] is missing a proper id') % image_ref) return (image_id, host, port) -- cgit From 3c36abb43eea4ff7a740278085690aa057aba502 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:16:51 -0400 Subject: fixed ComputeTestCase tests --- nova/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/utils.py b/nova/utils.py index 04acfc417..dcaaab602 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -773,6 +773,7 @@ def get_image_service(image_ref): :returns: a tuple of the form (image_service, image_id) """ + image_ref = image_ref or 0 if is_int(image_ref): return (get_default_image_service(), int(image_ref)) -- cgit From 96c888312fb7a2ba2cc9120282d29128a18342a8 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:41:33 -0400 Subject: fixed QuotaTestCases --- nova/flags.py | 2 +- nova/image/fake.py | 18 ++++++++++++++---- nova/tests/test_quota.py | 2 ++ nova/utils.py | 4 +--- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 2481a10af..d3f72d412 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,7 +362,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', +DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for ' + 'glance images.') diff --git a/nova/image/fake.py b/nova/image/fake.py index 8918c0c14..3ada0d8d0 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -40,7 +40,7 @@ class FakeImageService(service.BaseImageService): # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) - image = {'id': '123456', + image1 = {'id': '123456', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, @@ -49,7 +49,18 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} - self.create(None, image) + + image2 = {'id': 'fake', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + self.create(None, image1) + self.create(None, image2) super(FakeImageService, self).__init__() def index(self, context): @@ -66,7 +77,6 @@ class FakeImageService(service.BaseImageService): Returns a dict containing image data for the given opaque image id. """ - image_id = int(image_id) image = self.images.get(image_id) if image: return copy.deepcopy(image) @@ -80,7 +90,7 @@ class FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. """ - image_id = int(data['id']) + image_id = data['id'] if self.images.get(image_id): raise exception.Duplicate() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..9ede0786f 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -280,6 +280,7 @@ class QuotaTestCase(test.TestCase): FLAGS.quota_max_injected_files) def _create_with_injected_files(self, files): + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, @@ -287,6 +288,7 @@ class QuotaTestCase(test.TestCase): injected_files=files) def test_no_injected_files(self): + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, instance_type=inst_type, image_id='fake') diff --git a/nova/utils.py b/nova/utils.py index dcaaab602..252f5e9a6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -749,11 +749,9 @@ def parse_image_ref(image_ref): port = o.port host = o.netloc.split(':', 1)[0] image_id = o.path.split('/')[-1] + if is_int(image_id): image_id = int(image_id) - else: - raise exception.ImageNotFound( - _('image_ref [%s] is missing a proper id') % image_ref) return (image_id, host, port) -- cgit From d94d040986e00409ed031b591b39a43edc111e28 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:45:33 -0400 Subject: fixed api.openstack.test_servers tests...again --- nova/api/openstack/servers.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ca13a8669..17d286748 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -184,6 +184,10 @@ class Controller(common.OpenstackController): injected_files=injected_files) except quota.QuotaError as error: self._handle_quota_error(error) + except exception.ImageNotFound as error: + msg = _("Can not find requested image") + return faults.Fault(exc.HTTPBadRequest(msg)) + inst['instance_type'] = inst_type inst['image_id'] = image_ref -- cgit From 980ceb71fdc97e92954239b843e7cec60c786a97 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:59:22 -0400 Subject: oops, took out 
commented out tests in integrated.test_servers and made tests pass again --- nova/image/fake.py | 4 ++-- nova/tests/integrated/test_servers.py | 17 ++++++++--------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index 3ada0d8d0..2c0b87952 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -77,7 +77,7 @@ class FakeImageService(service.BaseImageService): Returns a dict containing image data for the given opaque image id. """ - image = self.images.get(image_id) + image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warn('Unable to find image id %s. Have images: %s', @@ -90,7 +90,7 @@ class FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. """ - image_id = data['id'] + image_id = str(data['id']) if self.images.get(image_id): raise exception.Duplicate() diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index ba764907a..e89d0100a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -51,14 +51,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without an imageRef, this throws 500. # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # With an invalid imageRef, this throws 500. server['imageRef'] = self.user.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Add a valid imageId/imageRef server['imageId'] = good_server.get('imageId') @@ -66,8 +66,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without flavorId, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Set a valid flavorId/flavorRef server['flavorRef'] = good_server.get('flavorRef') @@ -75,8 +75,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without a name, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Set a valid server name server['name'] = good_server['name'] @@ -85,7 +85,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase): LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] - return # Check it's there found_server = self.api.get_server(created_server_id) -- cgit From 9407bbfc61f165bca0a854d59dd516193334a4b4 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 11:13:22 -0400 Subject: fix pep8 issues --- nova/api/openstack/images.py | 3 ++- nova/api/openstack/servers.py | 1 - nova/flags.py | 2 +- nova/tests/api/openstack/test_servers.py | 1 - nova/utils.py | 3 ++- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 6d3e50b56..c2511b99f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,7 +50,8 @@ 
class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or utils.get_default_image_service() + self._image_service = image_service or \ + utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17d286748..ae7df3fe5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -188,7 +188,6 @@ class Controller(common.OpenstackController): msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) - inst['instance_type'] = inst_type inst['image_id'] = image_ref diff --git a/nova/flags.py b/nova/flags.py index d3f72d412..b45d252c7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -363,7 +363,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + + 'The service to use for retrieving and searching for ' + 'glance images.') DEFINE_string('host', socket.gethostname(), diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6982f87a8..bced2b910 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -464,7 +464,6 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) diff --git a/nova/utils.py b/nova/utils.py index 252f5e9a6..82d0dd7a4 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -760,6 +760,7 @@ def get_default_image_service(): ImageService = import_class(FLAGS.image_service) return ImageService() + def get_image_service(image_ref): """Get the proper image_service and id for the given image_ref. 
@@ -776,7 +777,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) (image_id, host, port) = parse_image_ref(image_ref) - glance_client = import_class('nova.image.glance.GlanceClient')(host, + glance_client = import_class('nova.image.glance.GlanceClient')(host, port) image_service = import_class(FLAGS.glance_image_service)(glance_client) return (image_service, image_id) -- cgit From 048dda438c9670998e9c91f6a906373a12ea294d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 13:03:05 -0400 Subject: fixed bug with compute_api not having actual image_ref to use proper image service --- nova/api/openstack/servers.py | 1 + nova/compute/api.py | 7 ++++--- nova/image/fake.py | 22 ++++++++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ae7df3fe5..a4e679242 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -174,6 +174,7 @@ class Controller(common.OpenstackController): context, inst_type, image_id, + image_ref=image_ref, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, diff --git a/nova/compute/api.py b/nova/compute/api.py index 4e7af7421..40d011132 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -132,7 +132,7 @@ class API(base.Base): display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, - injected_files=None): + injected_files=None, image_ref=None): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. @@ -154,7 +154,8 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = utils.get_image_service( + image_ref or image_id) image = image_service.show(context, service_image_id) os_type = None @@ -198,7 +199,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, + 'image_id': image_ref or image_id, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/image/fake.py b/nova/image/fake.py index 2c0b87952..2a60c7743 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -59,8 +59,30 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} + + image3 = {'id': '2', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + + image4 = {'id': '1', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} self.create(None, image1) self.create(None, image2) + self.create(None, image3) + self.create(None, image4) super(FakeImageService, self).__init__() def index(self, context): -- cgit From 62328a6437f238228152f460b1bd53e7254aa89c Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 16:26:42 -0400 Subject: libvirt fixes to use new image_service stuff --- nova/virt/images.py | 4 ++-- 
nova/virt/libvirt_conn.py | 14 +++++++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 0828a1fd0..fd433ea0c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -45,10 +45,10 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - image_service = utils.get_default_image_service() + (image_service, service_image_id) = utils.get_image_service(image_id) with open(path, "wb") as image_file: elevated = context.get_admin_context() - metadata = image_service.get(elevated, image_id, image_file) + metadata = image_service.get(elevated, service_image_id, image_file) return metadata diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 23fa5bdfc..ab47493fd 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,6 +36,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. """ +import hashlib import multiprocessing import os import random @@ -843,7 +844,9 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = '%08x' % int(disk_images['kernel_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['kernel_id']) + fname = fname_hash.hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -851,7 +854,9 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = '%08x' % int(disk_images['ramdisk_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['ramdisk_id']) + fname = fname_hash.hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, @@ -859,7 +864,10 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - root_fname = '%08x' % int(disk_images['image_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['image_id']) + root_fname = fname_hash.hexdigest() + size = FLAGS.minimum_root_size inst_type_id = inst['instance_type_id'] -- cgit From 2c6c184138b0d8c650496e0e8d033c85a2e2dec1 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 18 May 2011 20:46:21 +0000 Subject: fix typo in udev rule --- plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules index 0dfb029eb..b179f0847 100644 --- a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules +++ b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules @@ -1,3 +1,3 @@ -SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" # is this one needed? 
-#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" -- cgit From ef42fa95197e7b0f73e04322456bbbdedaf3e2b3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 May 2011 14:15:36 -0700 Subject: log any exceptions that get thrown trying to retrieve metadata --- nova/api/ec2/metadatarequesthandler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 28f99b0ef..481e34e12 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -71,7 +71,11 @@ class MetadataRequestHandler(wsgi.Application): remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) - meta_data = cc.get_metadata(remote_address) + try: + meta_data = cc.get_metadata(remote_address) + except Exception: + LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) + raise if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 38ba122d9eb67c699ea0c10eab5961c3b4c25d81 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 May 2011 14:23:09 -0700 Subject: use a manual 500 with error text instead of traceback for failure --- nova/api/ec2/metadatarequesthandler.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 481e34e12..720f264a4 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -23,6 +23,7 @@ import webob.exc from nova import log as logging from nova import flags +from nova import utils from nova import wsgi from nova.api.ec2 import cloud @@ -75,7 +76,12 @@ class MetadataRequestHandler(wsgi.Application): meta_data = cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) - raise + resp = webob.Response() + resp.status = 500 + message = _('An unknown error has occurred. ' + 'Please try your request again.') + resp.body = str(utils.utf8(message)) + return resp if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 76c98e277a405127d85cf2c264a20ec3a18e023a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 17:30:43 -0400 Subject: hackish patch to fix hrefs asking for their metadata in boot (this really shouldnt be in ec2 api?) 
--- nova/api/ec2/cloud.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1fa07d042..06b5f662f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -157,7 +157,12 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + try: + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + except ValueError: + # not really an ec2_id here + image_ec2_id = instance_ref['image_id'] + data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { -- cgit From c69a1b0d9ef15ecc06217ec2c1ec4d73a755d14b Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 17:57:44 -0400 Subject: return dummy id per vishs suggestion --- nova/api/ec2/cloud.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 06b5f662f..950b72e72 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -157,12 +157,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - try: - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) - except ValueError: - # not really an ec2_id here - image_ec2_id = instance_ref['image_id'] - + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -907,7 +902,12 @@ class CloudController(object): def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' - return ec2utils.id_to_ec2_id(int(image_id), template=template) + try: + return ec2utils.id_to_ec2_id(int(image_id), template=template) + except ValueError: + #TODO(wwolf): once we have ec2_id -> glance_id mapping + # in place, this wont be necessary + return "ami-00000000" def _get_image(self, context, ec2_id): try: -- cgit From 64e9aa6daa416662a25eeab0d943b23906695e92 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 18:09:37 -0400 Subject: default to port 80 if it isnt in the href/uri --- nova/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/utils.py b/nova/utils.py index 82d0dd7a4..85934813e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -746,7 +746,7 @@ def parse_image_ref(image_ref): return (int(image_ref), None, None) o = urlparse(image_ref) - port = o.port + port = o.port or 80 host = o.netloc.split(':', 1)[0] image_id = o.path.split('/')[-1] -- cgit From 5e722ea7b912f189c0a3b9434e9a38d08095ad00 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 18 May 2011 19:13:22 -0400 Subject: refactoring wsgi to separate controller/serialization/deserialization logic; creating osapi-specific module --- nova/api/openstack/__init__.py | 43 ++--- nova/api/openstack/accounts.py | 33 ++-- nova/api/openstack/backup_schedules.py | 27 ++- nova/api/openstack/consoles.py | 26 ++- nova/api/openstack/flavors.py | 34 ++-- nova/api/openstack/image_metadata.py | 19 +- nova/api/openstack/images.py | 40 +++-- nova/api/openstack/ips.py | 33 ++-- nova/api/openstack/limits.py | 50 ++++-- nova/api/openstack/server_metadata.py | 21 ++- nova/api/openstack/servers.py | 124 +++++++------ nova/api/openstack/shared_ip_groups.py | 28 +-- nova/api/openstack/users.py | 43 +++-- 
nova/api/openstack/wsgi.py | 291 +++++++++++++++++++++++++++++++ nova/api/openstack/zones.py | 33 ++-- nova/tests/api/openstack/test_limits.py | 4 +- nova/tests/api/openstack/test_servers.py | 2 - nova/tests/api/test_wsgi.py | 135 -------------- nova/tests/integrated/test_xml.py | 4 +- 19 files changed, 624 insertions(+), 366 deletions(-) create mode 100644 nova/api/openstack/wsgi.py diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 348b70d5b..fbbd99cb9 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -26,7 +26,7 @@ import webob.exc from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import accounts from nova.api.openstack import faults from nova.api.openstack import backup_schedules @@ -40,6 +40,7 @@ from nova.api.openstack import servers from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users +from nova.api.openstack import wsgi from nova.api.openstack import zones @@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api', 'When True, this API service will accept admin operations.') -class FaultWrapper(wsgi.Middleware): +class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify(RequestClass=wsgi.Request) @@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware): return faults.Fault(exc) -class APIRouter(wsgi.Router): +class APIRouter(base_wsgi.Router): """ Routes requests on the OpenStack API to the appropriate controller and method. @@ -97,18 +98,20 @@ class APIRouter(wsgi.Router): server_members['reset_network'] = 'POST' server_members['inject_network_info'] = 'POST' - mapper.resource("zone", "zones", controller=zones.Controller(), + mapper.resource("zone", "zones", + controller=zones.resource_factory(), collection={'detail': 'GET', 'info': 'GET'}), - mapper.resource("user", "users", controller=users.Controller(), + mapper.resource("user", "users", + controller=users.resource_factory(), collection={'detail': 'GET'}) mapper.resource("account", "accounts", - controller=accounts.Controller(), + controller=accounts.resource_factory(), collection={'detail': 'GET'}) mapper.resource("console", "consoles", - controller=consoles.Controller(), + controller=consoles.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) @@ -121,31 +124,31 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV10(), + controller=servers.resource_factory('1.0'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV10(), + controller=images.resource_factory('1.0'), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV10(), + controller=flavors.resource_factory('1.0'), collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, - controller=shared_ip_groups.Controller()) + controller=shared_ip_groups.resource_factory()) mapper.resource("backup_schedule", "backup_schedule", - controller=backup_schedules.Controller(), + controller=backup_schedules.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("limit", "limits", - 
controller=limits.LimitsControllerV10()) + controller=limits.resource_factory('1.0')) - mapper.resource("ip", "ips", controller=ips.Controller(), + mapper.resource("ip", "ips", controller=ips.resource_factory(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', collection_name='servers')) @@ -157,27 +160,27 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV11(), + controller=servers.resource_factory('1.1'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV11(), + controller=images.resource_factory('1.1'), collection={'detail': 'GET'}) mapper.resource("image_meta", "meta", - controller=image_metadata.Controller(), + controller=image_metadata.resource_factory(), parent_resource=dict(member_name='image', collection_name='images')) mapper.resource("server_meta", "meta", - controller=server_metadata.Controller(), + controller=server_metadata.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV11(), + controller=flavors.resource_factory('1.1'), collection={'detail': 'GET'}) mapper.resource("limit", "limits", - controller=limits.LimitsControllerV11()) + controller=limits.resource_factory('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 00fdd4540..d8a9d1909 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -20,8 +20,9 @@ from nova import flags from nova import log as logging from nova.auth import manager -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +35,7 @@ def _translate_keys(account): manager=account.project_manager_id) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "account": ["id", "name", "description", "manager"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -66,20 +62,33 @@ class Controller(common.OpenstackController): self.manager.delete_project(id) return {} - def create(self, req): + def create(self, req, body): """We use update with create-or-update semantics because the id comes from an external source""" raise faults.Fault(webob.exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """This is really create or update.""" self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - description = env['account'].get('description') - manager = env['account'].get('manager') + description = body['account'].get('description') + manager = body['account'].get('manager') try: account = self.manager.get_project(id) self.manager.modify_project(id, manager, description) except exception.NotFound: account = self.manager.create_project(id, manager, description) return dict(account=_translate_keys(account)) + + +def resource_factory(): + metadata = { + "attributes": { + "account": ["id", "name", "description", "manager"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/backup_schedules.py 
b/nova/api/openstack/backup_schedules.py index 4bf744046..4153c90c1 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -19,9 +19,8 @@ import time from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults -import nova.image.service +from nova.api.openstack import wsgi def _translate_keys(inst): @@ -29,14 +28,9 @@ def _translate_keys(inst): return dict(backupSchedule=inst) -class Controller(common.OpenstackController): +class Controller(object): """ The backup schedule API controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'backupSchedule': []}}} - def __init__(self): pass @@ -48,7 +42,7 @@ class Controller(common.OpenstackController): """ Returns a single backup schedule for a given instance """ return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) @@ -56,3 +50,18 @@ class Controller(common.OpenstackController): def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + metadata = { + 'attributes': { + 'backupSchedule': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 1a77f25d7..36d570803 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -19,8 +19,8 @@ from webob import exc from nova import console from nova import exception -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi def _translate_keys(cons): @@ -43,14 +43,9 @@ def _translate_detail_keys(cons): return dict(console=info) -class Controller(common.OpenstackController): +class Controller(object): """The Consoles Controller for the Openstack API""" - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'console': []}}} - def __init__(self): self.console_api = console.API() super(Controller, self).__init__() @@ -63,9 +58,8 @@ class Controller(common.OpenstackController): return dict(consoles=[_translate_keys(console) for console in consoles]) - def create(self, req, server_id): + def create(self, req, server_id, body): """Creates a new console""" - #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) @@ -94,3 +88,17 @@ class Controller(common.OpenstackController): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + + +def resource_factory(): + metadata = { + 'attributes': { + 'console': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 4c5971cf6..46056a27a 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -19,22 +19,13 @@ import webob from nova import db from nova import exception -from nova.api.openstack import common from nova.api.openstack import views +from nova.api.openstack import wsgi -class 
Controller(common.OpenstackController): +class Controller(object): """Flavor controller for the OpenStack API.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "flavor": ["id", "name", "ram", "disk"], - "link": ["rel", "type", "href"], - } - } - } - def index(self, req): """Return all flavors in brief.""" items = self._get_flavors(req, is_detail=False) @@ -71,14 +62,31 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): + def _get_view_builder(self, req): return views.flavors.ViewBuilder() class ControllerV11(Controller): + def _get_view_builder(self, req): base_url = req.application_url return views.flavors.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 1eccc0174..ce0140265 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -21,19 +21,18 @@ from nova import flags from nova import quota from nova import utils from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """The image metadata API controller for the Openstack API""" def __init__(self): self.image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): if not image: @@ -64,9 +63,8 @@ class Controller(common.OpenstackController): else: return faults.Fault(exc.HTTPNotFound()) - def create(self, req, image_id): + def create(self, req, image_id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) if 'metadata' in body: @@ -77,9 +75,8 @@ class Controller(common.OpenstackController): self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) - def update(self, req, image_id, id): + def update(self, req, image_id, id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -104,3 +101,11 @@ class Controller(common.OpenstackController): metadata.pop(id) img['properties'] = metadata self.image_service.update(context, image_id, img, None) + + +def resource_factory(): + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..e22854ebf 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -23,25 +23,16 @@ from nova import utils from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import images as images_view +from nova.api.openstack import wsgi LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -class 
Controller(common.OpenstackController): +class Controller(object): """Base `wsgi.Controller` for retrieving/displaying images.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "image": ["id", "name", "updated", "created", "status", - "serverId", "progress"], - "link": ["rel", "type", "href"], - }, - }, - } - def __init__(self, image_service=None, compute_service=None): """Initialize new `ImageController`. @@ -153,3 +144,30 @@ class ControllerV11(Controller): def get_default_xmlns(self, req): return common.XML_NS_V11 + + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "image": ["id", "name", "updated", "created", "status", + "serverId", "progress"], + "link": ["rel", "type", "href"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, + metadata=metadata), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 778e9ba1a..24612eafb 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -20,23 +20,14 @@ import time from webob import exc import nova -import nova.api.openstack.views.addresses -from nova.api.openstack import common from nova.api.openstack import faults +import nova.api.openstack.views.addresses +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """The servers addresses API controller for the Openstack API.""" - _serialization_metadata = { - 'application/xml': { - 'list_collections': { - 'public': {'item_name': 'ip', 'item_key': 'addr'}, - 'private': {'item_name': 'ip', 'item_key': 'addr'}, - }, - }, - } - def __init__(self): self.compute_api = nova.compute.API() self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() @@ -65,8 +56,24 @@ class Controller(common.OpenstackController): def show(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): return faults.Fault(exc.HTTPNotImplemented()) def delete(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + metadata = { + 'list_collections': { + 'public': {'item_name': 'ip', 'item_key': 'addr'}, + 'private': {'item_name': 'ip', 'item_key': 'addr'}, + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 47bc238f1..306048d8f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -30,10 +30,11 @@ from collections import defaultdict from webob.dec import wsgify -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views +from nova.api.openstack import wsgi # Convenience constants for the limits dictionary passed to Limiter(). @@ -43,23 +44,11 @@ PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 -class LimitsController(common.OpenstackController): +class LimitsController(object): """ Controller for accessing limits in the OpenStack API. 
""" - _serialization_metadata = { - "application/xml": { - "attributes": { - "limit": ["verb", "URI", "uri", "regex", "value", "unit", - "resetTime", "next-available", "remaining", "name"], - }, - "plurals": { - "rate": "limit", - }, - }, - } - def index(self, req): """ Return all global and rate limit information. @@ -84,6 +73,35 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() +def resource_factory(version='1.0'): + controller = { + '1.0': LimitsControllerV10, + '1.1': LimitsControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "limit": ["verb", "URI", "uri", "regex", "value", "unit", + "resetTime", "next-available", "remaining", "name"], + }, + "plurals": { + "rate": "limit", + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, + metadata=metadata) + } + + return wsgi.Resource(controller, serializers=serializers) + + class Limit(object): """ Stores information about a limit for HTTP requets. @@ -195,7 +213,7 @@ DEFAULT_LIMITS = [ ] -class RateLimitingMiddleware(wsgi.Middleware): +class RateLimitingMiddleware(base_wsgi.Middleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. @@ -209,7 +227,7 @@ class RateLimitingMiddleware(wsgi.Middleware): @param application: WSGI application to wrap @param limits: List of dictionaries describing limits """ - wsgi.Middleware.__init__(self, application) + base_wsgi.Middleware.__init__(self, application) self._limiter = Limiter(limits or DEFAULT_LIMITS) @wsgify(RequestClass=wsgi.Request) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index fd64ee4fb..fb9449b4c 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -19,12 +19,11 @@ from webob import exc from nova import compute from nova import quota -from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """ The server metadata API controller for the Openstack API """ def __init__(self): @@ -43,10 +42,9 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] return self._get_metadata(context, server_id) - def create(self, req, server_id): + def create(self, req, server_id, body): context = req.environ['nova.context'] - data = self._deserialize(req.body, req.get_content_type()) - metadata = data.get('metadata') + metadata = body.get('metadata') try: self.compute_api.update_or_create_instance_metadata(context, server_id, @@ -55,9 +53,8 @@ class Controller(common.OpenstackController): self._handle_quota_error(error) return req.body - def update(self, req, server_id, id): + def update(self, req, server_id, id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -92,3 +89,11 @@ class Controller(common.OpenstackController): if error.code == "MetadataLimitExceeded": raise exc.HTTPBadRequest(explanation=error.message) raise error + + +def resource_factory(): + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/servers.py 
b/nova/api/openstack/servers.py index 8f2de2afe..78f8bb1b7 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -31,6 +31,7 @@ import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers +from nova.api.openstack import wsgi from nova.auth import manager as auth_manager from nova.compute import instance_types import nova.api.openstack @@ -41,31 +42,12 @@ LOG = logging.getLogger('nova.api.openstack.servers') FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """ The Server API controller for the OpenStack API """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress", "adminPass", "flavorRef", - "imageRef"], - "link": ["rel", "type", "href"], - }, - "dict_collections": { - "metadata": {"item_name": "meta", "item_key": "key"}, - }, - "list_collections": { - "public": {"item_name": "ip", "item_key": "addr"}, - "private": {"item_name": "ip", "item_key": "addr"}, - }, - }, - } - def __init__(self): self.compute_api = compute.API() self._image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() def index(self, req): """ Returns a list of server names and ids for a given user """ @@ -122,15 +104,14 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def create(self, req): + def create(self, req, body): """ Creates a new server for a given user """ - env = self._deserialize_create(req) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) context = req.environ['nova.context'] - password = self._get_server_admin_password(env['server']) + password = self._get_server_admin_password(body['server']) key_name = None key_data = None @@ -140,7 +121,7 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - requested_image_id = self._image_id_from_req_data(env) + requested_image_id = self._image_id_from_req_data(body) try: image_id = common.get_image_id_from_image_hash(self._image_service, context, requested_image_id) @@ -151,18 +132,18 @@ class Controller(common.OpenstackController): kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - personality = env['server'].get('personality') + personality = body['server'].get('personality') injected_files = [] if personality: injected_files = self._get_injected_files(personality) - flavor_id = self._flavor_id_from_req_data(env) + flavor_id = self._flavor_id_from_req_data(body) - if not 'name' in env['server']: + if not 'name' in body['server']: msg = _("Server name is not defined") return exc.HTTPBadRequest(msg) - name = env['server']['name'] + name = body['server']['name'] self._validate_server_name(name) name = name.strip() @@ -179,7 +160,7 @@ class Controller(common.OpenstackController): display_description=name, key_name=key_name, key_data=key_data, - metadata=env['server'].get('metadata', {}), + metadata=body['server'].get('metadata', {}), injected_files=injected_files) except quota.QuotaError as error: self._handle_quota_error(error) @@ -194,18 +175,6 @@ class Controller(common.OpenstackController): password) return server - def _deserialize_create(self, request): - """ - Deserialize a create request - - Overrides normal behavior in the case of xml content - """ - if request.content_type 
== "application/xml": - deserializer = ServerCreateRequestXMLDeserializer() - return deserializer.deserialize(request.body) - else: - return self._deserialize(request.body, request.get_content_type()) - def _get_injected_files(self, personality): """ Create a list of injected files from the personality attribute @@ -255,24 +224,23 @@ class Controller(common.OpenstackController): return utils.generate_password(16) @scheduler_api.redirect_handler - def update(self, req, id): + def update(self, req, id, body): """ Updates the server name or password """ if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() - inst_dict = self._deserialize(req.body, req.get_content_type()) - if not inst_dict: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) ctxt = req.environ['nova.context'] update_dict = {} - if 'name' in inst_dict['server']: - name = inst_dict['server']['name'] + if 'name' in body['server']: + name = body['server']['name'] self._validate_server_name(name) update_dict['display_name'] = name.strip() - self._parse_update(ctxt, id, inst_dict, update_dict) + self._parse_update(ctxt, id, body, update_dict) try: self.compute_api.update(ctxt, id, **update_dict) @@ -294,7 +262,7 @@ class Controller(common.OpenstackController): pass @scheduler_api.redirect_handler - def action(self, req, id): + def action(self, req, id, body): """Multi-purpose method used to reboot, rebuild, or resize a server""" @@ -307,10 +275,9 @@ class Controller(common.OpenstackController): 'rebuild': self._action_rebuild, } - input_dict = self._deserialize(req.body, req.get_content_type()) for key in actions.keys(): - if key in input_dict: - return actions[key](input_dict, req, id) + if key in body: + return actions[key](body, req, id) return faults.Fault(exc.HTTPNotImplemented()) def _action_change_password(self, input_dict, req, id): @@ -410,7 +377,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def reset_network(self, req, id): + def reset_network(self, req, id, body): """ Reset networking on an instance (admin only). @@ -425,7 +392,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def inject_network_info(self, req, id): + def inject_network_info(self, req, id, body): """ Inject network info for an instance (admin only). @@ -440,7 +407,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def pause(self, req, id): + def pause(self, req, id, body): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: @@ -452,7 +419,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def unpause(self, req, id): + def unpause(self, req, id, body): """ Permit Admins to Unpause the server. 
""" ctxt = req.environ['nova.context'] try: @@ -464,7 +431,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def suspend(self, req, id): + def suspend(self, req, id, body): """permit admins to suspend the server""" context = req.environ['nova.context'] try: @@ -476,7 +443,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def resume(self, req, id): + def resume(self, req, id, body): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: @@ -815,3 +782,44 @@ class ServerCreateRequestXMLDeserializer(object): if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" + + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + metadata = { + "attributes": { + "server": ["id", "imageId", "name", "flavorId", "hostId", + "status", "progress", "adminPass", "flavorRef", + "imageRef"], + "link": ["rel", "type", "href"], + }, + "dict_collections": { + "metadata": {"item_name": "meta", "item_key": "key"}, + }, + "list_collections": { + "public": {"item_name": "ip", "item_key": "addr"}, + "private": {"item_name": "ip", "item_key": "addr"}, + }, + } + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=xmlns), + } + + deserializers = { + 'application/xml': ServerCreateRequestXMLDeserializer(), + } + + return wsgi.Resource(controller, serializers=serializers, + deserializers=deserializers) + diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 996db3648..db178f2a2 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -17,29 +17,13 @@ from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -def _translate_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format """ - return dict(sharedIpGroup=inst) - - -def _translate_detail_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format with - correctly mapped attributes """ - return dict(sharedIpGroups=inst) - - -class Controller(common.OpenstackController): +class Controller(object): """ The Shared IP Groups Controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'sharedIpGroup': []}}} - def index(self, req): """ Returns a list of Shared IP Groups for the user """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -48,7 +32,7 @@ class Controller(common.OpenstackController): """ Shows in-depth information on a specific Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -60,6 +44,10 @@ class Controller(common.OpenstackController): """ Returns a complete list of Shared IP Groups """ raise faults.Fault(exc.HTTPNotImplemented()) - def create(self, req): + def create(self, req, body): """ Creates a new Shared IP group """ raise faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 7ae4c3232..35b6a502e 100644 --- a/nova/api/openstack/users.py +++ 
b/nova/api/openstack/users.py @@ -20,8 +20,10 @@ from nova import flags from nova import log as logging from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi from nova.auth import manager + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +36,7 @@ def _translate_keys(user): admin=user.admin) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "user": ["id", "name", "access", "secret", "admin"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -81,23 +78,35 @@ class Controller(common.OpenstackController): self.manager.delete_user(id) return {} - def create(self, req): + def create(self, req, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') in ('T', 'True', True) - name = env['user'].get('name') - access = env['user'].get('access') - secret = env['user'].get('secret') + is_admin = body['user'].get('admin') in ('T', 'True', True) + name = body['user'].get('name') + access = body['user'].get('access') + secret = body['user'].get('secret') user = self.manager.create_user(name, access, secret, is_admin) return dict(user=_translate_keys(user)) - def update(self, req, id): + def update(self, req, id, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') + is_admin = body['user'].get('admin') if is_admin is not None: is_admin = is_admin in ('T', 'True', True) - access = env['user'].get('access') - secret = env['user'].get('secret') + access = body['user'].get('access') + secret = body['user'].get('secret') self.manager.modify_user(id, access, secret, is_admin) return dict(user=_translate_keys(self.manager.get_user(id))) + + +def resource_factory(): + metadata = { + "attributes": { + "user": ["id", "name", "access", "secret", "admin"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py new file mode 100644 index 000000000..9e0077932 --- /dev/null +++ b/nova/api/openstack/wsgi.py @@ -0,0 +1,291 @@ + +import json +import webob +from xml.dom import minidom + +from nova import exception +from nova import log as logging +from nova import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + +LOG = logging.getLogger('nova.api.openstack.wsgi') + + +class Request(webob.Request): + def best_match_content_type(self, supported=None): + """Determine the requested content-type. + + Based on the query extension then the Accept header. 
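+
+        For example: a request path ending in .xml is served as
+        application/xml even when the Accept header prefers JSON, while
+        an unrecognised extension or an unsupported Accept value falls
+        back to application/json.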
+ + :param supported: list of content-types to override defaults + + """ + supported = supported or ['application/json', 'application/xml'] + parts = self.path.rsplit('.', 1) + + if len(parts) > 1: + ctype = 'application/{0}'.format(parts[1]) + if ctype in supported: + return ctype + + bm = self.accept.best_match(supported) + + return bm or 'application/json' + + def get_content_type(self): + if not "Content-Type" in self.headers: + raise exception.InvalidContentType(content_type=None) + + allowed_types = ("application/xml", "application/json") + type = self.content_type + + if type not in allowed_types: + raise exception.InvalidContentType(content_type=type) + else: + return type + + +class JSONDeserializer(object): + def deserialize(self, datastring): + return utils.loads(datastring) + + +class JSONSerializer(object): + def serialize(self, data): + return utils.dumps(data) + + +class XMLDeserializer(object): + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def deserialize(self, datastring): + """XML deserialization entry point.""" + plurals = set(self.metadata.get('plurals', {})) + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. + + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + +class XMLSerializer(object): + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def serialize(self, data): + # We expect data to contain a single key which is the XML root. 
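+        # e.g. {"server": {...}} becomes a <server> document.  The
+        # recursive _to_xml_node() call below walks nested dicts and
+        # lists, and the serializer-level xmlns is only applied when the
+        # metadata has not already set one on the root node.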
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + xmlns = node.getAttribute('xmlns') + if not xmlns and self.xmlns: + node.setAttribute('xmlns', self.xmlns) + + return node.toprettyxml(indent=' ') + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + if type(data) is list: + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + +class Resource(object): + """WSGI app that dispatched to methods. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming wsgi.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. 
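+
+    A minimal construction sketch, mirroring the resource_factory()
+    helpers added in this patch (ExampleController and metadata are
+    placeholders for any OpenStack API controller and its XML metadata):
+
+        serializers = {
+            'application/xml': XMLSerializer(xmlns=XMLNS_V10,
+                                             metadata=metadata),
+        }
+        resource = Resource(ExampleController(), serializers=serializers)
+
+    Resources are then mounted on the routes mapper in place of bare
+    controllers elsewhere in this series.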
+ + """ + def __init__(self, controller, serializers=None, deserializers=None): + self.serializers = { + 'application/xml': XMLSerializer(), + 'application/json': JSONSerializer(), + } + self.serializers.update(serializers or {}) + + self.deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + self.deserializers.update(deserializers or {}) + + self.controller = controller + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """Call the method specified in req.environ by RoutesMiddleware.""" + LOG.debug("%s %s" % (request.method, request.url)) + + try: + action, action_args, accept = self.deserialize_request(request) + except exception.InvalidContentType: + return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) + + controller_method = getattr(self.controller, action) + result = controller_method(req=request, **action_args) + + response = self.serialize_response(accept, result) + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError: + msg_dict = dict(url=request.url) + msg = _("%(url)s returned a fault") + + LOG.debug(msg) + + return response + + def serialize_response(self, content_type, response_body): + """Serialize a dict into a string and wrap in a wsgi.Request object. + + :param content_type: expected mimetype of serialized response body + :param response_body: dict produced by the Controller + + """ + if not type(response_body) is dict: + return response_body + + response = webob.Response() + response.headers['Content-Type'] = content_type + + serializer = self.get_serializer(content_type) + response.body = serializer.serialize(response_body) + + return response + + def get_serializer(self, content_type): + try: + return self.serializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def deserialize_request(self, request): + """Parse a wsgi request into a set of params we care about. 
+ + :param request: wsgi.Request object + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action') + + if request.method.lower() in ('post', 'put'): + if len(request.body) == 0: + action_args['body'] = None + else: + content_type = request.get_content_type() + deserializer = self.get_deserializer(content_type) + + try: + action_args['body'] = deserializer.deserialize(request.body) + except exception.InvalidContentType: + action_args['body'] = None + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] + + if 'format' in args: + del args['format'] + + return args + + def get_deserializer(self, content_type): + try: + return self.deserializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 227ffecdc..d17ab7a9b 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -17,6 +17,7 @@ from nova import db from nova import flags from nova import log as logging from nova.api.openstack import common +from nova.api.openstack import wsgi from nova.scheduler import api @@ -41,12 +42,7 @@ def _scrub_zone(zone): 'deleted', 'deleted_at', 'updated_at')) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "zone": ["id", "api_url", "name", "capabilities"]}}} +class Controller(object): def index(self, req): """Return all zones in brief""" @@ -85,15 +81,28 @@ class Controller(common.OpenstackController): api.zone_delete(req.environ['nova.context'], zone_id) return {} - def create(self, req): + def create(self, req, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - zone = api.zone_create(context, env["zone"]) + zone = api.zone_create(context, body["zone"]) return dict(zone=_scrub_zone(zone)) - def update(self, req, id): + def update(self, req, id, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) - zone = api.zone_update(context, zone_id, env["zone"]) + zone = api.zone_update(context, zone_id, body["zone"]) return dict(zone=_scrub_zone(zone)) + + +def resource_factory(): + metadata = { + "attributes": { + "zone": ["id", "api_url", "name", "capabilities"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 45bd4d501..db859c2f8 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -65,7 +65,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.LimitsControllerV10() + self.controller = limits.resource_factory('1.0') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -178,7 +178,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = 
limits.LimitsControllerV11() + self.controller = limits.resource_factory('1.1') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e8182b6a9..15f376f74 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -207,7 +207,6 @@ class ServersTest(test.TestCase): }, ] - print res_dict['server'] self.assertEqual(res_dict['server']['links'], expected_links) def test_get_server_by_id_with_addresses_xml(self): @@ -831,7 +830,6 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers/detail') req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) - print res.body dom = minidom.parseString(res.body) for i, server in enumerate(dom.getElementsByTagName('server')): self.assertEqual(server.getAttribute('id'), str(i)) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 5820ecdc2..0be3aecf1 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -121,138 +121,3 @@ class ControllerTest(test.TestCase): result = request.get_response(self.TestRouter()) self.assertEqual(result.status_int, 200) self.assertEqual(result.headers["Content-Type"], "application/json") - - -class RequestTest(test.TestCase): - - def test_request_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') - request.body = "" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "text/html" - request.body = "asdf
" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = \ - "application/json; q=0.3, application/xml; q=0.9" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_from_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123.json') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_accept_and_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - -class SerializerTest(test.TestCase): - - def test_xml(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_xml = '(2,3)' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/xml") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_xml) - - def test_json(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/json") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_json) - - def test_unsupported_content_type(self): - serializer = wsgi.Serializer() - self.assertRaises(exception.InvalidContentType, serializer.serialize, - {}, "text/null") - - def test_deserialize_json(self): - data = """{"a": { - "a1": "1", - "a2": "2", - "bs": ["1", "2", "3", {"c": {"c1": "1"}}], - "d": {"e": "1"}, - "f": "1"}}""" - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(data, "application/json"), - as_dict) - - def test_deserialize_xml(self): - xml = """ - - 123 - 1 - 1 - - """.strip() - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': 
dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) - - def test_deserialize_empty_xml(self): - xml = """""" - as_dict = {"a": {}} - serializer = wsgi.Serializer() - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 8a9754777..fde32f797 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): """"Some basic XML sanity checks.""" def test_namespace_limits(self): - """/limits should have v1.0 namespace (hasn't changed in 1.1).""" + """/limits should have v1.1 namespace (has changed in 1.1).""" headers = {} headers['Accept'] = 'application/xml' @@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): data = response.read() LOG.debug("data: %s" % data) - prefix = ' Date: Wed, 18 May 2011 20:33:25 -0400 Subject: removing controller/serializer code from wsgi.py; updating other code to use new modules --- nova/api/direct.py | 12 +- nova/api/openstack/common.py | 7 - nova/api/openstack/consoles.py | 3 +- nova/api/openstack/contrib/volumes.py | 23 +- nova/api/openstack/extensions.py | 97 +++++---- nova/api/openstack/faults.py | 39 ++-- nova/api/openstack/image_metadata.py | 1 - nova/api/openstack/images.py | 11 +- nova/api/openstack/versions.py | 44 ++-- nova/api/openstack/wsgi.py | 23 +- nova/objectstore/s3server.py | 2 +- nova/tests/api/openstack/extensions/foxinsocks.py | 4 +- nova/tests/api/openstack/test_extensions.py | 4 +- nova/tests/api/test_wsgi.py | 54 ----- nova/wsgi.py | 250 +--------------------- 15 files changed, 147 insertions(+), 427 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 8ceae299c..5e6c7c882 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import utils from nova import wsgi +import nova.api.openstack.wsgi # Global storage for registering modules. @@ -251,7 +252,7 @@ class Reflection(object): return self._methods[method] -class ServiceWrapper(wsgi.Controller): +class ServiceWrapper(object): """Wrapper to dynamically povide a WSGI controller for arbitrary objects. 
With lightweight introspection allows public methods on the object to @@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify(RequestClass=wsgi.Request) + @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller): try: content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - return self._serialize(result, content_type, default_xmlns) + serializer = { + 'application/xml': nova.api.openstack.wsgi.XMLSerializer(), + 'application/json': nova.api.openstack.wsgi.JSONSerializer(), + }[content_type] + return serializer.serialize(result) except: raise exception.Error("returned non-serializable type: %s" % result) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..bb1a96812 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -23,7 +23,6 @@ import webob from nova import exception from nova import flags from nova import log as logging -from nova import wsgi LOG = logging.getLogger('nova.api.openstack.common') @@ -146,9 +145,3 @@ def get_id_from_href(href): except: LOG.debug(_("Error extracting id from href: %s") % href) raise webob.exc.HTTPBadRequest(_('could not parse id from href')) - - -class OpenstackController(wsgi.Controller): - def get_default_xmlns(self, req): - # Use V10 by default - return XML_NS_V10 diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 36d570803..97304affe 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -44,11 +44,10 @@ def _translate_detail_keys(cons): class Controller(object): - """The Consoles Controller for the Openstack API""" + """The Consoles controller for the Openstack API""" def __init__(self): self.console_api = console.API() - super(Controller, self).__init__() def index(self, req, server_id): """Returns a list of consoles for this instance""" diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..b00790b7f 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -22,7 +22,6 @@ from nova import exception from nova import flags from nova import log as logging from nova import volume -from nova import wsgi from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults @@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol): return d -class VolumeController(wsgi.Controller): +class VolumeController(object): """The Volumes API controller for the OpenStack API.""" _serialization_metadata = { @@ -124,15 +123,14 @@ class VolumeController(wsgi.Controller): res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} - def create(self, req): + def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) - vol = env['volume'] + vol = body['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) new_volume = self.volume_api.create(context, size, @@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol): return d -class 
VolumeAttachmentController(wsgi.Controller): +class VolumeAttachmentController(object): """The volume attachment API controller for the Openstack API. A child resource of the server. Note that we use the volume id @@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller): return {'volumeAttachment': _translate_attachment_detail_view(context, vol)} - def create(self, req, server_id): + def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) instance_id = server_id - volume_id = env['volumeAttachment']['volumeId'] - device = env['volumeAttachment']['device'] + volume_id = body['volumeAttachment']['volumeId'] + device = body['volumeAttachment']['device'] msg = _("Attach volume %(volume_id)s to instance %(server_id)s" " at %(device)s") % locals() @@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller): # TODO(justinsb): How do I return "accepted" here? return {'volumeAttachment': attachment} - def update(self, _req, _server_id, _id): + def update(self, req, server_id, id, body): """Update a volume attachment. We don't currently support this.""" return faults.Fault(exc.HTTPBadRequest()) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 7ea7afef6..73f174e07 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -27,9 +27,10 @@ import webob.exc from nova import exception from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi LOG = logging.getLogger('extensions') @@ -116,28 +117,34 @@ class ExtensionDescriptor(object): return response_exts -class ActionExtensionController(common.OpenstackController): - +class ActionExtensionController(object): def __init__(self, application): - self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler - def action(self, req, id): - - input_dict = self._deserialize(req.body, req.get_content_type()) + def action(self, req, id, body): for action_name, handler in self.action_handlers.iteritems(): - if action_name in input_dict: - return handler(input_dict, req, id) + if action_name in body: + return handler(body, req, id) # no action handler found (bump to downstream application) res = self.application return res -class ResponseExtensionController(common.OpenstackController): +class ActionExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = ActionExtensionController(application) + super(ActionExtensionResource, self).__init__(controller) + + def add_action(self, action_name, handler): + self.controller.add_action(action_name, handler) + + +class ResponseExtensionController(object): def __init__(self, application): self.application = application @@ -157,7 +164,11 @@ class ResponseExtensionController(common.OpenstackController): headers = res.headers except AttributeError: default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) + serializer = { + 'application/xml': wsgi.XMLSerializer(), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + body = serializer.serialize(res) headers = {"Content-Type": content_type} res = webob.Response() res.body = body @@ 
-165,7 +176,17 @@ class ResponseExtensionController(common.OpenstackController): return res -class ExtensionController(common.OpenstackController): +class ResponseExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = ResponseExtensionController(application) + super(ResponseExtensionResource, self).__init__(controller) + + def add_handler(self, handler): + self.controller.add_handler(handler) + + +class ExtensionController(object): def __init__(self, extension_manager): self.extension_manager = extension_manager @@ -198,7 +219,7 @@ class ExtensionController(common.OpenstackController): raise faults.Fault(webob.exc.HTTPNotFound()) -class ExtensionMiddleware(wsgi.Middleware): +class ExtensionMiddleware(base_wsgi.Middleware): """Extensions middleware for WSGI.""" @classmethod def factory(cls, global_config, **local_config): @@ -207,43 +228,43 @@ class ExtensionMiddleware(wsgi.Middleware): return cls(app, **local_config) return _factory - def _action_ext_controllers(self, application, ext_mgr, mapper): - """Return a dict of ActionExtensionController-s by collection.""" - action_controllers = {} + def _action_ext_resources(self, application, ext_mgr, mapper): + """Return a dict of ActionExtensionResource objects by collection.""" + action_resources = {} for action in ext_mgr.get_actions(): - if not action.collection in action_controllers.keys(): - controller = ActionExtensionController(application) + if not action.collection in action_resources.keys(): + resource = ActionExtensionResource(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', - controller=controller, + controller=resource, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', - controller=controller, + controller=resource, conditions=dict(method=['POST'])) - action_controllers[action.collection] = controller + action_resources[action.collection] = resource - return action_controllers + return action_resources - def _response_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionController-s by collection.""" - response_ext_controllers = {} + def _response_ext_resources(self, application, ext_mgr, mapper): + """Returns a dict of ResponseExtensionResource objects by collection.""" + response_ext_resources = {} for resp_ext in ext_mgr.get_response_extensions(): - if not resp_ext.key in response_ext_controllers.keys(): - controller = ResponseExtensionController(application) + if not resp_ext.key in response_ext_resources.keys(): + resource = ResponseExtensionResource(application) mapper.connect(resp_ext.url_route + '.:(format)', action='process', - controller=controller, + controller=resource, conditions=resp_ext.conditions) mapper.connect(resp_ext.url_route, action='process', - controller=controller, + controller=resource, conditions=resp_ext.conditions) - response_ext_controllers[resp_ext.key] = controller + response_ext_resources[resp_ext.key] = resource - return response_ext_controllers + return response_ext_resources def __init__(self, application, ext_mgr=None): @@ -258,21 +279,21 @@ class ExtensionMiddleware(wsgi.Middleware): LOG.debug(_('Extended resource: %s'), resource.collection) mapper.resource(resource.collection, resource.collection, - controller=resource.controller, + controller=wsgi.Resource(resource.controller), collection=resource.collection_actions, member=resource.member_actions, parent_resource=resource.parent) # extended actions - action_controllers = 
self._action_ext_controllers(application, ext_mgr, + action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug(_('Extended action: %s'), action.action_name) - controller = action_controllers[action.collection] - controller.add_action(action.action_name, action.handler) + resource = action_resources[action.collection] + resource.add_action(action.action_name, action.handler) # extended responses - resp_controllers = self._response_ext_controllers(application, ext_mgr, + resp_controllers = self._response_ext_resources(application, ext_mgr, mapper) for response_ext in ext_mgr.get_response_extensions(): LOG.debug(_('Extended response: %s'), response_ext.key) @@ -422,7 +443,7 @@ class ExtensionManager(object): class ResponseExtension(object): - """Add data to responses from core nova OpenStack API controllers.""" + """Add data to responses from core nova OpenStack API resources.""" def __init__(self, method, url_route, handler): self.url_route = url_route @@ -432,7 +453,7 @@ class ResponseExtension(object): class ActionExtension(object): - """Add custom actions to core nova OpenStack API controllers.""" + """Add custom actions to core nova OpenStack API resources.""" def __init__(self, collection, action_name, handler): self.collection = collection diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 87118ce19..fd36f8f17 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -19,8 +19,7 @@ import webob.dec import webob.exc -from nova import wsgi -from nova.api.openstack import common +from nova.api.openstack import wsgi class Fault(webob.exc.HTTPException): @@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException): if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry + # 'code' is an attribute on the fault tag itself - metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - default_xmlns = common.XML_NS_V10 - serializer = wsgi.Serializer(metadata, default_xmlns) + metadata = {'attributes': {fault_name: 'code'}} + content_type = req.best_match_content_type() - self.wrapped_exc.body = serializer.serialize(fault_data, content_type) + + serializer = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type + return self.wrapped_exc @@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException): Rate-limited request response. """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "overLimitFault": "code", - }, - }, - } - def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. @@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException): Return the wrapped exception with a serialized body conforming to our error format. 
""" - serializer = wsgi.Serializer(self._serialization_metadata) content_type = request.best_match_content_type() - content = serializer.serialize(self.content, content_type) + metadata = {"attributes": {"overLimitFault": "code"}} + + serializer = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + + content = serializer.serialize(self.content) self.wrapped_exc.body = content + return self.wrapped_exc diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index ce0140265..506b63acf 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -20,7 +20,6 @@ from webob import exc from nova import flags from nova import quota from nova import utils -from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import wsgi diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index e22854ebf..5a03573d8 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -31,7 +31,7 @@ FLAGS = flags.FLAGS class Controller(object): - """Base `wsgi.Controller` for retrieving/displaying images.""" + """Base controller for retrieving/displaying images.""" def __init__(self, image_service=None, compute_service=None): """Initialize new `ImageController`. @@ -99,21 +99,20 @@ class Controller(object): self._image_service.delete(context, image_id) return webob.exc.HTTPNoContent() - def create(self, req): + def create(self, req, body): """Snapshot a server instance and save the image. :param req: `wsgi.Request` object """ context = req.environ['nova.context'] content_type = req.get_content_type() - image = self._deserialize(req.body, content_type) - if not image: + if not body: raise webob.exc.HTTPBadRequest() try: - server_id = image["image"]["serverId"] - image_name = image["image"]["name"] + server_id = body["image"]["serverId"] + image_name = body["image"]["name"] except KeyError: raise webob.exc.HTTPBadRequest() diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 3f9d91934..a8d785b52 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -18,13 +18,27 @@ import webob import webob.dec -from nova import wsgi +from nova import wsgi as base_wsgi import nova.api.openstack.views.versions +from nova.api.openstack import wsgi -class Versions(wsgi.Application): - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): +class Versions(wsgi.Resource, base_wsgi.Application): + def __init__(self): + metadata = { + "attributes": { + "version": ["status", "id"], + "link": ["rel", "href"], + } + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + super(Versions, self).__init__(None, serializers=serializers) + + def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" version_objs = [ { @@ -37,24 +51,6 @@ class Versions(wsgi.Application): }, ] - builder = nova.api.openstack.views.versions.get_view_builder(req) + builder = nova.api.openstack.views.versions.get_view_builder(request) versions = [builder.build(version) for version in version_objs] - response = dict(versions=versions) - - metadata = { - "application/xml": { - "attributes": { - "version": ["status", "id"], - "link": ["rel", "href"], - } - } - } - - content_type = req.best_match_content_type() - body = wsgi.Serializer(metadata).serialize(response, content_type) - - response = 
webob.Response() - response.content_type = content_type - response.body = body - - return response + return dict(versions=versions) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 9e0077932..97280c365 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -206,8 +206,7 @@ class Resource(object): except exception.InvalidContentType: return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - controller_method = getattr(self.controller, action) - result = controller_method(req=request, **action_args) + result = self.dispatch(request, action, action_args) response = self.serialize_response(accept, result) @@ -222,6 +221,10 @@ class Resource(object): return response + def dispatch(self, request, action, action_args): + controller_method = getattr(self.controller, action) + return controller_method(req=request, **action_args) + def serialize_response(self, content_type, response_body): """Serialize a dict into a string and wrap in a wsgi.Request object. @@ -253,7 +256,7 @@ class Resource(object): """ action_args = self.get_action_args(request.environ) - action = action_args.pop('action') + action = action_args.pop('action', None) if request.method.lower() in ('post', 'put'): if len(request.body) == 0: @@ -275,14 +278,18 @@ class Resource(object): return request.best_match_content_type() def get_action_args(self, request_environment): - args = request_environment['wsgiorg.routing_args'][1].copy() + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] - del args['controller'] + if 'format' in args: + del args['format'] - if 'format' in args: - del args['format'] + return args - return args + except KeyError: + return {} def get_deserializer(self, content_type): try: diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py index dd6327c8f..76025a1e3 100644 --- a/nova/objectstore/s3server.py +++ b/nova/objectstore/s3server.py @@ -81,7 +81,7 @@ class S3Application(wsgi.Router): super(S3Application, self).__init__(mapper) -class BaseRequestHandler(wsgi.Controller): +class BaseRequestHandler(object): """Base class emulating Tornado's web framework pattern in WSGI. This is a direct port of Tornado's implementation, so some key decisions diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 0860b51ac..a64552af1 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -17,12 +17,10 @@ import json -from nova import wsgi - from nova.api.openstack import extensions -class FoxInSocksController(wsgi.Controller): +class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed1..a8168f88a 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -26,15 +26,15 @@ from nova import flags from nova.api import openstack from nova.api.openstack import extensions from nova.api.openstack import flavors +from nova.api.openstack import wsgi from nova.tests.api.openstack import fakes -import nova.wsgi FLAGS = flags.FLAGS response_body = "Try to say this Mr. Knox, sir..." 
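
The routing-args handling that the reworked Resource.get_action_args() settles on (see the nova/api/openstack/wsgi.py hunks above) can be exercised on its own. The following is only a standalone sketch with a hand-built environ dict, not part of the patch; real requests get 'wsgiorg.routing_args' populated by routes.middleware.RoutesMiddleware.

    # Standalone sketch of the get_action_args() behaviour shown above.
    def get_action_args(request_environment):
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
            del args['controller']
            if 'format' in args:
                del args['format']
            return args
        except KeyError:
            # no routing information at all, so there is nothing to dispatch on
            return {}

    environ = {'wsgiorg.routing_args': [None, {'controller': None,
                                               'format': 'json',
                                               'action': 'show',
                                               'id': '123'}]}
    print(get_action_args(environ))   # keeps only 'action' and 'id'
    print(get_action_args({}))        # {}
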
-class StubController(nova.wsgi.Controller): +class StubController(object): def __init__(self, body): self.body = body diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 0be3aecf1..d33268296 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -67,57 +67,3 @@ class Test(test.TestCase): self.assertEqual(result.body, "Router result") result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - - -class ControllerTest(test.TestCase): - - class TestRouter(wsgi.Router): - - class TestController(wsgi.Controller): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "test": ["id"]}}} - - def show(self, req, id): # pylint: disable=W0622,C0103 - return {"test": {"id": id}} - - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=self.TestController()) - wsgi.Router.__init__(self, mapper) - - def test_show(self): - request = wsgi.Request.blank('/tests/123') - result = request.get_response(self.TestRouter()) - self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) - - def test_response_content_type_from_accept_xml(self): - request = webob.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_accept_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_from_query_extension_xml(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_query_extension_json(self): - request = wsgi.Request.blank('/tests/123.json') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_default_when_unsupported(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.status_int, 200) - self.assertEqual(result.headers["Content-Type"], "application/json") diff --git a/nova/wsgi.py b/nova/wsgi.py index e60a8820d..3a292073b 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -82,36 +82,7 @@ class Server(object): class Request(webob.Request): - - def best_match_content_type(self): - """Determine the most acceptable content-type. - - Based on the query extension then the Accept header. 
- - """ - parts = self.path.rsplit('.', 1) - - if len(parts) > 1: - format = parts[1] - if format in ['json', 'xml']: - return 'application/{0}'.format(parts[1]) - - ctypes = ['application/json', 'application/xml'] - bm = self.accept.best_match(ctypes) - - return bm or 'application/json' - - def get_content_type(self): - allowed_types = ("application/xml", "application/json") - if not "Content-Type" in self.headers: - msg = _("Missing Content-Type") - LOG.debug(msg) - raise webob.exc.HTTPBadRequest(msg) - type = self.content_type - if type in allowed_types: - return type - LOG.debug(_("Wrong Content-Type: %s") % type) - raise webob.exc.HTTPBadRequest("Invalid content type") + pass class Application(object): @@ -286,7 +257,7 @@ class Router(object): Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a wsgi.Controller, who will route + well and have your controller be a controller, who will route the request to the action method. Examples: @@ -335,223 +306,6 @@ class Router(object): return app -class Controller(object): - """WSGI app that dispatched to methods. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon itself. All action methods - must, in addition to their normal parameters, accept a 'req' argument - which is the incoming wsgi.Request. They raise a webob.exc exception, - or return a dict which will be serialized by requested content type. - - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Call the method specified in req.environ by RoutesMiddleware.""" - arg_dict = req.environ['wsgiorg.routing_args'][1] - action = arg_dict['action'] - method = getattr(self, action) - LOG.debug("%s %s" % (req.method, req.url)) - del arg_dict['controller'] - del arg_dict['action'] - if 'format' in arg_dict: - del arg_dict['format'] - arg_dict['req'] = req - result = method(**arg_dict) - - if type(result) is dict: - content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - body = self._serialize(result, content_type, default_xmlns) - - response = webob.Response() - response.headers['Content-Type'] = content_type - response.body = body - msg_dict = dict(url=req.url, status=response.status_int) - msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - LOG.debug(msg) - return response - else: - return result - - def _serialize(self, data, content_type, default_xmlns): - """Serialize the given dict to the provided content_type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. - - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - - serializer = Serializer(_metadata, default_xmlns) - try: - return serializer.serialize(data, content_type) - except exception.InvalidContentType: - raise webob.exc.HTTPNotAcceptable() - - def _deserialize(self, data, content_type): - """Deserialize the request body to the specefied content type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. 
- - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - serializer = Serializer(_metadata) - return serializer.deserialize(data, content_type) - - def get_default_xmlns(self, req): - """Provide the XML namespace to use if none is otherwise specified.""" - return None - - -class Serializer(object): - """Serializes and deserializes dictionaries to certain MIME types.""" - - def __init__(self, metadata=None, default_xmlns=None): - """Create a serializer based on the given WSGI environment. - - 'metadata' is an optional dict mapping MIME types to information - needed to serialize a dictionary to that type. - - """ - self.metadata = metadata or {} - self.default_xmlns = default_xmlns - - def _get_serialize_handler(self, content_type): - handlers = { - 'application/json': self._to_json, - 'application/xml': self._to_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def serialize(self, data, content_type): - """Serialize a dictionary into the specified content type.""" - return self._get_serialize_handler(content_type)(data) - - def deserialize(self, datastring, content_type): - """Deserialize a string to a dictionary. - - The string must be in the format of a supported MIME type. - - """ - return self.get_deserialize_handler(content_type)(datastring) - - def get_deserialize_handler(self, content_type): - handlers = { - 'application/json': self._from_json, - 'application/xml': self._from_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def _from_json(self, datastring): - return utils.loads(datastring) - - def _from_xml(self, datastring): - xmldata = self.metadata.get('application/xml', {}) - plurals = set(xmldata.get('plurals', {})) - node = minidom.parseString(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - - def _from_xml_node(self, node, listnames): - """Convert a minidom node to a simple Python type. - - listnames is a collection of names of XML nodes whose subnodes should - be considered list items. - - """ - if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: - return node.childNodes[0].nodeValue - elif node.nodeName in listnames: - return [self._from_xml_node(n, listnames) for n in node.childNodes] - else: - result = dict() - for attr in node.attributes.keys(): - result[attr] = node.attributes[attr].nodeValue - for child in node.childNodes: - if child.nodeType != node.TEXT_NODE: - result[child.nodeName] = self._from_xml_node(child, - listnames) - return result - - def _to_json(self, data): - return utils.dumps(data) - - def _to_xml(self, data): - metadata = self.metadata.get('application/xml', {}) - # We expect data to contain a single key which is the XML root. 
- root_key = data.keys()[0] - doc = minidom.Document() - node = self._to_xml_node(doc, metadata, root_key, data[root_key]) - - xmlns = node.getAttribute('xmlns') - if not xmlns and self.default_xmlns: - node.setAttribute('xmlns', self.default_xmlns) - - return node.toprettyxml(indent=' ') - - def _to_xml_node(self, doc, metadata, nodename, data): - """Recursive method to convert data members to XML nodes.""" - result = doc.createElement(nodename) - - # Set the xml namespace if one is specified - # TODO(justinsb): We could also use prefixes on the keys - xmlns = metadata.get('xmlns', None) - if xmlns: - result.setAttribute('xmlns', xmlns) - - if type(data) is list: - collections = metadata.get('list_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for item in data: - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(item)) - result.appendChild(node) - return result - singular = metadata.get('plurals', {}).get(nodename, None) - if singular is None: - if nodename.endswith('s'): - singular = nodename[:-1] - else: - singular = 'item' - for item in data: - node = self._to_xml_node(doc, metadata, singular, item) - result.appendChild(node) - elif type(data) is dict: - collections = metadata.get('dict_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for k, v in data.items(): - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(k)) - text = doc.createTextNode(str(v)) - node.appendChild(text) - result.appendChild(node) - return result - attrs = metadata.get('attributes', {}).get(nodename, {}) - for k, v in data.items(): - if k in attrs: - result.setAttribute(k, str(v)) - else: - node = self._to_xml_node(doc, metadata, k, v) - result.appendChild(node) - else: - # Type is atom - node = doc.createTextNode(str(data)) - result.appendChild(node) - return result - - def paste_config_file(basename): """Find the best location in the system for a paste config file. -- cgit From 0aefdc6da92b8db8b15a3e8a0bef8fc5c4b46450 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 18 May 2011 20:33:52 -0400 Subject: missed the new wsgi test file --- nova/tests/api/openstack/test_wsgi.py | 248 ++++++++++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 nova/tests/api/openstack/test_wsgi.py diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py new file mode 100644 index 000000000..430dafe77 --- /dev/null +++ b/nova/tests/api/openstack/test_wsgi.py @@ -0,0 +1,248 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import json +import webob + +from nova import exception +from nova import test +from nova.api.openstack import wsgi + + +class RequestTest(test.TestCase): + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf
" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + +class SerializationTest(test.TestCase): + def test_xml(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_xml = '(2,3)' + xmlns = "testing xmlns" + serializer = wsgi.XMLSerializer(xmlns="asdf") + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_json = '{"servers":{"a":[2,3]}}' + serializer = wsgi.JSONSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_json) + + +class DeserializationTest(test.TestCase): + def test_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + deserializer = wsgi.JSONDeserializer() + self.assertEqual(deserializer.deserialize(data), as_dict) + + def test_xml(self): + xml = """ + + 123 + 1 + 1 + + """.strip() + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {'plurals': {'bs': 'b', 'ts': 't'}} + deserializer = wsgi.XMLDeserializer(metadata=metadata) + 
self.assertEqual(deserializer.deserialize(xml), as_dict) + + def test_xml_empty(self): + xml = """""" + as_dict = {"a": {}} + deserializer = wsgi.XMLDeserializer() + self.assertEqual(deserializer.deserialize(xml), as_dict) + + +class ResourceSerializerTest(test.TestCase): + def setUp(self): + class JSONSerializer(object): + def serialize(self, data): + return 'pew_json' + + class XMLSerializer(object): + def serialize(self, data): + return 'pew_xml' + + self.serializers = { + 'application/json': JSONSerializer(), + 'application/XML': XMLSerializer(), + } + + self.resource = wsgi.Resource(None, serializers=self.serializers) + + def tearDown(self): + pass + + def test_get_serializer(self): + self.assertEqual(self.resource.get_serializer('application/json'), + self.serializers['application/json']) + + def test_get_serializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.get_serializer, + 'application/unknown') + + def test_serialize_response_dict(self): + response = self.resource.serialize_response('application/json', {}) + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, 'pew_json') + + def test_serialize_response_non_dict(self): + response = self.resource.serialize_response('application/json', 'a') + self.assertEqual(response, 'a') + + def test_serialize_response_dict_to_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.serialize_response, + 'application/unknown', {}) + + def test_serialize_response_non_dict_to_unknown_content_type(self): + response = self.resource.serialize_response('application/unknown', 'a') + self.assertEqual(response, 'a') + + +class ResourceDeserializerTest(test.TestCase): + def setUp(self): + class JSONDeserializer(object): + def deserialize(self, data): + return 'pew_json' + + class XMLDeserializer(object): + def deserialize(self, data): + return 'pew_xml' + + self.deserializers = { + 'application/json': JSONDeserializer(), + 'application/XML': XMLDeserializer(), + } + + self.resource = wsgi.Resource(None, deserializers=self.deserializers) + + def tearDown(self): + pass + + def test_get_deserializer(self): + self.assertEqual(self.resource.get_deserializer('application/json'), + self.deserializers['application/json']) + + def test_get_deserializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.get_deserializer, + 'application/unknown') + + def test_get_expected_content_type(self): + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + self.assertEqual(self.resource.get_expected_content_type(request), + 'application/json') + + def test_get_action_args(self): + env = { + 'wsgiorg.routing_args': [None, { + 'controller': None, + 'format': None, + 'action': 'update', + 'id': 12, + }], + } + + expected = {'action': 'update', 'id': 12} + + self.assertEqual(self.resource.get_action_args(env), expected) + + def test_deserialize_request(self): + def fake_get_routing_args(request): + return {'action': 'create'} + self.resource.get_action_args = fake_get_routing_args + + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/xml' + + deserialized = self.resource.deserialize_request(request) + expected = ('create', {}, 'application/xml') + + self.assertEqual(expected, deserialized) -- cgit From 0b698186b56af6580633dedd7916df2897945f29 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:31:14 +0900 Subject: Avoid 
wildcard import. --- .../migrate_repo/versions/019_add_volume_snapshot_support.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 288f63e72..5a44bac16 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -15,8 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, DateTime, Boolean, String from nova import log as logging -- cgit From a4cc51b78ae5e08227bef7a4be52953776a3e947 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:49:15 +0900 Subject: Add a unitest to test EC2 snapshot APIs. --- nova/tests/test_cloud.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index c8559615a..d9169a646 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -188,6 +188,52 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + def test_describe_snapshots(self): + """Makes sure describe_snapshots works and filters results.""" + vol = db.volume_create(self.context, {}) + snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 2) + snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') + result = self.cloud.describe_snapshots(self.context, + snapshot_id=[snapshot_id]) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), + snap2['id']) + db.snapshot_destroy(self.context, snap1['id']) + db.snapshot_destroy(self.context, snap2['id']) + db.volume_destroy(self.context, vol['id']) + + def test_create_snapshot(self): + """Makes sure create_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + result = self.cloud.create_snapshot(self.context, + volume_id=volume_id) + snapshot_id = result['snapshotId'] + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) + + db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) + db.volume_destroy(self.context, vol['id']) + + def test_delete_snapshot(self): + """Makes sure delete_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.delete_snapshot(self.context, + snapshot_id=snapshot_id) + self.assertTrue(result) + + db.volume_destroy(self.context, vol['id']) + def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', -- cgit From c04a59fefbcbd0e5e21cbc8c70eb3147785cf22d Mon Sep 17 00:00:00 
2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 22:06:18 +0900 Subject: Fix comments. --- nova/db/api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 1ef82b461..3597732b9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -884,27 +884,27 @@ def volume_update(context, volume_id, values): def snapshot_create(context, values): - """Create a volume from the values dictionary.""" + """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): - """Create a volume from the values dictionary.""" + """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): - """Get a volume or raise if it does not exist.""" + """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): - """Get all volumes.""" + """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): - """Get all volumes belonging to a project.""" + """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) -- cgit From beea6545804dc17661eea83b373d74d14cf07c32 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 10:52:23 -0400 Subject: Minor cleanup --- nova/api/openstack/images.py | 8 +++----- nova/virt/libvirt_conn.py | 12 +++--------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index c2511b99f..ac02d63c5 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,8 +50,7 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or \ - utils.get_default_image_service() + self._image_service = image_service or utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. @@ -75,14 +74,13 @@ class Controller(common.OpenstackController): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def show(self, req, id): + def show(self, req, image_id): """Return detailed information about a specific image. 
:param req: `wsgi.Request` object - :param id: Image identifier (integer) + :param image_id: Image identifier (integer) """ context = req.environ['nova.context'] - image_id = id try: (image_service, service_image_id) = utils.get_image_service( diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ab47493fd..e311184e7 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,9 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['kernel_id']) - fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['kernel_id']).hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -854,9 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['ramdisk_id']) - fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['ramdisk_id']).hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, @@ -864,9 +860,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['image_id']) - root_fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size -- cgit From d6fab80027e5fdb9a8d3e56044c399a7a80b2464 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 11:29:23 -0400 Subject: fname should have been root_fname --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e311184e7..9e66c3b48 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -860,7 +860,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - fname = hashlib.sha1(disk_images['image_id']).hexdigest() + root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size -- cgit From e0d43f39aeee0d62741ed40de9045bfde3fd20d8 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 16:03:10 -0400 Subject: No reason to hash ramdisk_id and kernel_id. They are ints. 
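
As a standalone illustration of what this subject line amounts to (not the libvirt driver code itself; the ids are made up and str()/encode() are added only so the snippet runs on its own): kernel and ramdisk ids are already usable cache keys, while the root image key keeps the sha1 form introduced in the hunks above.

    import hashlib

    # Sketch only; ids below are hypothetical.
    disk_images = {'image_id': 42, 'kernel_id': 7, 'ramdisk_id': 8}

    # kernel/ramdisk ids are plain integers, so they can be used directly
    # as cache file names instead of being hashed first
    kernel_fname = disk_images['kernel_id']
    ramdisk_fname = disk_images['ramdisk_id']

    # the root image still uses a sha1 hexdigest as its cache key
    root_fname = hashlib.sha1(
        str(disk_images['image_id']).encode('utf-8')).hexdigest()

    print(kernel_fname, ramdisk_fname, root_fname)
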
--- nova/virt/libvirt_conn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9e66c3b48..25ba0bc8d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,7 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = hashlib.sha1(disk_images['kernel_id']).hexdigest() + fname = disk_images['kernel_id'] self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -852,7 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = hashlib.sha1(disk_images['ramdisk_id']).hexdigest() + fname = disk_images['ramdisk_id'] self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, -- cgit From 68426df2287c24efc3d327d12371911ac29d117e Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 19 May 2011 16:16:06 -0400 Subject: further refactoring of wsgi module; adding documentation and tests --- nova/api/direct.py | 4 +- nova/api/openstack/accounts.py | 2 +- nova/api/openstack/backup_schedules.py | 4 +- nova/api/openstack/consoles.py | 2 +- nova/api/openstack/extensions.py | 8 +- nova/api/openstack/faults.py | 12 +- nova/api/openstack/flavors.py | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 4 +- nova/api/openstack/ips.py | 4 +- nova/api/openstack/limits.py | 4 +- nova/api/openstack/server_metadata.py | 2 +- nova/api/openstack/servers.py | 11 +- nova/api/openstack/users.py | 2 +- nova/api/openstack/versions.py | 5 +- nova/api/openstack/wsgi.py | 301 ++++++++++++++++++++----------- nova/api/openstack/zones.py | 4 +- nova/tests/api/openstack/test_servers.py | 50 ++--- nova/tests/api/openstack/test_wsgi.py | 104 ++++++++--- 19 files changed, 331 insertions(+), 196 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 5e6c7c882..ea20042a7 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -291,8 +291,8 @@ class ServiceWrapper(object): try: content_type = req.best_match_content_type() serializer = { - 'application/xml': nova.api.openstack.wsgi.XMLSerializer(), - 'application/json': nova.api.openstack.wsgi.JSONSerializer(), + 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(), + 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(), }[content_type] return serializer.serialize(result) except: diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index d8a9d1909..faff8bb2c 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -88,7 +88,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 4153c90c1..d08a4799c 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -60,8 +60,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 97304affe..56f79db60 100644 --- 
a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -97,7 +97,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 73f174e07..19147bbea 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -165,8 +165,8 @@ class ResponseExtensionController(object): except AttributeError: default_xmlns = None serializer = { - 'application/xml': wsgi.XMLSerializer(), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] body = serializer.serialize(res) headers = {"Content-Type": content_type} @@ -229,7 +229,7 @@ class ExtensionMiddleware(base_wsgi.Middleware): return _factory def _action_ext_resources(self, application, ext_mgr, mapper): - """Return a dict of ActionExtensionResource objects by collection.""" + """Return a dict of ActionExtensionResource-s by collection.""" action_resources = {} for action in ext_mgr.get_actions(): if not action.collection in action_resources.keys(): @@ -248,7 +248,7 @@ class ExtensionMiddleware(base_wsgi.Middleware): return action_resources def _response_ext_resources(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionResource objects by collection.""" + """Returns a dict of ResponseExtensionResource-s by collection.""" response_ext_resources = {} for resp_ext in ext_mgr.get_response_extensions(): if not resp_ext.key in response_ext_resources.keys(): diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index fd36f8f17..b9a23c126 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -61,9 +61,9 @@ class Fault(webob.exc.HTTPException): content_type = req.best_match_content_type() serializer = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) @@ -100,9 +100,9 @@ class OverLimitFault(webob.exc.HTTPException): metadata = {"attributes": {"overLimitFault": "code"}} serializer = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 46056a27a..9e98e6c27 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -86,7 +86,7 @@ def resource_factory(version='1.0'): }[version] serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns), + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 506b63acf..8acde9fe8 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -104,7 +104,7 @@ class Controller(object): def resource_factory(): 
serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5a03573d8..a9071ed8a 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -165,8 +165,8 @@ def resource_factory(version='1.0'): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata), } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 24612eafb..87c8c997a 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -72,8 +72,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 306048d8f..b0e093702 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -95,8 +95,8 @@ def resource_factory(version='1.0'): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, - metadata=metadata) + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata) } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index fb9449b4c..eff98c060 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -93,7 +93,7 @@ class Controller(object): def resource_factory(): serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 78f8bb1b7..8f39bd256 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -707,7 +707,7 @@ class ControllerV11(Controller): return common.XML_NS_V11 -class ServerCreateRequestXMLDeserializer(object): +class ServerXMLDeserializer(wsgi.XMLDeserializer): """ Deserializer to handle xml-formatted server create requests. 
@@ -715,7 +715,7 @@ class ServerCreateRequestXMLDeserializer(object): and personality attributes """ - def deserialize(self, string): + def create(self, string): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) server = self._extract_server(dom) @@ -812,14 +812,13 @@ def resource_factory(version='1.0'): }[version] serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=xmlns), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=xmlns), } deserializers = { - 'application/xml': ServerCreateRequestXMLDeserializer(), + 'application/xml': ServerXMLDeserializer(), } return wsgi.Resource(controller, serializers=serializers, deserializers=deserializers) - diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 35b6a502e..e14616349 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -106,7 +106,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index a8d785b52..9db160102 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -18,12 +18,11 @@ import webob import webob.dec -from nova import wsgi as base_wsgi import nova.api.openstack.views.versions from nova.api.openstack import wsgi -class Versions(wsgi.Resource, base_wsgi.Application): +class Versions(wsgi.Resource): def __init__(self): metadata = { "attributes": { @@ -33,7 +32,7 @@ class Versions(wsgi.Resource, base_wsgi.Application): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } super(Versions, self).__init__(None, serializers=serializers) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 97280c365..bd840a6f7 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -6,6 +6,7 @@ from xml.dom import minidom from nova import exception from nova import log as logging from nova import utils +from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' @@ -15,17 +16,17 @@ LOG = logging.getLogger('nova.api.openstack.wsgi') class Request(webob.Request): - def best_match_content_type(self, supported=None): - """Determine the requested content-type. + """Add some Openstack API-specific logic to the base webob.Request.""" - Based on the query extension then the Accept header. + def best_match_content_type(self): + """Determine the requested response content-type. - :param supported: list of content-types to override defaults + Based on the query extension then the Accept header. """ - supported = supported or ['application/json', 'application/xml'] - parts = self.path.rsplit('.', 1) + supported = ('application/json', 'application/xml') + parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) if ctype in supported: @@ -33,32 +34,52 @@ class Request(webob.Request): bm = self.accept.best_match(supported) + # default to application/json if we don't find a preference return bm or 'application/json' def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ if not "Content-Type" in self.headers: raise exception.InvalidContentType(content_type=None) allowed_types = ("application/xml", "application/json") - type = self.content_type + content_type = self.content_type - if type not in allowed_types: - raise exception.InvalidContentType(content_type=type) + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) else: - return type + return content_type -class JSONDeserializer(object): - def deserialize(self, datastring): - return utils.loads(datastring) +class TextDeserializer(object): + """Custom request body deserialization based on controller action name.""" + def deserialize(self, datastring, action=None): + """Find local deserialization method and parse request body.""" + try: + action_method = getattr(self, action) + except Exception: + action_method = self.default -class JSONSerializer(object): - def serialize(self, data): - return utils.dumps(data) + return action_method(datastring) + def default(self, datastring): + """Default deserialization code should live here""" + raise NotImplementedError() + + +class JSONDeserializer(TextDeserializer): + + def default(self, datastring): + return utils.loads(datastring) + + +class XMLDeserializer(TextDeserializer): -class XMLDeserializer(object): def __init__(self, metadata=None): """ :param metadata: information needed to deserialize xml into @@ -67,8 +88,7 @@ class XMLDeserializer(object): super(XMLDeserializer, self).__init__() self.metadata = metadata or {} - def deserialize(self, datastring): - """XML deserialization entry point.""" + def default(self, datastring): plurals = set(self.metadata.get('plurals', {})) node = minidom.parseString(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} @@ -95,18 +115,111 @@ class XMLDeserializer(object): return result -class XMLSerializer(object): +class RequestDeserializer(object): + """Break up a Request object into more useful pieces.""" + + def __init__(self, deserializers=None): + """ + :param deserializers: dictionary of content-type-specific deserializers + + """ + self.deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + + self.deserializers.update(deserializers or {}) + + def deserialize(self, request): + """Extract necessary pieces of the request. 
+ + :param request: Request object + :returns tuple of expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + if request.method.lower() in ('post', 'put'): + if len(request.body) == 0: + action_args['body'] = None + else: + content_type = request.get_content_type() + deserializer = self.get_deserializer(content_type) + + try: + body = deserializer.deserialize(request.body, action) + action_args['body'] = body + except exception.InvalidContentType: + action_args['body'] = None + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def get_deserializer(self, content_type): + try: + return self.deserializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] + + if 'format' in args: + del args['format'] + + return args + + except KeyError: + return {} + + +class DictSerializer(object): + """Custom response body serialization based on controller action name.""" + + def serialize(self, data, action=None): + """Find local serialization method and encode response body.""" + try: + action_method = getattr(self, action) + except Exception: + action_method = self.default + + return action_method(data) + + def default(self, data): + """Default serialization code should live here""" + raise NotImplementedError() + + +class JSONDictSerializer(DictSerializer): + + def default(self, data): + return utils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + def __init__(self, metadata=None, xmlns=None): """ :param metadata: information needed to deserialize xml into a dictionary. :param xmlns: XML namespace to include with serialized xml """ - super(XMLSerializer, self).__init__() + super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns - def serialize(self, data): + def default(self, data): # We expect data to contain a single key which is the XML root. root_key = data.keys()[0] doc = minidom.Document() @@ -171,75 +284,32 @@ class XMLSerializer(object): return result -class Resource(object): - """WSGI app that dispatched to methods. +class ResponseSerializer(object): + """Encode the necessary pieces into a response object""" - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon itself. All action methods - must, in addition to their normal parameters, accept a 'req' argument - which is the incoming wsgi.Request. They raise a webob.exc exception, - or return a dict which will be serialized by requested content type. 
+ def __init__(self, serializers=None): + """ + :param serializers: dictionary of content-type-specific serializers - """ - def __init__(self, controller, serializers=None, deserializers=None): + """ self.serializers = { - 'application/xml': XMLSerializer(), - 'application/json': JSONSerializer(), + 'application/xml': XMLDictSerializer(), + 'application/json': JSONDictSerializer(), } self.serializers.update(serializers or {}) - self.deserializers = { - 'application/xml': XMLDeserializer(), - 'application/json': JSONDeserializer(), - } - self.deserializers.update(deserializers or {}) - - self.controller = controller - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """Call the method specified in req.environ by RoutesMiddleware.""" - LOG.debug("%s %s" % (request.method, request.url)) - - try: - action, action_args, accept = self.deserialize_request(request) - except exception.InvalidContentType: - return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - - result = self.dispatch(request, action, action_args) - - response = self.serialize_response(accept, result) - - try: - msg_dict = dict(url=request.url, status=response.status_int) - msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - except AttributeError: - msg_dict = dict(url=request.url) - msg = _("%(url)s returned a fault") - - LOG.debug(msg) - - return response - - def dispatch(self, request, action, action_args): - controller_method = getattr(self.controller, action) - return controller_method(req=request, **action_args) - - def serialize_response(self, content_type, response_body): + def serialize(self, response_data, content_type): """Serialize a dict into a string and wrap in a wsgi.Request object. + :param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body - :param response_body: dict produced by the Controller """ - if not type(response_body) is dict: - return response_body - response = webob.Response() response.headers['Content-Type'] = content_type serializer = self.get_serializer(content_type) - response.body = serializer.serialize(response_body) + response.body = serializer.serialize(response_data) return response @@ -249,50 +319,63 @@ class Resource(object): except Exception: raise exception.InvalidContentType(content_type=content_type) - def deserialize_request(self, request): - """Parse a wsgi request into a set of params we care about. - :param request: wsgi.Request object +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. 
+ """ + def __init__(self, controller, serializers=None, deserializers=None): """ - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) + :param controller: object that implement methods created by routes lib + :param serializers: dict of content-type specific text serializers + :param deserializers: dict of content-type specific text deserializers - if request.method.lower() in ('post', 'put'): - if len(request.body) == 0: - action_args['body'] = None - else: - content_type = request.get_content_type() - deserializer = self.get_deserializer(content_type) + """ + self.controller = controller + self.serializer = ResponseSerializer(serializers) + self.deserializer = RequestDeserializer(deserializers) - try: - action_args['body'] = deserializer.deserialize(request.body) - except exception.InvalidContentType: - action_args['body'] = None + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" - accept = self.get_expected_content_type(request) + LOG.debug("%s %s" % (request.method, request.url)) - return (action, action_args, accept) + try: + action, action_args, accept = self.deserializer.deserialize( + request) + except exception.InvalidContentType: + return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - def get_expected_content_type(self, request): - return request.best_match_content_type() + action_result = self.dispatch(request, action, action_args) - def get_action_args(self, request_environment): - try: - args = request_environment['wsgiorg.routing_args'][1].copy() + #TODO(bcwaldon): find a more elegant way to pass through non-dict types + if type(action_result) is dict: + response = self.serializer.serialize(action_result, accept) + else: + response = action_result - del args['controller'] + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError: + msg_dict = dict(url=request.url) + msg = _("%(url)s returned a fault") - if 'format' in args: - del args['format'] + LOG.debug(msg) - return args + return response - except KeyError: - return {} + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" - def get_deserializer(self, content_type): - try: - return self.deserializers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) + controller_method = getattr(self.controller, action) + return controller_method(req=request, **action_args) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index d17ab7a9b..e750fc230 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -101,8 +101,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 15f376f74..31571fc46 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -993,6 +993,14 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 501) + def test_server_change_password_xml(self): + req = webob.Request.blank('/v1.0/servers/1/action') + 
req.method = 'POST' + req.content_type = 'application/xml' + req.body = '' +# res = req.get_response(fakes.wsgi_app()) +# self.assertEqual(res.status_int, 501) + def test_server_change_password_v1_1(self): class MockSetAdminPassword(object): @@ -1375,13 +1383,13 @@ class ServersTest(test.TestCase): class TestServerCreateRequestXMLDeserializer(unittest.TestCase): def setUp(self): - self.deserializer = servers.ServerCreateRequestXMLDeserializer() + self.deserializer = servers.ServerXMLDeserializer() def test_minimal_request(self): serial_request = """ """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1395,7 +1403,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1410,7 +1418,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1426,7 +1434,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1443,7 +1451,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1461,7 +1469,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): aabbccdd """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1471,7 +1479,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> aabbccdd abcd""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}, {"path": "/etc/sudoers", "contents": "abcd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1487,7 +1495,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): anything """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1496,7 +1504,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): aabbccdd""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1505,7 +1513,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = 
self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1514,7 +1522,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1526,7 +1534,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): beta """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1539,7 +1547,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): bar """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta", "foo": "bar"} self.assertEquals(request["server"]["metadata"], expected) @@ -1551,7 +1559,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": ""} self.assertEquals(request["server"]["metadata"], expected) @@ -1564,7 +1572,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "", "delta": ""} self.assertEquals(request["server"]["metadata"], expected) @@ -1576,7 +1584,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): beta """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1589,7 +1597,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): gamma """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "gamma"} self.assertEquals(request["server"]["metadata"], expected) @@ -1602,7 +1610,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): baz """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"foo": "baz"} self.assertEquals(request["server"]["metadata"], expected) @@ -1649,7 +1657,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", }, ], }} - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEqual(request, expected) def test_request_xmlser_with_flavor_image_ref(self): @@ -1659,7 +1667,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", imageRef="http://localhost:8774/v1.1/images/1" flavorRef="http://localhost:8774/v1.1/flavors/1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEquals(request["server"]["flavorRef"], "http://localhost:8774/v1.1/flavors/1") self.assertEquals(request["server"]["imageRef"], diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 430dafe77..6c57d3e4f 
100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -76,26 +76,56 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") -class SerializationTest(test.TestCase): +class DictSerializerTest(test.TestCase): + def test_dispatch(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'update'), 'trousers') + + +class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' xmlns = "testing xmlns" - serializer = wsgi.XMLSerializer(xmlns="asdf") + serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_xml) + +class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.JSONSerializer() + serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_json) -class DeserializationTest(test.TestCase): +class TextDeserializerTest(test.TestCase): + def test_dispatch(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers') + + +class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", @@ -112,6 +142,8 @@ class DeserializationTest(test.TestCase): deserializer = wsgi.JSONDeserializer() self.assertEqual(deserializer.deserialize(data), as_dict) + +class XMLDeserializerTest(test.TestCase): def test_xml(self): xml = """ @@ -137,7 +169,7 @@ class DeserializationTest(test.TestCase): self.assertEqual(deserializer.deserialize(xml), as_dict) -class ResourceSerializerTest(test.TestCase): +class ResponseSerializerTest(test.TestCase): def setUp(self): class JSONSerializer(object): def serialize(self, data): @@ -152,40 +184,32 @@ class ResourceSerializerTest(test.TestCase): 'application/XML': XMLSerializer(), } - self.resource = wsgi.Resource(None, serializers=self.serializers) + self.serializer = wsgi.ResponseSerializer(serializers=self.serializers) def tearDown(self): pass def test_get_serializer(self): - self.assertEqual(self.resource.get_serializer('application/json'), + self.assertEqual(self.serializer.get_serializer('application/json'), self.serializers['application/json']) def test_get_serializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.get_serializer, + self.serializer.get_serializer, 'application/unknown') - def test_serialize_response_dict(self): - response = self.resource.serialize_response('application/json', {}) + def test_serialize_response(self): + response = self.serializer.serialize({}, 'application/json') 
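# (Aside, not part of the patch: the test_dispatch cases above only assert the
#  behaviour that serialize()/deserialize() route to a handler method named
#  after the requested action and fall back to default(). The real
#  nova.api.openstack.wsgi.DictSerializer/TextDeserializer classes are not
#  reproduced here; the following is a minimal standalone sketch of that
#  dispatch pattern, with made-up names.)

class ExampleDictSerializer(object):
    """Serialize data, picking a handler method by action name."""

    def serialize(self, data, action='default'):
        # Route to a handler named after the action ('create', 'update', ...)
        # and fall back to default() when no such handler exists.
        return getattr(self, action, self.default)(data)

    def default(self, data):
        return str(data)


serializer = ExampleDictSerializer()
serializer.create = lambda data: 'pants'
serializer.default = lambda data: 'trousers'
assert serializer.serialize({}, 'create') == 'pants'     # action handler wins
assert serializer.serialize({}, 'update') == 'trousers'  # falls back to default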
self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, 'pew_json') - def test_serialize_response_non_dict(self): - response = self.resource.serialize_response('application/json', 'a') - self.assertEqual(response, 'a') - def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.serialize_response, + self.serializer.serialize, 'application/unknown', {}) - def test_serialize_response_non_dict_to_unknown_content_type(self): - response = self.resource.serialize_response('application/unknown', 'a') - self.assertEqual(response, 'a') - -class ResourceDeserializerTest(test.TestCase): +class RequestDeserializerTest(test.TestCase): def setUp(self): class JSONDeserializer(object): def deserialize(self, data): @@ -200,24 +224,25 @@ class ResourceDeserializerTest(test.TestCase): 'application/XML': XMLDeserializer(), } - self.resource = wsgi.Resource(None, deserializers=self.deserializers) + self.deserializer = wsgi.RequestDeserializer( + deserializers=self.deserializers) def tearDown(self): pass def test_get_deserializer(self): - self.assertEqual(self.resource.get_deserializer('application/json'), - self.deserializers['application/json']) + expected = self.deserializer.get_deserializer('application/json') + self.assertEqual(expected, self.deserializers['application/json']) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.get_deserializer, + self.deserializer.get_deserializer, 'application/unknown') def test_get_expected_content_type(self): request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' - self.assertEqual(self.resource.get_expected_content_type(request), + self.assertEqual(self.deserializer.get_expected_content_type(request), 'application/json') def test_get_action_args(self): @@ -232,17 +257,38 @@ class ResourceDeserializerTest(test.TestCase): expected = {'action': 'update', 'id': 12} - self.assertEqual(self.resource.get_action_args(env), expected) + self.assertEqual(self.deserializer.get_action_args(env), expected) - def test_deserialize_request(self): + def test_deserialize(self): def fake_get_routing_args(request): return {'action': 'create'} - self.resource.get_action_args = fake_get_routing_args + self.deserializer.get_action_args = fake_get_routing_args request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/xml' - deserialized = self.resource.deserialize_request(request) + deserialized = self.deserializer.deserialize(request) expected = ('create', {}, 'application/xml') self.assertEqual(expected, deserialized) + + +class ResourceTest(test.TestCase): + def test_dispatch(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + actual = resource.dispatch(None, 'index', {'pants': 'off'}) + expected = 'off' + self.assertEqual(actual, expected) + + def test_dispatch_unknown_controller_action(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + self.assertRaises(AttributeError, resource.dispatch, + None, 'create', {}) -- cgit From 74bae1b1e2b298ef8425f7cb1aefd3826db40147 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 19 May 2011 13:50:11 -0700 Subject: Separate out tests for when unfilter is called from iptables vs. nwfilter driver. 
Re: lp783705 --- nova/tests/test_virt.py | 65 ++++++++++++++++++++++++++++++++++------------- nova/virt/libvirt_conn.py | 22 ++++++++-------- 2 files changed, 58 insertions(+), 29 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index babb5de9b..3b5a3867d 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -657,6 +657,21 @@ class LibvirtConnTestCase(test.TestCase): super(LibvirtConnTestCase, self).tearDown() +class FakeNWFilter: + def __init__(self): + self.undefine_call_count = 0 + + def undefine(self): + self.undefine_call_count += 1 + pass + + def _nwfilterLookupByName(self, ignore): + return self + + def _filterDefineXMLMock(self, xml): + return True + + class IptablesFirewallTestCase(test.TestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() @@ -869,6 +884,35 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + def test_unfilter_instance_undefines_nwfilters(self): + admin_ctxt = context.get_admin_context() + + fakefilter = FakeNWFilter() + self.fw.nwfilter._conn.nwfilterDefineXML =\ + fakefilter._filterDefineXMLMock + self.fw.nwfilter._conn.nwfilterLookupByName =\ + fakefilter._nwfilterLookupByName + + instance_ref = self._create_instance_ref() + inst_id = instance_ref['id'] + instance = db.instance_get(self.context, inst_id) + + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': inst_id}) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + self.fw.unfilter_instance(instance) + + # should attempt to undefine just the instance filter + self.assertEquals(fakefilter.undefine_call_count, 1) + + db.instance_destroy(admin_ctxt, instance_ref['id']) + class NWFilterTestCase(test.TestCase): def setUp(self): @@ -1047,26 +1091,11 @@ class NWFilterTestCase(test.TestCase): self.assertEquals(len(result), 3) def test_unfilter_instance_undefines_nwfilters(self): - class FakeNWFilter: - def __init__(self): - self.undefine_call_count = 0 - - def undefine(self): - self.undefine_call_count += 1 - pass - - fakefilter = FakeNWFilter() - - def _nwfilterLookupByName(ignore): - return fakefilter - - def _filterDefineXMLMock(xml): - return True - admin_ctxt = context.get_admin_context() - self.fw._conn.nwfilterDefineXML = _filterDefineXMLMock - self.fw._conn.nwfilterLookupByName = _nwfilterLookupByName + fakefilter = FakeNWFilter() + self.fw._conn.nwfilterDefineXML = fakefilter._filterDefineXMLMock + self.fw._conn.nwfilterLookupByName = fakefilter._nwfilterLookupByName instance_ref = self._create_instance() inst_id = instance_ref['id'] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 706973176..f808a4b7b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1834,7 +1834,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, remove_secgroup=True): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1846,19 +1846,19 @@ class NWFilterFirewall(FirewallDriver): 
self._conn.nwfilterLookupByName(instance_filter_name).\ undefine() except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + LOG.debug(_('The nwfilter(%(instance_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name).\ - undefine() - except libvirt.libvirtError: - # This will happen if called by IptablesFirewallDriver - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) for ' - '%(instance_name)s is not found.') % locals()) + if remove_secgroup: + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2022,7 +2022,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, False) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From a1869741689817168c75046f2f81ee9761956cbc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 18:05:38 -0400 Subject: Fail early if requested imageRef does not exist when creating a server. --- nova/api/openstack/servers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a4e679242..337c6ced8 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,10 +144,8 @@ class Controller(common.OpenstackController): (image_service, image_id) = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - - #TODO: need to assert image exists a better way - #image_id = common.get_image_id_from_image_hash(image_service, - #context, image_ref) + image_set = set([x['id'] for x in image_service.index(context)]) + assert image_id in image_set except: msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) -- cgit From e16b2d22dc4e6e24c3bf5150a0830661933aad29 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 20 May 2011 04:14:02 -0400 Subject: Fixed some tests. --- nova/api/openstack/common.py | 28 ---------------------------- nova/api/openstack/servers.py | 6 +++--- nova/exception.py | 4 ++++ nova/flags.py | 3 --- nova/image/fake.py | 12 ++++++++++++ nova/tests/api/openstack/test_servers.py | 14 +++++++++----- nova/tests/integrated/integrated_helpers.py | 7 ++++++- nova/tests/test_quota.py | 8 ++++---- nova/utils.py | 16 +++++++--------- 9 files changed, 45 insertions(+), 53 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..a89594c13 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -100,34 +100,6 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): return items[start_index:range_end] -def get_image_id_from_image_hash(image_service, context, image_hash): - """Given an Image ID Hash, return an objectstore Image ID. - - image_service - reference to objectstore compatible image service. 
- context - security context for image service requests. - image_hash - hash of the image ID. - """ - - # FIX(sandy): This is terribly inefficient. It pulls all images - # from objectstore in order to find the match. ObjectStore - # should have a numeric counterpart to the string ID. - try: - items = image_service.detail(context) - except NotImplementedError: - items = image_service.index(context) - for image in items: - image_id = image['id'] - try: - if abs(hash(image_id)) == int(image_hash): - return image_id - except ValueError: - msg = _("Requested image_id has wrong format: %s," - "should have numerical format") % image_id - LOG.error(msg) - raise Exception(msg) - raise exception.ImageNotFound(image_id=image_hash) - - def get_id_from_href(href): """Return the id portion of a url as an int. diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 337c6ced8..31c1e86c0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,10 +144,10 @@ class Controller(common.OpenstackController): (image_service, image_id) = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - image_set = set([x['id'] for x in image_service.index(context)]) - assert image_id in image_set + images = set([str(x['id']) for x in image_service.index(context)]) + assert str(image_id) in images except: - msg = _("Can not find requested image") + msg = _("Cannot find requested image %s") % image_ref return faults.Fault(exc.HTTPBadRequest(msg)) personality = env['server'].get('personality') diff --git a/nova/exception.py b/nova/exception.py index cf6069454..4c977aca0 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -279,6 +279,10 @@ class DiskNotFound(NotFound): message = _("No disk at %(location)s") +class InvalidImageRef(Invalid): + message = _("Invalid image ref %(image_ref)s.") + + class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") diff --git a/nova/flags.py b/nova/flags.py index ee5adae32..32cb6efa8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,9 +362,6 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + - 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') diff --git a/nova/image/fake.py b/nova/image/fake.py index 2a60c7743..659c16557 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -79,10 +79,22 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} + + image5 = {'id': '3', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) + self.create(None, image5) super(FakeImageService, self).__init__() def index(self, context): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index bced2b910..22beef05f 100644 --- 
a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -29,6 +29,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -37,6 +38,7 @@ from nova.compute import power_state import nova.db.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata +import nova.image.fake import nova.rpc from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -464,7 +466,12 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' + def fake_image_service(*args): + return nova.image.fake.FakeImageService() + + FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.stubs.Set( + nova.image.glance, 'GlanceImageService', fake_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -476,8 +483,6 @@ class ServersTest(test.TestCase): fake_method) self.stubs.Set(nova.api.openstack.servers.Controller, "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) - self.stubs.Set(nova.api.openstack.common, - "get_image_id_from_image_hash", image_id_from_hash) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) def _test_create_instance_helper(self): @@ -1707,11 +1712,10 @@ class TestServerInstanceCreation(test.TestCase): return stub_method compute_api = MockComputeAPI() + FLAGS.image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) self.stubs.Set(nova.api.openstack.servers.Controller, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) - self.stubs.Set(nova.api.openstack.common, - 'get_image_id_from_image_hash', make_stub_method(2)) return compute_api def _create_personality_request_dict(self, personality_files): diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index e6efc16c5..5871a498c 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -27,6 +27,7 @@ from nova import flags from nova import service from nova import test # For the flags from nova.auth import manager +import nova.image.glance from nova.log import logging from nova.tests.integrated.api import client @@ -151,6 +152,11 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) + def fake_image_service(*args): + return nova.image.fake.FakeImageService() + self.stubs.Set( + nova.image.glance, 'GlanceImageService', fake_image_service) + # set up services self.start_service('compute') self.start_service('volume') @@ -185,7 +191,6 @@ class _IntegratedTestBase(test.TestCase): """An opportunity to setup flags, before the services are started.""" f = {} f['image_service'] = 'nova.image.fake.FakeImageService' - f['glance_image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 9ede0786f..02b641a47 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -280,18 +280,18 @@ class QuotaTestCase(test.TestCase): FLAGS.quota_max_injected_files) def _create_with_injected_files(self, files): - FLAGS.glance_image_service = 
'nova.image.fake.FakeImageService' + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type=inst_type, image_id='fake', + instance_type=inst_type, image_id='3', injected_files=files) def test_no_injected_files(self): - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') - api.create(self.context, instance_type=inst_type, image_id='fake') + api.create(self.context, instance_type=inst_type, image_id='3') def test_max_injected_files(self): files = [] diff --git a/nova/utils.py b/nova/utils.py index 85934813e..3802f50c4 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -748,11 +748,7 @@ def parse_image_ref(image_ref): o = urlparse(image_ref) port = o.port or 80 host = o.netloc.split(':', 1)[0] - image_id = o.path.split('/')[-1] - - if is_int(image_id): - image_id = int(image_id) - + image_id = int(o.path.split('/')[-1]) return (image_id, host, port) @@ -776,8 +772,10 @@ def get_image_service(image_ref): if is_int(image_ref): return (get_default_image_service(), int(image_ref)) - (image_id, host, port) = parse_image_ref(image_ref) - glance_client = import_class('nova.image.glance.GlanceClient')(host, - port) - image_service = import_class(FLAGS.glance_image_service)(glance_client) + try: + (image_id, host, port) = parse_image_ref(image_ref) + except: + raise exception.InvalidImageRef(image_ref=image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) -- cgit From 0f191404fee42b9225f364af12242812798ff08a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 11:42:38 -0400 Subject: fixed silly issue with variable needing to be named 'id' for the url mapper, also caught new exception type where needed --- nova/api/openstack/images.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index ac02d63c5..2a3f9e070 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -74,7 +74,7 @@ class Controller(common.OpenstackController): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def show(self, req, image_id): + def show(self, req, id): """Return detailed information about a specific image. 
:param req: `wsgi.Request` object @@ -84,11 +84,14 @@ class Controller(common.OpenstackController): try: (image_service, service_image_id) = utils.get_image_service( - image_id) + id) image = image_service.show(context, service_image_id) except exception.NotFound: explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) + except exception.InvalidImageRef: + explanation = _("Image not found.") + raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) return dict(image=self.get_builder(req).build(image, detail=True)) -- cgit From f1da26ec9af6f6adffb7b6bfdc64f9702db93b56 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 11:50:00 -0400 Subject: fix pep8 issue --- nova/api/openstack/images.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 2a3f9e070..5508e7d50 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,7 +50,8 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or utils.get_default_image_service() + self._image_service = image_service or \ + utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. -- cgit From 0bb2d0085e1fb3ba22a408f405f4539aa07b226c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 May 2011 08:59:07 -0700 Subject: make nwfilter mock more 'realistic' by having it remember which filters have been defined --- nova/tests/test_virt.py | 56 +++++++++++++++++++++++++++++++++++++++-------- nova/virt/libvirt_conn.py | 17 +++++++------- 2 files changed, 55 insertions(+), 18 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 4bc5fed16..5e85e3a2f 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -659,16 +659,26 @@ class LibvirtConnTestCase(test.TestCase): class FakeNWFilter: def __init__(self): - self.undefine_call_count = 0 + self.filters = {} - def undefine(self): - self.undefine_call_count += 1 - pass - - def _nwfilterLookupByName(self, ignore): - return self + def _nwfilterLookupByName(self, name): + if name in self.filters: + return self.filters[name] + raise libvirt.libvirtError('Filter Not Found') def _filterDefineXMLMock(self, xml): + class FakeNWFilterInternal: + def __init__(self, parent, name): + self.name = name + self.parent = parent + + def undefine(self): + del self.parent.filters[self.name] + pass + tree = xml_to_tree(xml) + name = tree.get('name') + if name not in self.filters: + self.filters[name] = FakeNWFilterInternal(self, name) return True @@ -689,6 +699,20 @@ class IptablesFirewallTestCase(test.TestCase): self.fw = libvirt_conn.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) + def lazy_load_library_exists(self): + """check if libvirt is available.""" + # try to connect libvirt. if fail, skip test. 
+ try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + return True + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) @@ -895,6 +919,10 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.do_refresh_security_group_rules("fake") def test_unfilter_instance_undefines_nwfilter(self): + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + admin_ctxt = context.get_admin_context() fakefilter = FakeNWFilter() @@ -916,10 +944,11 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance) # should attempt to undefine just the instance filter - self.assertEquals(fakefilter.undefine_call_count, 1) + self.assertEqual(original_filter_count - len(fakefilter.filters), 1) db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1109,6 +1138,12 @@ class NWFilterTestCase(test.TestCase): instance_ref = self._create_instance() inst_id = instance_ref['id'] + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) ip = '10.11.12.13' @@ -1120,9 +1155,12 @@ class NWFilterTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) self.fw.apply_instance_filter(instance) + original_filter_count = len(fakefilter.filters) + print fakefilter.filters.keys() self.fw.unfilter_instance(instance) + print fakefilter.filters.keys() # should attempt to undefine 2 filters: instance and instance-secgroup - self.assertEquals(fakefilter.undefine_call_count, 2) + self.assertEqual(original_filter_count - len(fakefilter.filters), 2) db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9241c1d9e..f27398aa3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1838,7 +1838,7 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, remove_secgroup=True): + def unfilter_instance(self, instance): """Clear out the nwfilter rules.""" network_info = _get_network_info(instance) instance_name = instance.name @@ -1856,13 +1856,12 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_name =\ '%s-secgroup' % (self._instance_filter_name(instance)) - if remove_secgroup: - try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ - .undefine() - except libvirt.libvirtError: - LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' - 'for %(instance_name)s is not found.') % locals()) + try: + self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ + .undefine() + except libvirt.libvirtError: + LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' + 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): """ @@ -2028,7 +2027,7 @@ class IptablesFirewallDriver(FirewallDriver): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) 
self.iptables.apply() - self.nwfilter.unfilter_instance(instance, False) + self.nwfilter.unfilter_instance(instance) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) -- cgit From 5c205bb5ef1565db4e52af538cf0d6b73cbeda37 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 May 2011 09:09:03 -0700 Subject: fix comments --- nova/tests/test_virt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 5e85e3a2f..90c6de5a9 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -659,6 +659,7 @@ class LibvirtConnTestCase(test.TestCase): class FakeNWFilter: def __init__(self): + self.filters = {} def _nwfilterLookupByName(self, name): @@ -947,7 +948,7 @@ class IptablesFirewallTestCase(test.TestCase): original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance) - # should attempt to undefine just the instance filter + # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1160,7 +1161,7 @@ class NWFilterTestCase(test.TestCase): self.fw.unfilter_instance(instance) print fakefilter.filters.keys() - # should attempt to undefine 2 filters: instance and instance-secgroup + # should undefine 2 filters: instance and instance-secgroup self.assertEqual(original_filter_count - len(fakefilter.filters), 2) db.instance_destroy(admin_ctxt, instance_ref['id']) -- cgit From 7ed71092d513bc621be539e612e6b4e66849b888 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 14:22:58 -0400 Subject: combined the exception catching to eliminate duplication --- nova/api/openstack/images.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5508e7d50..523b3f431 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -87,10 +87,7 @@ class Controller(common.OpenstackController): (image_service, service_image_id) = utils.get_image_service( id) image = image_service.show(context, service_image_id) - except exception.NotFound: - explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - except exception.InvalidImageRef: + except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) -- cgit From 3fc3b7537cc1af2783829a2caaca272e83d6d3e8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 20 May 2011 14:42:19 -0400 Subject: renaming resource_factory to create_resource --- nova/api/openstack/__init__.py | 34 ++++++++++++++++----------------- nova/api/openstack/accounts.py | 2 +- nova/api/openstack/backup_schedules.py | 2 +- nova/api/openstack/consoles.py | 2 +- nova/api/openstack/flavors.py | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 2 +- nova/api/openstack/ips.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/server_metadata.py | 2 +- nova/api/openstack/servers.py | 2 +- nova/api/openstack/shared_ip_groups.py | 2 +- nova/api/openstack/users.py | 2 +- nova/api/openstack/zones.py | 2 +- nova/tests/api/openstack/test_limits.py | 4 ++-- 15 files changed, 32 insertions(+), 32 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index fbbd99cb9..4419d0748 100644 --- a/nova/api/openstack/__init__.py +++ 
b/nova/api/openstack/__init__.py @@ -99,19 +99,19 @@ class APIRouter(base_wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", - controller=zones.resource_factory(), + controller=zones.create_resource(), collection={'detail': 'GET', 'info': 'GET'}), mapper.resource("user", "users", - controller=users.resource_factory(), + controller=users.create_resource(), collection={'detail': 'GET'}) mapper.resource("account", "accounts", - controller=accounts.resource_factory(), + controller=accounts.create_resource(), collection={'detail': 'GET'}) mapper.resource("console", "consoles", - controller=consoles.resource_factory(), + controller=consoles.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) @@ -124,31 +124,31 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.resource_factory('1.0'), + controller=servers.create_resource('1.0'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.resource_factory('1.0'), + controller=images.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", - controller=flavors.resource_factory('1.0'), + controller=flavors.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, - controller=shared_ip_groups.resource_factory()) + controller=shared_ip_groups.create_resource()) mapper.resource("backup_schedule", "backup_schedule", - controller=backup_schedules.resource_factory(), + controller=backup_schedules.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("limit", "limits", - controller=limits.resource_factory('1.0')) + controller=limits.create_resource('1.0')) - mapper.resource("ip", "ips", controller=ips.resource_factory(), + mapper.resource("ip", "ips", controller=ips.create_resource(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', collection_name='servers')) @@ -160,27 +160,27 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.resource_factory('1.1'), + controller=servers.create_resource('1.1'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.resource_factory('1.1'), + controller=images.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("image_meta", "meta", - controller=image_metadata.resource_factory(), + controller=image_metadata.create_resource(), parent_resource=dict(member_name='image', collection_name='images')) mapper.resource("server_meta", "meta", - controller=server_metadata.resource_factory(), + controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("flavor", "flavors", - controller=flavors.resource_factory('1.1'), + controller=flavors.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("limit", "limits", - controller=limits.resource_factory('1.1')) + controller=limits.create_resource('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index faff8bb2c..0dcd37217 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -80,7 +80,7 
@@ class Controller(object): return dict(account=_translate_keys(account)) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "account": ["id", "name", "description", "manager"], diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index d08a4799c..71a14d4ce 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -52,7 +52,7 @@ class Controller(object): return faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): metadata = { 'attributes': { 'backupSchedule': [], diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 56f79db60..bccf04d8f 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -89,7 +89,7 @@ class Controller(object): return exc.HTTPAccepted() -def resource_factory(): +def create_resource(): metadata = { 'attributes': { 'console': [], diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 9e98e6c27..a21ff6cb2 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -74,7 +74,7 @@ class ControllerV11(Controller): return views.flavors.ViewBuilderV11(base_url) -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 8acde9fe8..88e10168d 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -102,7 +102,7 @@ class Controller(object): self.image_service.update(context, image_id, img, None) -def resource_factory(): +def create_resource(): serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index a9071ed8a..3376f358a 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -145,7 +145,7 @@ class ControllerV11(Controller): return common.XML_NS_V11 -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 87c8c997a..abea71830 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -63,7 +63,7 @@ class Controller(object): return faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): metadata = { 'list_collections': { 'public': {'item_name': 'ip', 'item_key': 'addr'}, diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index b0e093702..2d9fe356f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -73,7 +73,7 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': LimitsControllerV10, '1.1': LimitsControllerV11, diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index eff98c060..b38b84a2a 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -91,7 +91,7 @@ class Controller(object): raise error -def resource_factory(): +def create_resource(): serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8f39bd256..bdd2960d9 100644 --- 
a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -784,7 +784,7 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): return "" -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index db178f2a2..4f11f8dfb 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -49,5 +49,5 @@ class Controller(object): raise faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index e14616349..50975fc1f 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -98,7 +98,7 @@ class Controller(object): return dict(user=_translate_keys(self.manager.get_user(id))) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "user": ["id", "name", "access", "secret", "admin"], diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index e750fc230..0475deb52 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -93,7 +93,7 @@ class Controller(object): return dict(zone=_scrub_zone(zone)) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "zone": ["id", "api_url", "name", "capabilities"], diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index db859c2f8..4cf857507 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -65,7 +65,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.resource_factory('1.0') + self.controller = limits.create_resource('1.0') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -178,7 +178,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.resource_factory('1.1') + self.controller = limits.create_resource('1.1') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" -- cgit From 2c16eb37822b3ebdb14ac36df26362636d0f5078 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 20 May 2011 16:36:10 -0400 Subject: minor cleanup --- nova/api/openstack/images.py | 3 --- nova/api/openstack/servers.py | 3 --- nova/tests/api/openstack/test_wsgi.py | 1 - 3 files changed, 7 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 3376f358a..7f5551664 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -141,9 +141,6 @@ class ControllerV11(Controller): base_url = request.application_url return images_view.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 - def create_resource(version='1.0'): controller = { diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bdd2960d9..313321d7d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -703,9 +703,6 @@ class ControllerV11(Controller): raise exc.HTTPBadRequest(msg) return password - def get_default_xmlns(self, req): - return common.XML_NS_V11 - class ServerXMLDeserializer(wsgi.XMLDeserializer): """ diff --git 
a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 6c57d3e4f..89603d82b 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -94,7 +94,6 @@ class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' - xmlns = "testing xmlns" serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') -- cgit From 0850945efd0c5d7341590acd109572b9caf89e18 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 20 May 2011 21:30:04 +0000 Subject: move init start position to 96 to allow openvswitch time to fully start --- plugins/xenserver/networking/etc/init.d/openvswitch-nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/init.d/openvswitch-nova b/plugins/xenserver/networking/etc/init.d/openvswitch-nova index e4dbdf4af..8672a69b8 100755 --- a/plugins/xenserver/networking/etc/init.d/openvswitch-nova +++ b/plugins/xenserver/networking/etc/init.d/openvswitch-nova @@ -2,7 +2,7 @@ # # openvswitch-nova # -# chkconfig: 2345 10 89 +# chkconfig: 2345 96 89 # description: Apply initial OVS flows for Nova # Copyright 2011 OpenStack LLC. -- cgit From 4a184103fef7b1209ecfe3a6aadeccb8fc08fa31 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 21 May 2011 02:04:29 -0400 Subject: No reason to modify the way file names are generated for kernel and ramdisk, since the kernel_id and ramdisk_id is still guaranteed to be ints. --- nova/virt/libvirt_conn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 25ba0bc8d..18f5e3aa9 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,7 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = disk_images['kernel_id'] + fname = '%08x' % int(disk_images['kernel_id']) self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -852,7 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = disk_images['ramdisk_id'] + fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, -- cgit From f1983479ae8d2483bdb73a494c9043f82928f189 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 21 May 2011 02:34:27 -0400 Subject: Minor cleanup --- nova/api/openstack/images.py | 3 +-- nova/api/openstack/servers.py | 4 ++-- nova/image/fake.py | 2 +- nova/virt/libvirt_conn.py | 1 - 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 523b3f431..bf9d3f49e 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -84,8 +84,7 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = utils.get_image_service( - id) + (image_service, service_image_id) = utils.get_image_service(id) image = image_service.show(context, service_image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 31c1e86c0..d5dee61a5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -141,7 +141,7 @@ 
class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - (image_service, image_id) = utils.get_image_service(image_ref) + image_service, image_id = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) images = set([str(x['id']) for x in image_service.index(context)]) @@ -559,7 +559,7 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - (image_service, service_image_id) = utils.get_image_service(image_id) + image_service, service_image_id = utils.get_image_service(image_id) image_meta = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub diff --git a/nova/image/fake.py b/nova/image/fake.py index 659c16557..939f855ac 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -35,7 +35,7 @@ FLAGS = flags.FLAGS class FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" - def __init__(self, client=None): + def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18f5e3aa9..8ba5d09ba 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -861,7 +861,6 @@ class LibvirtConnection(driver.ComputeDriver): project=project) root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() - size = FLAGS.minimum_root_size inst_type_id = inst['instance_type_id'] -- cgit From 58c18901ab27219248e64175f2745502499dc265 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sun, 22 May 2011 03:16:16 -0400 Subject: Removing utils.is_int() --- nova/api/openstack/views/servers.py | 2 +- nova/utils.py | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 70a942594..0fe9dbe4a 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -132,7 +132,7 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): image_id = inst['image_id'] - if utils.is_int(image_id): + if str(image_id).isdigit(): image_id = int(image_id) response['imageRef'] = image_id diff --git a/nova/utils.py b/nova/utils.py index 3802f50c4..ecca5303a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -728,10 +728,6 @@ def parse_server_string(server_str): return ('', '') -def is_int(x): - return re.match(r'\d+$', str(x)) - - def parse_image_ref(image_ref): """Parse an image href into composite parts. 
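# (Aside, illustrative only: for the image-ref cases handled above, the
#  str(x).isdigit() test used in place of the removed is_int() helper accepts
#  the same inputs -- a bare integer id, whether it arrives as an int or a
#  numeric string -- and rejects hrefs and anything non-numeric.)

for ref in (42, '42', 0):
    assert str(ref).isdigit()                   # treated as a plain image id
for ref in ('http://glance:9292/images/42', '4.2', '', 'abc'):
    assert not str(ref).isdigit()               # falls through to href parsing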
@@ -742,7 +738,7 @@ def parse_image_ref(image_ref): :param image_ref: href or id of an image """ - if is_int(image_ref): + if str(image_ref).isdigit(): return (int(image_ref), None, None) o = urlparse(image_ref) @@ -769,7 +765,7 @@ def get_image_service(image_ref): """ image_ref = image_ref or 0 - if is_int(image_ref): + if str(image_ref).isdigit(): return (get_default_image_service(), int(image_ref)) try: -- cgit From 1c315d233128f1013d1ec02c78acb36821f6c63d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 10:28:04 -0400 Subject: moved utils functions into nova/image/ --- bin/nova-manage | 3 +- nova/api/openstack/image_metadata.py | 3 +- nova/api/openstack/images.py | 9 ++-- nova/api/openstack/servers.py | 10 +++-- nova/compute/api.py | 7 ++- nova/image/__init__.py | 77 ++++++++++++++++++++++++++++++++ nova/image/s3.py | 3 +- nova/tests/api/openstack/test_servers.py | 1 + nova/utils.py | 49 -------------------- nova/virt/images.py | 3 +- nova/virt/libvirt_conn.py | 6 ++- 11 files changed, 107 insertions(+), 64 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3f3fd72a6..8a9be5d8f 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -78,6 +78,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import image from nova import log as logging from nova import quota from nova import rpc @@ -905,7 +906,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.get_default_image_service() + self.image_service = image.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index f6913ffc6..c51d7acf2 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -18,6 +18,7 @@ from webob import exc from nova import flags +from nova import image from nova import quota from nova import utils from nova import wsgi @@ -32,7 +33,7 @@ class Controller(common.OpenstackController): """The image metadata API controller for the Openstack API""" def __init__(self): - self.image_service = utils.get_default_image_service() + self.image_service = image.get_default_image_service() super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index bf9d3f49e..c61b5c6a6 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -18,6 +18,7 @@ import webob.exc from nova import compute from nova import exception from nova import flags +import nova.image from nova import log from nova import utils from nova.api.openstack import common @@ -51,7 +52,7 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() self._image_service = image_service or \ - utils.get_default_image_service() + nova.image.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. 
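# (Aside, usage sketch only -- not part of the patch. After this commit the
#  image helpers live in nova/image/__init__.py, added in full further below;
#  callers resolve either a bare integer id or a Glance href. The hostname
#  below is made up, and the href case assumes a reachable Glance endpoint.)

from nova import exception
import nova.image

# A bare id keeps using the service configured by FLAGS.image_service:
service, image_id = nova.image.get_image_service(42)

# An href is parsed into (id, host, port) and handed to Glance directly:
service, image_id = nova.image.get_image_service(
    'http://glance.example.com:9292/images/42')

# Malformed refs fail early with a typed exception instead of a bare ValueError:
try:
    nova.image.get_image_service('not/a/valid/ref')
except exception.InvalidImageRef:
    pass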
@@ -84,7 +85,8 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = utils.get_image_service(id) + (image_service, service_image_id) = nova.image.get_image_service( + id) image = image_service.show(context, service_image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") @@ -100,7 +102,8 @@ class Controller(common.OpenstackController): """ image_id = id context = req.environ['nova.context'] - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = nova.image.get_image_service( + image_id) image_service.delete(context, service_image_id) return webob.exc.HTTPNoContent() diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index d5dee61a5..181833a23 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from xml.dom import minidom from nova import compute from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import quota from nova import utils @@ -141,7 +142,7 @@ class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - image_service, image_id = utils.get_image_service(image_ref) + image_service, image_id = nova.image.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) images = set([str(x['id']) for x in image_service.index(context)]) @@ -559,12 +560,13 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_service, service_image_id = utils.get_image_service(image_id) - image_meta = image_service.show(context, service_image_id) + image_service, service_image_id = nova.image.get_image_service( + image_id) + image = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( - image_meta) + image) return kernel_id, ramdisk_id @staticmethod diff --git a/nova/compute/api.py b/nova/compute/api.py index 61adda13a..a9075ff8a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -26,6 +26,7 @@ import time from nova import db from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import network from nova import quota @@ -58,7 +59,9 @@ class API(base.Base): def __init__(self, image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): - self.image_service = image_service or utils.get_default_image_service() + self.image_service = image_service or \ + nova.image.get_default_image_service() + if not network_api: network_api = network.API() self.network_api = network_api @@ -154,7 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = utils.get_image_service( + (image_service, service_image_id) = nova.image.get_image_service( image_ref or image_id) image = image_service.show(context, service_image_id) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index e69de29bb..be692680a 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -0,0 +1,77 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# 
Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from urlparse import urlparse + + +from nova import exception +import nova.image.glance +from nova.utils import import_class +from nova import flags + + +FLAGS = flags.FLAGS + + +def parse_image_ref(image_ref): + """Parse an image href into composite parts. + + If the image_ref passed in is an integer, it will + return (image_ref, None, None), otherwise it will + return (image_id, host, port) + + :param image_ref: href or id of an image + + """ + if str(image_ref).isdigit(): + return (int(image_ref), None, None) + + o = urlparse(image_ref) + port = o.port or 80 + host = o.netloc.split(':', 1)[0] + image_id = int(o.path.split('/')[-1]) + return (image_id, host, port) + + +def get_default_image_service(): + ImageService = import_class(FLAGS.image_service) + return ImageService() + + +def get_image_service(image_ref): + """Get the proper image_service and id for the given image_ref. + + The image_ref param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_ref is an int, then the default image service is returned. + + :param image_ref: image ref/id for an image + :returns: a tuple of the form (image_service, image_id) + + """ + image_ref = image_ref or 0 + if str(image_ref).isdigit(): + return (get_default_image_service(), int(image_ref)) + + try: + (image_id, host, port) = parse_image_ref(image_ref) + except: + raise exception.InvalidImageRef(image_ref=image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) + return (image_service, image_id) diff --git a/nova/image/s3.py b/nova/image/s3.py index ed685ea51..7051b522a 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,6 +31,7 @@ import eventlet from nova import crypto from nova import exception from nova import flags +from nova import image from nova import utils from nova.auth import manager from nova.image import service @@ -46,7 +47,7 @@ class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): - self.service = service or utils.get_default_image_service() + self.service = service or image.get_default_image_service() self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 22beef05f..b2b0325c2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -607,6 +607,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) + print "RES BODY:", res.body server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) diff --git a/nova/utils.py b/nova/utils.py index ecca5303a..954947589 100644 --- a/nova/utils.py +++ 
b/nova/utils.py @@ -726,52 +726,3 @@ def parse_server_string(server_str): except: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') - - -def parse_image_ref(image_ref): - """Parse an image href into composite parts. - - If the image_ref passed in is an integer, it will - return (image_ref, None, None), otherwise it will - return (image_id, host, port) - - :param image_ref: href or id of an image - - """ - if str(image_ref).isdigit(): - return (int(image_ref), None, None) - - o = urlparse(image_ref) - port = o.port or 80 - host = o.netloc.split(':', 1)[0] - image_id = int(o.path.split('/')[-1]) - return (image_id, host, port) - - -def get_default_image_service(): - ImageService = import_class(FLAGS.image_service) - return ImageService() - - -def get_image_service(image_ref): - """Get the proper image_service and id for the given image_ref. - - The image_ref param can be an href of the form - http://myglanceserver:9292/images/42, or just an int such as 42. If the - image_ref is an int, then the default image service is returned. - - :param image_ref: image ref/id for an image - :returns: a tuple of the form (image_service, image_id) - - """ - image_ref = image_ref or 0 - if str(image_ref).isdigit(): - return (get_default_image_service(), int(image_ref)) - - try: - (image_id, host, port) = parse_image_ref(image_ref) - except: - raise exception.InvalidImageRef(image_ref=image_ref) - glance_client = nova.image.glance.GlanceClient(host, port) - image_service = nova.image.glance.GlanceImageService(glance_client) - return (image_service, image_id) diff --git a/nova/virt/images.py b/nova/virt/images.py index 45887f38d..f571a9949 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags +from nova import image from nova import log as logging from nova import utils @@ -36,7 +37,7 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = image.get_image_service(image_id) with open(path, "wb") as image_file: elevated = context.get_admin_context() metadata = image_service.get(elevated, service_image_id, image_file) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8ba5d09ba..8c31f9e27 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -58,6 +58,7 @@ from nova import context from nova import db from nova import exception from nova import flags +import nova.image from nova import ipv6 from nova import log as logging from nova import utils @@ -449,11 +450,12 @@ class LibvirtConnection(driver.ComputeDriver): to support this command. 
""" - image_service = utils.get_default_image_service() virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() - base = image_service.show(elevated, instance['image_id']) + (image_service, service_image_id) = nova.image.get_image_service( + instance['image_id']) + base = image_service.show(elevated, service_image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], -- cgit From ffac2aa8162ba5111a01b495d9dd7e43bfda4af4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 14:38:37 -0500 Subject: initial fudging in of swap disk --- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 18 ++++++++++++------ nova/virt/xenapi/vmops.py | 21 +++++++++++++-------- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 12 +++++++++--- 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 4833ccb07..d9306900d 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -37,7 +37,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9f6cd608c..c24fc7ba6 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -408,18 +408,24 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuid = session.wait_for_task(task, instance_id) + vdi_uuids = session.wait_for_task(task, instance_id) + primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') cls.scan_sr(session, instance_id, sr_ref) # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) - name_label = get_name_label_for_image(image) - session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid) + primary_name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") % locals()) - return vdi_uuid + + LOG.debug("=" * 100) + LOG.debug(rimary_vdi_uuid) + LOG.debug(swap_vdi_uuid) + return (primary_vdi_uuid, swap_vdi_uuid) @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0074444f8..4a01cac29 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,20 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) - return vdi_uuid + (primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, disk_image_type) + return (primary_vdi_uuid, swap_vdi_uuid) def spawn(self, instance, network_info=None): - vdi_uuid = self._create_disk(instance) - vm_ref = 
self._create_vm(instance, vdi_uuid, network_info) + vdi_uuid, swap_uuid = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, network_info=None): + def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -143,18 +143,20 @@ class VMOps(object): # Are we building from a pre-existing disk? vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + if swap_vdi_uuid: + swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, vdi_ref, disk_image_type, instance.os_type) @@ -163,6 +165,9 @@ class VMOps(object): VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, userdevice=0, bootable=True) + if swap_vdi_uuid: + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. 
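As a reading aid for the glance plugin diff that follows: _fixup_vhds() now reports a primary VDI UUID plus an optional swap VDI UUID, and the VM-creation path only attaches a swap VBD when the latter is present. The sketch below only mirrors that data shape; unpack_vdi_uuids is a hypothetical helper and the UUID strings are made up.

def unpack_vdi_uuids(result):
    # result mirrors what _fixup_vhds() returns in this commit
    primary = result['primary_vdi_uuid']
    swap = result.get('swap_vdi_uuid')   # None when no swap.vhd was staged
    return primary, swap

primary, swap = unpack_vdi_uuids({'primary_vdi_uuid': 'aaaa-1111',
                                  'swap_vdi_uuid': None})
assert swap is None

primary, swap = unpack_vdi_uuids({'primary_vdi_uuid': 'aaaa-1111',
                                  'swap_vdi_uuid': 'bbbb-2222'})
assert swap == 'bbbb-2222'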
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 4b45671ae..9d6ee78ab 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -177,8 +177,15 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): else: assert_vhd_not_hidden(base_copy_path) + # If we find a swap.vhd, go ahead and copy it into the SR + swap_uuid = None + orig_swap_path = os.path.join(staging_path, 'swap.vhd') + if os.path.exists(orig_swap_path): + swap_path, swap_uuid = rename_with_uuid(orig_swap_path) + move_into_sr(swap_path) + move_into_sr(base_copy_path) - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=swap_uuid) def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): @@ -324,8 +331,7 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) - return vdi_uuid + return _fixup_vhds(sr_path, staging_path, uuid_stack) finally: _cleanup_staging_area(staging_path) -- cgit From 4d1fe953bbfb810f56224b9faae4c10d0d8dfac0 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 16:38:30 -0400 Subject: got rid of print statement --- nova/tests/api/openstack/test_servers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b2b0325c2..22beef05f 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -607,7 +607,6 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) - print "RES BODY:", res.body server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) -- cgit From 94766fac0f5fdb3c7847b1129a8f05948a97f887 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 20:42:54 +0000 Subject: cleanup and fixes --- nova/virt/xenapi/vm_utils.py | 18 +++++----- nova/virt/xenapi/vmops.py | 40 +++++++++++++--------- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 12 ++++--- 3 files changed, 42 insertions(+), 28 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c24fc7ba6..f1f7b8249 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -410,7 +410,7 @@ class VMHelper(HelperBase): task = session.async_call_plugin('glance', 'download_vhd', kwargs) vdi_uuids = session.wait_for_task(task, instance_id) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) cls.scan_sr(session, instance_id, sr_ref) @@ -419,13 +419,14 @@ class VMHelper(HelperBase): primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") - % locals()) + LOG.debug(_("xapi 'download_vhd' returned VDI UUID " + "%(primary_vdi_uuid)s") % locals()) + if swap_vdi_uuid: + LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID " + "%(swap_vdi_uuid)s") % locals()) - LOG.debug("=" * 100) - LOG.debug(rimary_vdi_uuid) - LOG.debug(swap_vdi_uuid) - return (primary_vdi_uuid, swap_vdi_uuid) + LOG.debug("=" * 100) + return vdi_uuids @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, @@ -482,7 +483,8 @@ 
class VMHelper(HelperBase): LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return filename else: - return session.get_xenapi().VDI.get_uuid(vdi_ref) + vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) + return {'primary_vdi_uuid': vdi_uuid} @classmethod def determine_disk_image_type(cls, instance): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4a01cac29..0c30ad4cb 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,21 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - (primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, disk_image_type) - return (primary_vdi_uuid, swap_vdi_uuid) + vdi_uuids = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, + disk_image_type) + return vdi_uuids def spawn(self, instance, network_info=None): - vdi_uuid, swap_uuid = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) + vdi_uuids = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuids, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): + def _create_vm(self, instance, vdi_uuids, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -142,30 +143,37 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) # Are we building from a pre-existing disk? - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi_uuids['primary_vdi_uuid']) + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) + else: + swap_vdi_ref = None disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.kernel_id, user, project, + ImageType.KERNEL_RAMDISK) ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.ramdisk_id, user, project, + ImageType.KERNEL_RAMDISK) - use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, - vdi_ref, disk_image_type, instance.os_type) - vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, - use_pv_kernel) + use_pv_kernel = VMHelper.determine_is_pv(self._session, + instance.id, primary_vdi_ref, disk_image_type, + instance.os_type) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, + ramdisk, use_pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=vdi_ref, userdevice=0, bootable=True) - if swap_vdi_uuid: + vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) + if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) @@ -177,7 +185,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. 
network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, - vdi_ref, network_info) + primary_vdi_ref, network_info) self.create_vifs(vm_ref, network_info) self.inject_network_info(instance, network_info, vm_ref) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 9d6ee78ab..6cc7617e0 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -178,15 +178,19 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): assert_vhd_not_hidden(base_copy_path) # If we find a swap.vhd, go ahead and copy it into the SR - swap_uuid = None + swap_vdi_uuid = None orig_swap_path = os.path.join(staging_path, 'swap.vhd') if os.path.exists(orig_swap_path): - swap_path, swap_uuid = rename_with_uuid(orig_swap_path) + swap_path, swap_vdi_uuid = rename_with_uuid(orig_swap_path) move_into_sr(swap_path) - move_into_sr(base_copy_path) - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=swap_uuid) + vdi_uuids = {} + vdi_uuids['primary_vdi_uuid'] = vdi_uuid + if swap_vdi_uuid: + vdi_uuids['swap_vdi_uuid'] = swap_vdi_uuid + move_into_sr(base_copy_path) + return vdi_uuids def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): """Hard-link VHDs into staging area with appropriate filename -- cgit From b6a4f6aa5b2a97a6a7d79c40c1a3160abc1def39 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 23 May 2011 16:47:25 -0400 Subject: Renaming service_image_id vars to image_id to reduce confusion. Also some minor cleanup. --- nova/api/openstack/images.py | 11 ++++------- nova/api/openstack/servers.py | 5 ++--- nova/api/openstack/views/servers.py | 8 ++++---- nova/compute/api.py | 4 ++-- nova/tests/api/openstack/test_servers.py | 1 - nova/utils.py | 2 -- nova/virt/images.py | 8 ++++---- nova/virt/libvirt_conn.py | 10 +++++----- 8 files changed, 21 insertions(+), 28 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index c61b5c6a6..fc26b6c1b 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -85,9 +85,8 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = nova.image.get_image_service( - id) - image = image_service.show(context, service_image_id) + (image_service, image_id) = nova.image.get_image_service(id) + image = image_service.show(context, image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -100,11 +99,9 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object :param id: Image identifier (integer) """ - image_id = id context = req.environ['nova.context'] - (image_service, service_image_id) = nova.image.get_image_service( - image_id) - image_service.delete(context, service_image_id) + (image_service, image_id) = nova.image.get_image_service(id) + image_service.delete(context, image_id) return webob.exc.HTTPNoContent() def create(self, req): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 181833a23..4a0b208e0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -560,9 +560,8 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. 
""" context = req.environ['nova.context'] - image_service, service_image_id = nova.image.get_image_service( - image_id) - image = image_service.show(context, service_image_id) + image_service, _ = nova.image.get_image_service(image_id) + image = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0fe9dbe4a..4d825ff53 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -131,10 +131,10 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - image_id = inst['image_id'] - if str(image_id).isdigit(): - image_id = int(image_id) - response['imageRef'] = image_id + image_ref = inst['image_id'] + if str(image_ref).isdigit(): + image_ref = int(image_ref) + response['imageRef'] = image_ref def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index a9075ff8a..47f7a594f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -157,9 +157,9 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = nova.image.get_image_service( + (image_service, image_id) = nova.image.get_image_service( image_ref or image_id) - image = image_service.show(context, service_image_id) + image = image_service.show(context, image_id) os_type = None if 'properties' in image and 'os_type' in image['properties']: diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b2b0325c2..f5cfd64e7 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -29,7 +29,6 @@ from nova import db from nova import exception from nova import flags from nova import test -from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api diff --git a/nova/utils.py b/nova/utils.py index 954947589..361fc9873 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,7 +35,6 @@ import struct import sys import time import types -from urlparse import urlparse from xml.sax import saxutils from eventlet import event @@ -43,7 +42,6 @@ from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess -import nova from nova import exception from nova import flags from nova import log as logging diff --git a/nova/virt/images.py b/nova/virt/images.py index f571a9949..61ea77ab0 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,7 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags -from nova import image +import nova.image from nova import log as logging from nova import utils @@ -32,15 +32,15 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_id, path, _user, _project): +def fetch(image_ref, path, _user, _project): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. 
- (image_service, service_image_id) = image.get_image_service(image_id) + (image_service, image_id) = nova.image.get_image_service(image_ref) with open(path, "wb") as image_file: elevated = context.get_admin_context() - metadata = image_service.get(elevated, service_image_id, image_file) + metadata = image_service.get(elevated, image_id, image_file) return metadata diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8c31f9e27..e67f08dbf 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -442,7 +442,7 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance, image_id): + def snapshot(self, instance, image_ref): """Create snapshot from a running VM instance. This command only works with qemu 0.14+, the qemu_img flag is @@ -453,14 +453,14 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() - (image_service, service_image_id) = nova.image.get_image_service( + (image_service, image_id) = nova.image.get_image_service( instance['image_id']) - base = image_service.show(elevated, service_image_id) + base = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), + 'name': '%s.%s' % (base['name'], image_ref), 'properties': {'architecture': base['architecture'], 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', @@ -503,7 +503,7 @@ class LibvirtConnection(driver.ComputeDriver): # Upload that image to the image service with open(out_path) as image_file: image_service.update(elevated, - image_id, + image_ref, metadata, image_file) -- cgit From bac28418b7b92aa2654fad39d0240a85aa637488 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 23 May 2011 17:25:59 -0400 Subject: Removing code duplication between parse_image_ref and get_image service. Made parse_image_ref private. --- nova/image/__init__.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index be692680a..d957c38fe 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -28,19 +28,13 @@ from nova import flags FLAGS = flags.FLAGS -def parse_image_ref(image_ref): +def _parse_image_ref(image_ref): """Parse an image href into composite parts. 
- If the image_ref passed in is an integer, it will - return (image_ref, None, None), otherwise it will - return (image_id, host, port) - - :param image_ref: href or id of an image + :param image_ref: href of an image + :returns: a tuple of the form (image_id, host, port) """ - if str(image_ref).isdigit(): - return (int(image_ref), None, None) - o = urlparse(image_ref) port = o.port or 80 host = o.netloc.split(':', 1)[0] @@ -69,7 +63,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) try: - (image_id, host, port) = parse_image_ref(image_ref) + (image_id, host, port) = _parse_image_ref(image_ref) except: raise exception.InvalidImageRef(image_ref=image_ref) glance_client = nova.image.glance.GlanceClient(host, port) -- cgit From 42c209d90f491d19b3aabc70f8dafc33b76cf20d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 16:51:28 -0500 Subject: fix tests, have glance plugin return json encoded string of vdi uuids --- nova/tests/xenapi/stubs.py | 11 +++++++++-- nova/virt/xenapi/vm_utils.py | 6 +++++- nova/virt/xenapi/vmops.py | 4 ++-- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 6 +++++- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d9306900d..9f6f64318 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" import eventlet +import json from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) + return {'primary_vdi_uuid': vdi_uuid} stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -132,10 +133,16 @@ class FakeSessionForVMTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVMTests, self).__init__(uri) - def host_call_plugin(self, _1, _2, _3, _4, _5): + def host_call_plugin(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) + return '%s' % json.dumps( + {'primary_vdi_uuid': vdi_rec['uuid'], + 'swap_vdi_uuid': swap_vdi_rec['uuid']}) return '%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f1f7b8249..3d980013a 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" +import json import os import pickle import re @@ -408,7 +409,8 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuids = session.wait_for_task(task, instance_id) + result = session.wait_for_task(task, instance_id) + vdi_uuids = json.loads(result) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) @@ -571,6 +573,8 @@ class VMHelper(HelperBase): args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) uuid = session.wait_for_task(task, instance_id) + if image_type != ImageType.KERNEL_RAMDISK: + return {'primary_vdi_uuid': uuid} return uuid @classmethod diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0c30ad4cb..0d7ef5fac 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -91,7 +91,7 @@ class VMOps(object): def finish_resize(self, instance, disk_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, vdi_uuid) + vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid}) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -144,7 +144,7 @@ class VMOps(object): # Are we building from a pre-existing disk? primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdi_uuids['primary_vdi_uuid']) + vdi_uuids.get('primary_vdi_uuid')) swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 6cc7617e0..0d02adfe9 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -22,6 +22,10 @@ # import httplib +try: + import json +except ImportError: + import simplejson as json import os import os.path import pickle @@ -335,7 +339,7 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - return _fixup_vhds(sr_path, staging_path, uuid_stack) + return json.dumps(_fixup_vhds(sr_path, staging_path, uuid_stack)) finally: _cleanup_staging_area(staging_path) -- cgit From 7a521f49f6daf0a0a37a9ef98ff1ea8813f04a6f Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 23 May 2011 14:54:11 -0700 Subject: merged from trunk --- nova/tests/test_cloud.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ebfb5ee44..f3887b07b 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -37,7 +37,6 @@ from nova import rpc from nova import service from nova import test from nova import utils -from nova import exception from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -279,6 +278,26 @@ class CloudTestCase(test.TestCase): user_group=['all']) self.assertEqual(True, result['is_public']) + def test_deregister_image(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(local.LocalImageService, 'delete', fake_delete) + # valid image + result = deregister_image(self.context, 'ami-00000001') + self.assertEqual(result['imageId'], 'ami-00000001') + # invalid image + self.stubs.UnsetAll() + + def fake_detail_empty(self, context): + 
return [] + + self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty) + self.assertRaises(exception.ImageNotFound, deregister_image, + self.context, 'ami-bad001') + def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 @@ -334,6 +353,36 @@ class CloudTestCase(test.TestCase): self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + def test_import_public_key(self): + # test when user provides all values + result1 = self.cloud.import_public_key(self.context, + 'testimportkey1', + 'mytestpubkey', + 'mytestfprint') + self.assertTrue(result1) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey1') + self.assertEqual('mytestpubkey', keydata['public_key']) + self.assertEqual('mytestfprint', keydata['fingerprint']) + # test when user omits fingerprint + pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') + f = open(pubkey_path + '/dummy.pub', 'r') + dummypub = f.readline().rstrip() + f.close + f = open(pubkey_path + '/dummy.fingerprint', 'r') + dummyfprint = f.readline().rstrip() + f.close + result2 = self.cloud.import_public_key(self.context, + 'testimportkey2', + dummypub) + self.assertTrue(result2) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey2') + self.assertEqual(dummypub, keydata['public_key']) + self.assertEqual(dummyfprint, keydata['fingerprint']) + def test_delete_key_pair(self): self._create_key('test') self.cloud.delete_key_pair(self.context, 'test') -- cgit From 038ce7e16ee7ee1afc86ded260c1aa0d40d1e1ad Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 22:52:56 +0000 Subject: swap should use device 1 and rescue use device 2 --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0d7ef5fac..6ff8fd6a4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 1, False) + vdi_ref, 2, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From a5efbca08a6b057290622ba5938f87d2e44be3eb Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 21:55:15 -0400 Subject: take out irrelevant TODO --- nova/compute/api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 47f7a594f..7ffe9c90c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -525,8 +525,7 @@ class API(base.Base): 'user_id': str(context.user_id)} sent_meta = {'name': name, 'is_public': False, 'properties': properties} - # TODO(wwolf): not sure if we need to use - # utils.get_image_service() here ? 
+ recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, -- cgit From f49024c437f2680a18eb702f2975de2955b98889 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 22:47:44 -0400 Subject: make image_ref and image_id usage more consistant, eliminate redundancy in compute_api.create() call --- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/servers.py | 3 +-- nova/compute/api.py | 9 ++++----- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- nova/tests/test_quota.py | 10 +++++----- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc2e140b0..72bd56ed7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_ref=self._get_image(context, kwargs['image_ref'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4a0b208e0..01509f771 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -172,8 +172,7 @@ class Controller(common.OpenstackController): (inst,) = self.compute_api.create( context, inst_type, - image_id, - image_ref=image_ref, + image_ref, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, diff --git a/nova/compute/api.py b/nova/compute/api.py index 7ffe9c90c..4c4bc592b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -130,12 +130,12 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_ref, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, - injected_files=None, image_ref=None): + injected_files=None): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. 
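A sketch of how a caller adapts to the reworked signature in the hunks around this change: create() now takes image_ref as its only image argument (an integer id or a Glance href) and stores it as the instance's image_id. FakeComputeAPI below is an illustrative stub, not the real nova.compute.API.

class FakeComputeAPI(object):
    def create(self, context, instance_type, image_ref, **kwargs):
        # the real code resolves image_ref via nova.image.get_image_service()
        return [{'image_id': image_ref, 'instance_type': instance_type}]

api = FakeComputeAPI()
(inst,) = api.create(None, 'm1.small',
                     image_ref='http://glance:9292/images/42',
                     min_count=1, max_count=1)
assert inst['image_id'] == 'http://glance:9292/images/42'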
@@ -157,8 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, image_id) = nova.image.get_image_service( - image_ref or image_id) + (image_service, image_id) = nova.image.get_image_service(image_ref) image = image_service.show(context, image_id) os_type = None @@ -202,7 +201,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_ref or image_id, + 'image_id': image_ref, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..3aaca6831 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_id': 'ami-1', + kwargs = {'image_ref': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} + kwargs = {'image_ref': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..b02b99f66 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -150,7 +150,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( @@ -168,7 +168,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: db.instance_destroy(self.context, ref[0]['id']) @@ -184,7 +184,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 02b641a47..c0499c7a9 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -201,7 +201,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_ref=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -215,7 +215,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_ref=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -271,7 +271,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id='fake', + image_ref='fake', metadata=metadata) def test_allowed_injected_files(self): @@ -284,14 +284,14 @@ class QuotaTestCase(test.TestCase): api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type=inst_type, image_id='3', + instance_type=inst_type, image_ref='3', injected_files=files) def test_no_injected_files(self): FLAGS.image_service = 
'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') - api.create(self.context, instance_type=inst_type, image_id='3') + api.create(self.context, instance_type=inst_type, image_ref='3') def test_max_injected_files(self): files = [] -- cgit From 51b3d877c53d9c79dbbea21ed4d4abd0a1b91bf8 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 04:08:10 -0400 Subject: Fixing year of copyright. --- nova/image/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index d957c38fe..088b7796e 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # -# Copyright 2010 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may -- cgit From 84209a3f02f35c16de0614fa81685b242784bf20 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 05:26:04 -0400 Subject: Fixing _get_kernel_ramdisk_from_image to use the correct image service. --- nova/api/openstack/servers.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 01509f771..4f823ccf7 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,7 +144,7 @@ class Controller(common.OpenstackController): try: image_service, image_id = nova.image.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except: @@ -554,18 +554,15 @@ class Controller(common.OpenstackController): error=item.error)) return dict(actions=actions) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. 
""" context = req.environ['nova.context'] - image_service, _ = nova.image.get_image_service(image_id) - image = image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub - kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( - image) - return kernel_id, ramdisk_id + return self._do_get_kernel_ramdisk_from_image(image_meta) @staticmethod def _do_get_kernel_ramdisk_from_image(image_meta): -- cgit From 884b6d3ed74c5a5f766e405ac2178066314fb6d3 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 24 May 2011 09:51:21 -0400 Subject: make _make_fixture respect name passed in --- nova/tests/api/openstack/test_images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2c329f920..82bf66e49 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -127,7 +127,7 @@ class _BaseImageServiceTests(test.TestCase): @staticmethod def _make_fixture(name): - fixture = {'name': 'test image', + fixture = {'name': name, 'updated': None, 'created': None, 'status': None, -- cgit From 6e271a42258b439e8fed55c922792b632e062b63 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 10:27:26 -0400 Subject: Fixing docstring. --- nova/api/openstack/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index fc26b6c1b..171c4a036 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -80,7 +80,7 @@ class Controller(common.OpenstackController): """Return detailed information about a specific image. 
:param req: `wsgi.Request` object - :param image_id: Image identifier (integer) + :param id: Image identifier """ context = req.environ['nova.context'] -- cgit From 8e7c3121fab4b5a87c2efe865f3c06b1bd267cbc Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 24 May 2011 08:59:02 -0700 Subject: added imageid string to exception, per peer review --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index f3887b07b..e37aca4d6 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -226,7 +226,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine'}}] def fake_show_none(meh, context, id): - raise exception.ImageNotFound + raise exception.ImageNotFound('bad_image_id') self.stubs.Set(local.LocalImageService, 'detail', fake_detail) # list all -- cgit From a0cffc4de8ba4b15958e320308477d42287858e7 Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 24 May 2011 09:43:52 -0700 Subject: specified image_id keyword in exception arg --- nova/api/ec2/cloud.py | 2 +- nova/tests/test_cloud.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 59e00781e..80c61d62b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -848,7 +848,7 @@ class CloudController(object): kwargs['ramdisk_id'] = ramdisk['id'] image = self._get_image(context, kwargs['image_id']) if not image: - raise exception.ImageNotFound(kwargs['image_id']) + raise exception.ImageNotFound(image_id=kwargs['image_id']) try: available = (image['properties']['image_state'] == 'available') except KeyError: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index e37aca4d6..1e583377b 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -226,7 +226,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine'}}] def fake_show_none(meh, context, id): - raise exception.ImageNotFound('bad_image_id') + raise exception.ImageNotFound(image_id='bad_image_id') self.stubs.Set(local.LocalImageService, 'detail', fake_detail) # list all -- cgit From f3d7ec3fd2b2b987ae1118a6ae96874e8bbfdac5 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 24 May 2011 15:16:07 -0400 Subject: initial use of limited_by_marker --- nova/api/openstack/images.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..b06429943 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -153,3 +153,25 @@ class ControllerV11(Controller): def get_default_xmlns(self, req): return common.XML_NS_V11 + + def index(self, req): + """Return an index listing of images available to the request. + + :param req: `wsgi.Request` object + """ + context = req.environ['nova.context'] + images = self._image_service.index(context) + images = common.limited_by_marker(images, req) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=False) for image in images]) + + def detail(self, req): + """Return a detailed index listing of images available to the request. + + :param req: `wsgi.Request` object. 
+ """ + context = req.environ['nova.context'] + images = self._image_service.detail(context) + images = common.limited_by_marker(images, req) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=True) for image in images]) -- cgit From f488576ae27f8eb96a04022d0ecd11a28bd15116 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Tue, 24 May 2011 16:44:28 -0400 Subject: Added filtering on image properties --- nova/api/openstack/images.py | 23 +++++++++++++++++++++-- nova/image/fake.py | 4 ++-- nova/image/glance.py | 8 ++++---- nova/tests/api/openstack/fakes.py | 4 ++-- nova/tests/api/openstack/test_images.py | 9 +++++++++ nova/tests/image/test_glance.py | 2 +- 6 files changed, 39 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..755ce8ead 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,6 +28,9 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS +SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', + 'size_min', 'size_max'] + class Controller(common.OpenstackController): """Base `wsgi.Controller` for retrieving/displaying images.""" @@ -59,7 +62,8 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object """ context = req.environ['nova.context'] - images = self._image_service.index(context) + filters = self._get_filters(req) + images = self._image_service.index(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -70,11 +74,26 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object. """ context = req.environ['nova.context'] - images = self._image_service.detail(context) + filters = self._get_filters(req) + images = self._image_service.detail(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + for param in req.str_params: + if param in SUPPORTED_FILTERS or param.startswith('property-'): + filters[param] = req.str_params.get(param) + + return filters + def show(self, req, id): """Return detailed information about a specific image. 
diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..8e84c8597 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -52,11 +52,11 @@ class FakeImageService(service.BaseImageService): self.create(None, image) super(FakeImageService, self).__init__() - def index(self, context): + def index(self, context, filters=None): """Returns list of images.""" return copy.deepcopy(self.images.values()) - def detail(self, context): + def detail(self, context, filters=None): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/image/glance.py b/nova/image/glance.py index 193e37273..dec797619 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,23 +58,23 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context): + def index(self, context, filters=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) filtered.append(meta_subset) return filtered - def detail(self, context): + def detail(self, context, filters=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..8e0156afa 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -166,11 +166,11 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self): + def fake_get_images(self, filters=None): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_images_detailed(self): + def fake_get_images_detailed(self, filters=None): return copy.deepcopy(self.fixtures) def fake_get_image_meta(self, image_id): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2c329f920..76d4e2f56 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -708,6 +708,15 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) + def test_get_image_request_filters(self): + request =\ + webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', + 'name': 'testname', + } + self.assertDictMatch(expected, filters) + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 109905ded..6d108d494 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -34,7 +34,7 @@ class StubGlanceClient(object): def get_image_meta(self, image_id): return self.images[image_id] - def 
get_images_detailed(self): + def get_images_detailed(self, filters=None): return self.images.itervalues() def get_image(self, image_id): -- cgit From 26842cba90bd5637bd6aa185b300102ff257d9f1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 22:39:16 +0000 Subject: move devices back --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6ff8fd6a4..6fff1d494 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=2, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 2, False) + vdi_ref, 1, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From 17abaeafaf3fed2847e4377a16b47771eb663304 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:27:28 +0900 Subject: Fix wrong call of the volume api create() --- nova/api/openstack/contrib/volumes.py | 2 +- nova/tests/test_quota.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..b22bd2846 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -135,7 +135,7 @@ class VolumeController(wsgi.Controller): vol = env['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) - new_volume = self.volume_api.create(context, size, + new_volume = self.volume_api.create(context, size, None, vol.get('display_name'), vol.get('display_description')) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..990068fae 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -228,6 +228,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: @@ -241,6 +242,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: -- cgit From 46ddecc177830ea0ccef82e84d72c48261450b40 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 03:29:16 -0400 Subject: Don't need to import json. --- nova/tests/test_notifier.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 14bef79b8..523f38f24 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import json - import stubout import nova -- cgit From 7139cf1f0cfe9241a1710e5b7c621db569a2fc2d Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:37:52 +0900 Subject: Make snapshot_id=None a default value in VolumeManager:create_volume(). It is not a regular case to create a volume from a snapshot. 
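As a rough illustration (hypothetical class and return values, not the nova code), a keyword default like the one in the change below keeps every existing caller that creates a blank volume working unchanged, while the snapshot/clone path opts in explicitly:

class VolumeManagerSketch(object):
    # Stand-in with the same call signature; only the default value matters here.
    def create_volume(self, context, volume_id, snapshot_id=None):
        if snapshot_id is None:
            return 'volume %s created empty' % volume_id
        return 'volume %s cloned from snapshot %s' % (volume_id, snapshot_id)

manager = VolumeManagerSketch()
manager.create_volume(None, 'vol-1')            # common case: no snapshot argument
manager.create_volume(None, 'vol-2', 'snap-1')  # clone case passes it explicitly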
--- nova/volume/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 84085fbd8..b6f0f5eeb 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id, snapshot_id): + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) -- cgit From f3125b3012da7b6429e4e551060498e665c4596e Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 17:51:30 +0900 Subject: Add unittests for cloning volumes. --- nova/tests/test_cloud.py | 19 +++++++++++++++++++ nova/tests/test_volume.py | 20 +++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index d9169a646..8c7520fe8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -171,6 +171,25 @@ class CloudTestCase(test.TestCase): db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + def test_describe_availability_zones(self): """Makes sure describe_availability_zones works and filters results.""" service1 = db.service_create(self.context, {'host': 'host1_zones', diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..1c25d601a 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase): self.context = context.get_admin_context() @staticmethod - def _create_volume(size='0'): + def _create_volume(size='0', snapshot_id=None): """Create a volume object.""" vol = {} vol['size'] = size + vol['snapshot_id'] = snapshot_id vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -69,6 +70,23 @@ class VolumeTestCase(test.TestCase): self.context, volume_id) + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src_id = self._create_volume() + self.volume.create_volume(self.context, volume_src_id) + snapshot_id = self._create_snapshot(volume_src_id) + self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) + volume_dst_id = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst_id, snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), + volume_dst_id).snapshot_id) + + 
self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src_id) + def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in -- cgit From d380729b162c8d6120279db74327e61a4942e28f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 18:02:07 +0900 Subject: Avoid wildcard import. --- .../sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py index 0a50123bf..10bd9d5c9 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -15,8 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData, Integer from nova import log as logging -- cgit From 3d9569147cee2eaa94fc49c55b40f70a72171ebe Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 09:33:51 -0400 Subject: Added test --- nova/tests/api/openstack/test_images.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 76d4e2f56..233419c6d 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,11 +709,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): - request =\ - webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') filters = images.Controller()._get_filters(request) expected = {'status': 'ACTIVE', 'name': 'testname', + 'property-test': '3', + } + self.assertDictMatch(expected, filters) + + def test_get_image_request_filters_not_supported(self): + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', } self.assertDictMatch(expected, filters) -- cgit From 0b9ede226674b253f638b78cdce5fa40b2991701 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 25 May 2011 11:21:46 -0400 Subject: simplified the limiting differences for different versions of the API --- nova/api/openstack/images.py | 30 +++++++----------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index b06429943..c96b1c3e3 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -53,6 +53,9 @@ class Controller(common.OpenstackController): self._compute_service = compute_service or compute.API() self._image_service = image_service or _default_service + def _limit_items(self, items, req): + return common.limited(items, req) + def index(self, req): """Return an index listing of images available to the request. 
@@ -60,7 +63,7 @@ class Controller(common.OpenstackController): """ context = req.environ['nova.context'] images = self._image_service.index(context) - images = common.limited(images, req) + images = self._limit_items(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -71,7 +74,7 @@ class Controller(common.OpenstackController): """ context = req.environ['nova.context'] images = self._image_service.detail(context) - images = common.limited(images, req) + images = self._limit_items(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) @@ -154,24 +157,5 @@ class ControllerV11(Controller): def get_default_xmlns(self, req): return common.XML_NS_V11 - def index(self, req): - """Return an index listing of images available to the request. - - :param req: `wsgi.Request` object - """ - context = req.environ['nova.context'] - images = self._image_service.index(context) - images = common.limited_by_marker(images, req) - builder = self.get_builder(req).build - return dict(images=[builder(image, detail=False) for image in images]) - - def detail(self, req): - """Return a detailed index listing of images available to the request. - - :param req: `wsgi.Request` object. - """ - context = req.environ['nova.context'] - images = self._image_service.detail(context) - images = common.limited_by_marker(images, req) - builder = self.get_builder(req).build - return dict(images=[builder(image, detail=True) for image in images]) + def _limit_items(self, items, req): + return common.limited_by_marker(items, req) -- cgit From 537c5aea298a6c09b3329185c2d0eed77a0a21bd Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 25 May 2011 12:09:53 -0400 Subject: try out mox for testing image request filters --- nova/tests/api/openstack/test_images.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 233419c6d..e25334732 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -28,6 +28,7 @@ import shutil import tempfile import xml.dom.minidom as minidom +import mox import stubout import webob @@ -709,14 +710,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE', + 'name': 'testname', + 'property-test': '3'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): request = webob.Request.blank( -- cgit From e4bf97ba29e8e5858f37cedb34e20ccd8e210bae Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 12:24:27 -0400 Subject: Updated tests to use mox pep8 --- nova/api/openstack/images.py | 2 +- nova/tests/api/openstack/test_images.py | 14 ++++++++++---- 2 files changed, 11
insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 755ce8ead..553566d58 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -93,7 +93,7 @@ class Controller(common.OpenstackController): filters[param] = req.str_params.get(param) return filters - + def show(self, req, id): """Return detailed information about a specific image. diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index e25334732..f3f0217d6 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -726,12 +726,18 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') -- cgit From c440aecaaacf3caa8683234022bc10836d232971 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 17:28:10 -0400 Subject: Added params to local and base image service --- nova/image/local.py | 4 ++-- nova/image/service.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/image/local.py b/nova/image/local.py index 918180bae..677d5302b 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService): images.append(unhexed_image_id) return images - def index(self, context): + def index(self, context, *args, **kwargs): filtered = [] image_metas = self.detail(context) for image_meta in image_metas: @@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService): filtered.append(meta) return filtered - def detail(self, context): + def detail(self, context, *args, **kwargs): images = [] for image_id in self._ids(): try: diff --git a/nova/image/service.py b/nova/image/service.py index ab6749049..5361cfc89 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -46,7 +46,7 @@ class BaseImageService(object): # the ImageService subclass SERVICE_IMAGE_ATTRS = [] - def index(self, context): + def index(self, context, *args, **kwargs): """List images. :returns: a sequence of mappings with the following signature @@ -55,7 +55,7 @@ class BaseImageService(object): """ raise NotImplementedError - def detail(self, context): + def detail(self, context, *args, **kwargs): """Detailed information about an images. 
:returns: a sequence of mappings with the following signature -- cgit From fdd27860724cd57db6df059a97e98289f88ce6ac Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add support to rpc for multicall --- nova/rpc.py | 99 +++++++++++++++++++++++++++++++++++++------------- nova/tests/test_rpc.py | 17 +++++++++ 2 files changed, 90 insertions(+), 26 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c3..04198a4a6 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -32,8 +32,11 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import queue + from nova import context from nova import exception @@ -131,7 +134,8 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -347,8 +351,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -365,26 +370,27 @@ def _pack_context(msg, context): msg.update(context) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) conn = Connection.instance() @@ -392,18 +398,59 @@ def call(context, topic, msg): publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + 
self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + # TODO(termie): This is probably really a much simpler issue but am + # trying to solve the problem quickly. This works but + # I'd prefer to dig in and do it the best way later on. + + def _waiter(): + while not self._closed: + try: + self._consumer.wait(limit=1) + except StopIteration: + pass + eventlet.spawn(_waiter) + + while True: + result = self._results.get() + if isinstance(result, Exception): + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + for x in rv: + rv.close() + return x def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91eb..92ddfcffc 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,17 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_multicall_succeed_three_times(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + + for x in result: + self.assertEqual(value, x) + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -126,6 +137,12 @@ class TestReceiver(object): LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value) + context.reply(value) + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From d46c9fffe4fab8f55483c73d3e6ef12116de9bc5 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: make the test more expicit --- nova/tests/test_rpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 92ddfcffc..acab3e758 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -56,9 +56,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - + i = 0 for x in result: - self.assertEqual(value, x) + self.assertEqual(value + i, x) + i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call""" @@ -140,8 +141,8 @@ class TestReceiver(object): @staticmethod def echo_three_times(context, value): context.reply(value) - context.reply(value) - context.reply(value) + context.reply(value + 1) + context.reply(value + 2) @staticmethod def fail(context, value): -- cgit From 7622e854ef68fbdbfc531690cf74916301956c8e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add commented out unworking code for yield-based returns --- nova/rpc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/rpc.py b/nova/rpc.py index 04198a4a6..f43291c4b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -201,6 +201,11 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: + # TODO(termie): re-enable when fix the yielding issue + #if hasattr(rval, 'send'): + # logging.error('rval! 
%s', rval) + # for x in rval: + # msg_reply(msg_id, x, None) msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') -- cgit From b44c1fe9561ee8754137d2700bab295f20a4032b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each. --- nova/rpc.py | 96 ++++++++++++++++++++++++++++++++++++++++++--------------- nova/service.py | 21 +++++++------ 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index f43291c4b..62590ca92 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,9 +35,9 @@ from carrot import messaging import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import pools from eventlet import queue - from nova import context from nova import exception from nova import fakerabbit @@ -92,6 +92,11 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() +class Pool(pools.Pool): + def create(self): + return Connection.instance(new=True) + +ConnectionPool = Pool(max_size=20) class Consumer(messaging.Consumer): """Consumer base class. @@ -163,21 +168,9 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) - - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ + def process_data(self, message_data, message): LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -194,6 +187,19 @@ class AdapterConsumer(Consumer): LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Magically looks for a method on the proxy object and calls it. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -214,11 +220,6 @@ class AdapterConsumer(Consumer): return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -251,6 +252,50 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection""" + + def __init__(self, conn, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.init(conn) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + while True: + it = self.consumer_set.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + except Exception as e: + LOG.error(_("Received exception %s " % str(e) + \ + "while processing consumer")) + fuck + self.reconnect() + # Break to outer loop + break + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -315,7 +360,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -324,7 +369,9 @@ def msg_reply(msg_id, reply=None, failure=None): {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure}) + publisher.close() + ConnectionPool.put(conn) class RemoteError(exception.Error): @@ -393,12 +440,11 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() consumer = DirectConsumer(connection=conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() @@ -462,10 +508,11 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def fanout_cast(context, topic, msg): @@ -511,6 +558,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': diff --git a/nova/service.py b/nova/service.py index ab1238c3b..7761cfef5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,26 +91,29 @@ class Service(object): if 'nova-compute' == self.binary: 
self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) + if self.report_interval: + conn = rpc.Connection.instance(new=True) + + # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=conn, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn2, + connection=conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn3, + connection=conn, topic=self.topic, proxy=self) - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - self.timers.append(fanout.attach_to_eventlet()) + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + greenthread.spawn_n(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) -- cgit From e1a47584cc63136280cf3ca9ef02da3efc1dff7f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: pep8 and comment fixes --- nova/rpc.py | 25 ++++++++++++++++--------- nova/service.py | 1 - 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 62590ca92..db5aec826 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -92,12 +92,16 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + class Pool(pools.Pool): + """Class that implements a Pool of Connections""" + def create(self): return Connection.instance(new=True) ConnectionPool = Pool(max_size=20) + class Consumer(messaging.Consumer): """Consumer base class. @@ -171,6 +175,16 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): + """Consumer callback that parses the message for validity and + fires off a thread to call the proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -191,14 +205,8 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _process_data(self, msg_id, ctxt, method, args): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - + """Thread that maigcally looks for a method on the proxy + object and calls it. 
""" node_func = getattr(self.proxy, str(method)) @@ -285,7 +293,6 @@ class ConsumerSet(object): except Exception as e: LOG.error(_("Received exception %s " % str(e) + \ "while processing consumer")) - fuck self.reconnect() # Break to outer loop break diff --git a/nova/service.py b/nova/service.py index 7761cfef5..c51c9b066 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,7 +91,6 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: conn = rpc.Connection.instance(new=True) -- cgit From d0be426d4e7bbfb1ecb3f078c71c1e176da441a5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: convert fanout_cast to ConnectionPool --- nova/rpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index db5aec826..fdb228695 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -526,10 +526,11 @@ def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = FanoutPublisher(topic, connection=conn) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def generic_response(message_data, message): -- cgit From f2c2a593c828fc86e298d3eb31672a09b498c41f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be.. --- nova/fakerabbit.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8caf..a29ba9d86 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -77,6 +77,10 @@ class Queue(object): class Backend(base.BaseBackend): + def __init__(self, connection, **kwargs): + super(Backend, self).__init__(connection, **kwargs) + self.consumers = [] + def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: @@ -97,16 +101,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + self.consumers.append((queue, callback)) def consume(self, limit=None): + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in self.consumers: + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES -- cgit From 90e30806a2e0c235612eb09792656cd861997f84 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fix consumers to actually be deleted and clean up cloud test --- nova/fakerabbit.py | 13 +++++++++---- nova/rpc.py | 13 ++++++++++--- nova/service.py | 8 +++----- nova/tests/test_cloud.py | 26 ++++++++++---------------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a29ba9d86..5f3e75c48 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -79,7 +79,7 @@ class Queue(object): class Backend(base.BaseBackend): def __init__(self, connection, **kwargs): super(Backend, 
self).__init__(connection, **kwargs) - self.consumers = [] + self.consumers = {} def queue_declare(self, queue, **kwargs): global QUEUES @@ -100,13 +100,18 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.consumers.append((queue, callback)) + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + LOG.debug("Adding consumer %s", consumer_tag) + self.consumers[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + LOG.debug("Removing consumer %s", consumer_tag) + del self.consumers[consumer_tag] def consume(self, limit=None): num = 0 while True: - for (queue, callback) in self.consumers: + for (queue, callback) in self.consumers.itervalues(): item = self.get(queue) if item: callback(item) diff --git a/nova/rpc.py b/nova/rpc.py index fdb228695..e2e962fcc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -30,11 +30,11 @@ import time import traceback import uuid +import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -266,6 +266,7 @@ class ConsumerSet(object): def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None + self.enabled = True self.init(conn) def init(self, conn): @@ -283,15 +284,21 @@ class ConsumerSet(object): self.init(None) def wait(self, limit=None): - while True: + running = True + while running: it = self.consumer_set.iterconsume(limit=limit) + if not it: + break while True: try: it.next() except StopIteration: return + except greenlet.GreenletExit: + running = False + break except Exception as e: - LOG.error(_("Received exception %s " % str(e) + \ + LOG.error(_("Received exception %s " % type(e) + \ "while processing consumer")) self.reconnect() # Break to outer loop diff --git a/nova/service.py b/nova/service.py index c51c9b066..a0ff7c9f3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,12 +21,8 @@ import inspect import os -import sys -import time -from eventlet import event from eventlet import greenthread -from eventlet import greenpool from nova import context from nova import db @@ -112,7 +108,7 @@ class Service(object): consumer_node, fanout]) # Wait forever, processing these consumers - greenthread.spawn_n(cset.wait) + self.csetthread = greenthread.spawn(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -169,6 +165,8 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" + self.csetthread.kill() + self.csetthread.wait() self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..1e14c327c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,8 @@ # under the License. 
from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA -import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +28,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,6 +72,15 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) @@ -113,7 +115,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +130,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) @@ -306,31 +305,26 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') -- cgit From 8f2557dcd3e3d88c0eabb63bcce90ced79347ae4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: catch greenlet.GreenletExit when shutting service down --- nova/rpc.py | 2 +- nova/service.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e2e962fcc..02052ecf5 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,13 +24,13 @@ No fan-out support yet. 
""" +import greenlet import json import sys import time import traceback import uuid -import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet diff --git a/nova/service.py b/nova/service.py index a0ff7c9f3..c7e48544c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,6 +19,7 @@ """Generic Node baseclass for all workers that run on hosts.""" +import greenlet import inspect import os @@ -166,7 +167,10 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" self.csetthread.kill() - self.csetthread.wait() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) -- cgit From 5f3adfc3110ed8095cdac43cc651aa46087c5490 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Always create Service consumers no matter if report_interval is 0 Fix tests to handle how Service loads Consumers now --- nova/service.py | 46 +++++++++++++++++++------------------ nova/tests/test_service.py | 57 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 28 deletions(-) diff --git a/nova/service.py b/nova/service.py index c7e48544c..3a364b6c6 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,31 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: - conn = rpc.Connection.instance(new=True) - - # Share this same connection for these Consumers - consumer_all = rpc.TopicAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - - cset = rpc.ConsumerSet(conn, [consumer_all, - consumer_node, - fanout]) - # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % \ + self.topic) + + # Share this same connection for these Consumers + consumer_all = rpc.TopicAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + consumer_node = rpc.TopicAdapterConsumer( + connection=conn, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + self.csetthread = greenthread.spawn(cset.wait) + if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d48de2057..0bba01d92 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -106,7 +106,10 @@ class ServiceTestCase(test.TestCase): # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) + app = service.Service.create(host=host, binary=binary, topic=topic) + + self.mox.StubOutWithMock(service.rpc.Connection, 'instance') + service.rpc.Connection.instance(new=mox.IgnoreArg()) self.mox.StubOutWithMock(rpc, 'TopicAdapterConsumer', @@ -114,6 +117,11 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'FanoutAdapterConsumer', use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -129,9 +137,13 @@ class ServiceTestCase(test.TestCase): proxy=mox.IsA(service.Service)).AndReturn( rpc.FanoutAdapterConsumer) - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.FanoutAdapterConsumer.attach_to_eventlet() + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) service_create = {'host': host, 'binary': binary, @@ -287,8 +299,41 @@ class ServiceTestCase(test.TestCase): # Creating mocks self.mox.StubOutWithMock(service.rpc.Connection, 'instance') service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) + + self.mox.StubOutWithMock(rpc, + 'TopicAdapterConsumer', + use_mock_anything=True) + self.mox.StubOutWithMock(rpc, + 'FanoutAdapterConsumer', + use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.FanoutAdapterConsumer) + + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) + self.mox.StubOutWithMock(serv.manager.driver, 'update_available_resource') serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) -- cgit From 11d3672ad655c39265e5d2477a30db3a12adc65c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add rpc_conn_pool_size flag for the new connection pool --- nova/rpc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 02052ecf5..82869fc46 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -50,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -99,7 +102,7 @@ class Pool(pools.Pool): def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=20) +ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) class Consumer(messaging.Consumer): -- cgit From 
b193b97054f11664a72cd53547f355d1c9044f88 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: connection pool tests and make the pool LIFO --- nova/rpc.py | 8 +++++++- nova/tests/test_rpc.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 82869fc46..3cc0dadd4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -99,10 +99,16 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): """Class that implements a Pool of Connections""" + # TODO(comstud): Timeout connections not used in a while def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) class Consumer(messaging.Consumer): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index acab3e758..f64209596 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -120,6 +120,48 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection""" + + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns 2 separate connections + when called consecutively and the pool returns connections LIFO + """ + + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn2, conn3) + self.assertEqual(conn1, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and verify all connections + are unique + """ + + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call -- cgit From 51e8eeb9b3a23f811bcbf52d9700d94c5c8b15e4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: bring back commits lost in merge --- nova/rpc.py | 107 +++++++++++++++++++++++++++++-------------------- nova/tests/test_rpc.py | 19 +++++++++ 2 files changed, 82 insertions(+), 44 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 3cc0dadd4..d7d7bb014 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,6 +35,7 @@ from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool +from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -140,30 +141,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and 
logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. - except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True + #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + # """Wraps the parent fetch with some logic for failed connection.""" + # # TODO(vish): the logic for failed connections and logging should be + # # refactored into some sort of connection manager object + # try: + # if self.failed_connection: + # # NOTE(vish): connection is defined in the parent class, we can + # # recreate it as long as we create the backend too + # # pylint: disable=W0201 + # self.connection = Connection.recreate() + # self.backend = self.connection.create_backend() + # self.declare() + # return super(Consumer, self).fetch( + # no_ack, auto_ack, enable_callbacks) + # if self.failed_connection: + # LOG.error(_('Reconnected to queue')) + # self.failed_connection = False + # # NOTE(vish): This is catching all errors because we really don't + # # want exceptions to be logged 10 times a second if some + # # persistent failure occurs. + # except Exception, e: # pylint: disable=W0703 + # if not self.failed_connection: + # LOG.exception(_('Failed to fetch message from queue: %s' % e)) + # self.failed_connection = True def attach_to_eventlet(self): """Only needed for unit tests!""" @@ -195,7 +196,7 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) - msg_id = message_data.pop('_msg_id', None) + msg_id = message_data.get('_msg_id', None) ctxt = _unpack_context(message_data) @@ -225,11 +226,14 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # TODO(termie): re-enable when fix the yielding issue - #if hasattr(rval, 'send'): - # logging.error('rval! %s', rval) - # for x in rval: - # msg_reply(msg_id, x, None) - msg_reply(msg_id, rval, None) + if hasattr(rval, 'send'): + logging.error('rval! 
%s', rval) + for x in rval: + msg_reply(msg_id, x, None) + msg_reply(msg_id, None, None) + else: + msg_reply(msg_id, rval, None) + #msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -355,7 +359,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = True + self.exclusive = False super(DirectConsumer, self).__init__(connection=connection) @@ -387,7 +391,9 @@ def msg_reply(msg_id, reply=None, failure=None): publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) + LOG.error('MSG REPLY SUCCESS') except TypeError: + LOG.error('MSG REPLY FAILURE') publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), @@ -440,9 +446,9 @@ def _pack_context(msg, context): for args at some point. """ - context = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context) + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) class RpcContext(context.RequestContext): @@ -463,12 +469,13 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = ConnectionPool.get() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - publisher = TopicPublisher(connection=conn, topic=topic) + pub_conn = ConnectionPool.get() + publisher = TopicPublisher(connection=pub_conn, topic=topic) publisher.send(msg) publisher.close() @@ -484,6 +491,7 @@ class MulticallWaiter(object): def close(self): self._closed = True self._consumer.close() + ConnectionPool.put(self._consumer.connection) def __call__(self, data, message): """Acks message and sets result.""" @@ -501,15 +509,26 @@ class MulticallWaiter(object): # trying to solve the problem quickly. This works but # I'd prefer to dig in and do it the best way later on. 
- def _waiter(): - while not self._closed: - try: - self._consumer.wait(limit=1) - except StopIteration: - pass - eventlet.spawn(_waiter) + #def _waiter(): + # i = 0 + # while not self._closed: + # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) + # i += 1 + # try: + # self._consumer.wait(limit=1) + # except StopIteration: + # pass + # self._consumer.close() + # ConnectionPool.put(self._consumer.connection) + #eventlet.spawn(_waiter) while True: + rv = None + while rv is None and not self._closed: + rv = self._consumer.fetch(enable_callbacks=True) + time.sleep(0.01) + + LOG.error('RV %s', rv) result = self._results.get() if isinstance(result, Exception): raise result diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index f64209596..e5d99474d 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -61,6 +61,18 @@ class RpcTestCase(test.TestCase): self.assertEqual(value + i, x) i += 1 + def test_multicall_succeed_three_times_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + i = 0 + for x in result: + self.assertEqual(value + i, x) + i += 1 + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -83,6 +95,7 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) + LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', @@ -186,6 +199,12 @@ class TestReceiver(object): context.reply(value + 1) context.reply(value + 2) + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From 64b13a2aad676d2310947e3bf8b9e3dde6b763e7 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: almost everything working with fake_rabbit --- nova/rpc.py | 16 +++++++++++++++- nova/service.py | 22 ++++++++++++++++------ nova/test.py | 1 + nova/tests/test_cloud.py | 4 ++-- run_tests.py | 1 + 5 files changed, 35 insertions(+), 9 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index d7d7bb014..e1f594a99 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -102,6 +102,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): + LOG.debug('Creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. 
We'll order the @@ -166,6 +167,10 @@ class Consumer(messaging.Consumer): # LOG.exception(_('Failed to fetch message from queue: %s' % e)) # self.failed_connection = True + def close(self, *args, **kwargs): + LOG.debug('Closing consumer %s', self.consumer_tag) + return super(Consumer, self).close(*args, **kwargs) + def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -317,6 +322,8 @@ class ConsumerSet(object): # Break to outer loop break + def close(self): + self.consumer_set.close() class Publisher(messaging.Publisher): """Publisher base class.""" @@ -525,12 +532,19 @@ class MulticallWaiter(object): while True: rv = None while rv is None and not self._closed: - rv = self._consumer.fetch(enable_callbacks=True) + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) LOG.error('RV %s', rv) result = self._results.get() + LOG.error('RESULT %s', result) if isinstance(result, Exception): + self.close() raise result if result == None: self.close() diff --git a/nova/service.py b/nova/service.py index 3a364b6c6..2da510140 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,39 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn = rpc.Connection.instance(new=True) + conn1 = rpc.Connection.instance(new=True) + conn2 = rpc.Connection.instance(new=True) + conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn, [consumer_all, + cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + def _wait(): + cset.wait() + cset.close() + + self.csetthread = greenthread.spawn(_wait) + + #self.timers.append(consumer_all.attach_to_eventlet()) + #self.timers.append(consumer_node.attach_to_eventlet()) + #self.timers.append(fanout.attach_to_eventlet()) if self.report_interval: pulse = utils.LoopingCall(self.report_state) diff --git a/nova/test.py b/nova/test.py index 4deb2a175..7b2cf94b6 100644 --- a/nova/test.py +++ b/nova/test.py @@ -85,6 +85,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() + rpc.ConnectionPool = rpc.Pool(max_size=30) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1e14c327c..a838dd530 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,8 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - self.compute.kill() - self.network.kill() + #self.compute.kill() + #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): diff --git a/run_tests.py b/run_tests.py index d5d8acd16..509a60caa 100644 
--- a/run_tests.py +++ b/run_tests.py @@ -285,6 +285,7 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] + logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) -- cgit From e3a88390fd62308cde3d4c597d653c8dc245bed4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: don't need to use a separate connection --- nova/rpc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e1f594a99..a212383fd 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -481,8 +481,7 @@ def multicall(context, topic, msg): wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - pub_conn = ConnectionPool.get() - publisher = TopicPublisher(connection=pub_conn, topic=topic) + publisher = TopicPublisher(connection=con_conn, topic=topic) publisher.send(msg) publisher.close() -- cgit From c9b21b0619891c069251c568e4d89be791af56c3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: lots of fixes for rpc and extra imports --- nova/fakerabbit.py | 12 +++-- nova/rpc.py | 68 +++++++++++------------------ nova/service.py | 9 ++-- nova/test.py | 8 ++-- nova/tests/integrated/integrated_helpers.py | 5 +-- 5 files changed, 45 insertions(+), 57 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 5f3e75c48..ff993e29a 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit") EXCHANGES = {} QUEUES = {} +CONSUMERS = {} class Message(base.BaseMessage): @@ -101,17 +102,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + global CONSUMERS LOG.debug("Adding consumer %s", consumer_tag) - self.consumers[consumer_tag] = (queue, callback) + CONSUMERS[consumer_tag] = (queue, callback) def cancel(self, consumer_tag): + global CONSUMERS LOG.debug("Removing consumer %s", consumer_tag) - del self.consumers[consumer_tag] + del CONSUMERS[consumer_tag] def consume(self, limit=None): + global CONSUMERS num = 0 while True: - for (queue, callback) in self.consumers.itervalues(): + for (queue, callback) in CONSUMERS.itervalues(): item = self.get(queue) if item: callback(item) @@ -147,5 +151,7 @@ class Backend(base.BaseBackend): def reset_all(): global EXCHANGES global QUEUES + global CONSUMERS EXCHANGES = {} QUEUES = {} + CONSUMERS = {} diff --git a/nova/rpc.py b/nova/rpc.py index a212383fd..7faed4d3a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -33,9 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging -import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -142,30 +140,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - # """Wraps the parent fetch with some logic for failed connection.""" - # # TODO(vish): the logic for failed connections and logging should be - # # refactored into some sort of connection manager object - # try: - # if self.failed_connection: - # # NOTE(vish): connection is defined in the parent class, we can - # # recreate it as long as we create the backend too - # 
# pylint: disable=W0201 - # self.connection = Connection.recreate() - # self.backend = self.connection.create_backend() - # self.declare() - # return super(Consumer, self).fetch( - # no_ack, auto_ack, enable_callbacks) - # if self.failed_connection: - # LOG.error(_('Reconnected to queue')) - # self.failed_connection = False - # # NOTE(vish): This is catching all errors because we really don't - # # want exceptions to be logged 10 times a second if some - # # persistent failure occurs. - # except Exception, e: # pylint: disable=W0703 - # if not self.failed_connection: - # LOG.exception(_('Failed to fetch message from queue: %s' % e)) - # self.failed_connection = True + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True def close(self, *args, **kwargs): LOG.debug('Closing consumer %s', self.consumer_tag) @@ -325,6 +323,7 @@ class ConsumerSet(object): def close(self): self.consumer_set.close() + class Publisher(messaging.Publisher): """Publisher base class.""" pass @@ -511,23 +510,6 @@ class MulticallWaiter(object): return self.wait() def wait(self): - # TODO(termie): This is probably really a much simpler issue but am - # trying to solve the problem quickly. This works but - # I'd prefer to dig in and do it the best way later on. 
- - #def _waiter(): - # i = 0 - # while not self._closed: - # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) - # i += 1 - # try: - # self._consumer.wait(limit=1) - # except StopIteration: - # pass - # self._consumer.close() - # ConnectionPool.put(self._consumer.connection) - #eventlet.spawn(_waiter) - while True: rv = None while rv is None and not self._closed: diff --git a/nova/service.py b/nova/service.py index 2da510140..2626c49ae 100644 --- a/nova/service.py +++ b/nova/service.py @@ -89,8 +89,6 @@ class Service(object): self.manager.update_available_resource(ctxt) conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) @@ -111,10 +109,13 @@ class Service(object): cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) + # Wait forever, processing these consumers def _wait(): - cset.wait() - cset.close() + try: + cset.wait() + finally: + cset.close() self.csetthread = greenthread.spawn(_wait) diff --git a/nova/test.py b/nova/test.py index 7b2cf94b6..df48afbb1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -31,17 +31,15 @@ import uuid import unittest import mox -import shutil import stubout from eventlet import greenthread -from nova import context -from nova import db from nova import fakerabbit from nova import flags from nova import rpc from nova import service from nova import wsgi +from nova.virt import fake FLAGS = flags.FLAGS @@ -100,6 +98,10 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() + if FLAGS.connection_type == 'fake': + if hasattr(fake.FakeConnection, '_instance'): + del fake.FakeConnection._instance + # Reset any overriden flags self.reset_flags() diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index bc98921f0..7f590441e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -154,10 +154,7 @@ class _IntegratedTestBase(test.TestCase): # set up services self.start_service('compute') self.start_service('volume') - # NOTE(justinsb): There's a bug here which is eluding me... - # If we start the network_service, all is good, but then subsequent - # tests fail: CloudTestCase.test_ajax_console in particular. - #self.start_service('network') + self.start_service('network') self.start_service('scheduler') self._start_api_service() -- cgit From 9334d41c6fe638a3119327702094695cfbd38271 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:25 -0700 Subject: make sure that using multicall on a call with a single result still functions --- nova/rpc.py | 4 ++-- nova/tests/test_rpc.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 7faed4d3a..84493271f 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -233,10 +233,10 @@ class AdapterConsumer(Consumer): logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) - msg_reply(msg_id, None, None) else: msg_reply(msg_id, rval, None) - #msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
+ msg_reply(msg_id, None, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index e5d99474d..c1ef60ff6 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,35 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_call_succeed_despite_multiple_returns(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_multicall_succeed_once(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + i = 0 + for x in result: + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + i += 1 + def test_multicall_succeed_three_times(self): """Get a value through rpc call""" value = 42 -- cgit From c7fe7e5e28b9f4bb999c8309f56953f6609cbc57 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:49 -0700 Subject: cleanup the code for merging --- nova/fakerabbit.py | 4 --- nova/rpc.py | 78 ++++++++++++++++++++++-------------------------- nova/service.py | 23 +++++++------- nova/test.py | 2 +- nova/tests/test_cloud.py | 3 -- nova/tests/test_rpc.py | 1 - run_tests.py | 1 - 7 files changed, 46 insertions(+), 66 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index ff993e29a..e7e9dab77 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -78,10 +78,6 @@ class Queue(object): class Backend(base.BaseBackend): - def __init__(self, connection, **kwargs): - super(Backend, self).__init__(connection, **kwargs) - self.consumers = {} - def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: diff --git a/nova/rpc.py b/nova/rpc.py index 84493271f..8d14494f0 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,7 +24,6 @@ No fan-out support yet. 
""" -import greenlet import json import sys import time @@ -36,6 +35,7 @@ from carrot import messaging from eventlet import greenpool from eventlet import pools from eventlet import queue +import greenlet from nova import context from nova import exception @@ -50,9 +50,9 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') + 'Size of RPC thread pool') flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -96,7 +96,7 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): - """Class that implements a Pool of Connections""" + """Class that implements a Pool of Connections.""" # TODO(comstud): Timeout connections not used in a while def create(self): @@ -152,8 +152,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -165,10 +166,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def close(self, *args, **kwargs): - LOG.debug('Closing consumer %s', self.consumer_tag) - return super(Consumer, self).close(*args, **kwargs) - def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -188,8 +185,10 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): - """Consumer callback that parses the message for validity and - fires off a thread to call the proxy object method. + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -199,8 +198,8 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -228,13 +227,13 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: - # TODO(termie): re-enable when fix the yielding issue + # Check if the result was a generator if hasattr(rval, 'send'): - logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) else: msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) except Exception as e: @@ -277,7 +276,7 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): - """Groups consumers to listen on together on a single connection""" + """Groups consumers to listen on together on a single connection.""" def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) @@ -365,7 +364,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = False + self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -393,20 +392,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = ConnectionPool.get() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - LOG.error('MSG REPLY SUCCESS') - except TypeError: - LOG.error('MSG REPLY FAILURE') - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -518,12 +515,9 @@ class MulticallWaiter(object): except Exception: self.close() raise - #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) - LOG.error('RV %s', rv) result = self._results.get() - LOG.error('RESULT %s', result) if isinstance(result, Exception): self.close() raise result @@ -545,22 +539,20 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): diff --git a/nova/service.py b/nova/service.py index 2626c49ae..94afd5f78 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,27 +88,27 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - logging.debug("Creating Consumer connection for Service %s" % \ - self.topic) + self.conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % + self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) 
consumer_node = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn1, [consumer_all, - consumer_node, - fanout]) + cset = rpc.ConsumerSet(self.conn, [consumer_all, + consumer_node, + fanout]) # Wait forever, processing these consumers def _wait(): @@ -119,10 +119,6 @@ class Service(object): self.csetthread = greenthread.spawn(_wait) - #self.timers.append(consumer_all.attach_to_eventlet()) - #self.timers.append(consumer_node.attach_to_eventlet()) - #self.timers.append(fanout.attach_to_eventlet()) - if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -185,6 +181,7 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() + rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: diff --git a/nova/test.py b/nova/test.py index df48afbb1..80b2d0a74 100644 --- a/nova/test.py +++ b/nova/test.py @@ -83,7 +83,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() - rpc.ConnectionPool = rpc.Pool(max_size=30) + rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a838dd530..ca3ef7ffe 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,6 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - #self.compute.kill() - #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -314,7 +312,6 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index c1ef60ff6..fcecfb352 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -124,7 +124,6 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) - LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', diff --git a/run_tests.py b/run_tests.py index 509a60caa..d5d8acd16 100644 --- a/run_tests.py +++ b/run_tests.py @@ -285,7 +285,6 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] - logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) -- cgit From 7755bbfc7b16248dab23bfab479d09501519290f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: cleanups --- nova/tests/test_rpc.py | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index fcecfb352..8523b409c 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = 
logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): - """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) @@ -43,21 +42,18 @@ class RpcTestCase(test.TestCase): self.context = context.get_admin_context() def test_call_succeed(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", @@ -65,7 +61,6 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_multicall_succeed_once(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -79,7 +74,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -91,7 +85,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -103,7 +96,7 @@ class RpcTestCase(test.TestCase): i += 1 def test_context_passed(self): - """Makes sure a context is passed through rpc call""" + """Makes sure a context is passed through rpc call.""" value = 42 result = rpc.call(self.context, 'test', {"method": "context", @@ -111,11 +104,12 @@ class RpcTestCase(test.TestCase): self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): - """Test that exception gets passed back properly + """Test that exception gets passed back properly. rpc.call returns a RemoteError object. The value of the exception is converted to a string, so we convert it back to an int in the test. + """ value = 42 self.assertRaises(rpc.RemoteError, @@ -134,7 +128,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(int(exc.value), value) def test_nested_calls(self): - """Test that we can do an rpc.call inside another call""" + """Test that we can do an rpc.call inside another call.""" class Nested(object): @staticmethod def echo(context, queue, value): @@ -162,8 +156,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection""" - + """Test that ConnectionPool recycles a single connection.""" conn1 = rpc.ConnectionPool.get() rpc.ConnectionPool.put(conn1) conn2 = rpc.ConnectionPool.get() @@ -171,10 +164,13 @@ class RpcTestCase(test.TestCase): self.assertEqual(conn1, conn2) def test_connectionpool_double(self): - """Test that ConnectionPool returns 2 separate connections - when called consecutively and the pool returns connections LIFO - """ + """Test that ConnectionPool returns and reuses separate connections. + + When called consecutively we should get separate connections and upon + returning them those connections should be reused for future calls + before generating a new connection. 
+ """ conn1 = rpc.ConnectionPool.get() conn2 = rpc.ConnectionPool.get() @@ -184,14 +180,11 @@ class RpcTestCase(test.TestCase): conn3 = rpc.ConnectionPool.get() conn4 = rpc.ConnectionPool.get() - self.assertEqual(conn2, conn3) - self.assertEqual(conn1, conn4) + self.assertEqual(conn1, conn3) + self.assertEqual(conn2, conn4) def test_connectionpool_limit(self): - """Test connection pool limit and verify all connections - are unique - """ - + """Test connection pool limit and connection uniqueness.""" max_size = FLAGS.rpc_conn_pool_size conns = [] @@ -205,19 +198,21 @@ class RpcTestCase(test.TestCase): class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. - Uses static methods because we aren't actually storing any state""" + """ @staticmethod def echo(context, value): - """Simply returns whatever value is sent in""" + """Simply returns whatever value is sent in.""" LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): - """Returns dictionary version of context""" + """Returns dictionary version of context.""" LOG.debug(_("Received %s"), context) return context.to_dict() @@ -235,5 +230,5 @@ class TestReceiver(object): @staticmethod def fail(context, value): - """Raises an exception with the value sent in""" + """Raises an exception with the value sent in.""" raise Exception(value) -- cgit From f56df190ee888ae731740e7e949fb6f0c012d687 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: replace removed import --- nova/tests/test_cloud.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca3ef7ffe..b64be662e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -19,6 +19,7 @@ from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA +import os from eventlet import greenthread -- cgit From b3506a471bbce063d72aead211f45d693bda7853 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: don't put connection back in pool --- nova/service.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/service.py b/nova/service.py index 94afd5f78..141fd4253 100644 --- a/nova/service.py +++ b/nova/service.py @@ -181,7 +181,6 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() - rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: -- cgit From a05e8e7587e42633e8459fd050eee3a4da247330 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: move consumerset killing into stop --- nova/service.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/service.py b/nova/service.py index 141fd4253..782183322 100644 --- a/nova/service.py +++ b/nova/service.py @@ -175,11 +175,6 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" - self.csetthread.kill() - try: - self.csetthread.wait() - except greenlet.GreenletExit: - pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) @@ -187,6 +182,11 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): + self.csetthread.kill() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass for x in self.timers: try: x.stop() -- cgit From 
feb04f0117450bcd6e8f4966f4487575073be41c Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: change the behavior of calling a multicall --- nova/rpc.py | 8 +++++--- nova/tests/test_rpc.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 8d14494f0..493978e57 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -236,6 +236,9 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. msg_reply(msg_id, None, None) + elif hasattr(rval, 'send'): + # NOTE(vish): this iterates through the generator + list(rval) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -530,9 +533,8 @@ class MulticallWaiter(object): def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) - for x in rv: - rv.close() - return x + # NOTE(vish): return the last result from the multicall + return list(rv)[-1] def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 8523b409c..35f4a64d9 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -51,14 +51,14 @@ class RpcTestCase(test.TestCase): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_call_succeed_despite_multiple_returns_yield(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_multicall_succeed_once(self): value = 42 -- cgit From 781672793c5fb774c5d9d291798775db471233b2 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 19:57:04 -0400 Subject: Renamed image_ref variables to image_href. Since the convention is that x_ref vars may imply that they are db objects. 
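An image href here is just a string, either a bare integer id or a full Glance URL, whereas an *_ref would by convention be a database object. A minimal sketch of the href parsing this rename clarifies, mirroring the _parse_image_ref helper changed in the diff below; the helper name and URL are only examples, and it assumes Python 2's urlparse module as used elsewhere in nova/image/__init__.py:

    from urlparse import urlparse

    def parse_image_href(image_href):
        # A bare id such as 42 means "use the default image service".
        if str(image_href).isdigit():
            return int(image_href), None, None
        # A full href such as http://myglanceserver:9292/images/42 also
        # names the Glance host and port to talk to.
        o = urlparse(image_href)
        image_id = int(o.path.split('/')[-1])
        host = o.netloc.split(':', 1)[0]
        return image_id, host, o.port or 80
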
--- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/servers.py | 14 +++++++------- nova/api/openstack/views/servers.py | 8 ++++---- nova/compute/api.py | 6 +++--- nova/exception.py | 2 +- nova/image/__init__.py | 26 +++++++++++++------------- nova/tests/api/openstack/test_servers.py | 26 +++++++++++++------------- nova/tests/integrated/integrated_helpers.py | 8 ++++---- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- nova/tests/test_quota.py | 10 +++++----- nova/virt/images.py | 4 ++-- nova/virt/libvirt_conn.py | 6 +++--- 13 files changed, 61 insertions(+), 61 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 72bd56ed7..ed33e2d42 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_ref=self._get_image(context, kwargs['image_ref'])['id'], + image_href=self._get_image(context, kwargs['image_href'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4f823ccf7..cac433557 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -140,15 +140,15 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - image_ref = self._image_ref_from_req_data(env) + image_href = self._image_ref_from_req_data(env) try: - image_service, image_id = nova.image.get_image_service(image_ref) + image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except: - msg = _("Cannot find requested image %s") % image_ref + msg = _("Cannot find requested image %s") % image_href return faults.Fault(exc.HTTPBadRequest(msg)) personality = env['server'].get('personality') @@ -172,7 +172,7 @@ class Controller(common.OpenstackController): (inst,) = self.compute_api.create( context, inst_type, - image_ref, + image_href, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, @@ -188,7 +188,7 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPBadRequest(msg)) inst['instance_type'] = inst_type - inst['image_id'] = image_ref + inst['image_id'] = image_href builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) @@ -701,13 +701,13 @@ class ControllerV11(Controller): instance_id = int(instance_id) try: - image_ref = info["rebuild"]["imageRef"] + image_href = info["rebuild"]["imageRef"] except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") LOG.debug(msg) return faults.Fault(exc.HTTPBadRequest(explanation=msg)) - image_id = common.get_id_from_href(image_ref) + image_id = common.get_id_from_href(image_href) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata", {}) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 4d825ff53..ddd17ab93 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -131,10 +131,10 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - image_ref = inst['image_id'] - if 
str(image_ref).isdigit(): - image_ref = int(image_ref) - response['imageRef'] = image_ref + image_href = inst['image_id'] + if str(image_href).isdigit(): + image_href = int(image_href) + response['imageRef'] = image_href def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index 4c4bc592b..bb419520d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -130,7 +130,7 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def create(self, context, instance_type, - image_ref, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -157,7 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, image_id) = nova.image.get_image_service(image_ref) + (image_service, image_id) = nova.image.get_image_service(image_href) image = image_service.show(context, image_id) os_type = None @@ -201,7 +201,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_ref, + 'image_id': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/exception.py b/nova/exception.py index 13f5bf95c..5b91e1cde 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -284,7 +284,7 @@ class DiskNotFound(NotFound): class InvalidImageRef(Invalid): - message = _("Invalid image ref %(image_ref)s.") + message = _("Invalid image ref %(image_href)s.") class ImageNotFound(NotFound): diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 088b7796e..9b9108bd2 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -28,14 +28,14 @@ from nova import flags FLAGS = flags.FLAGS -def _parse_image_ref(image_ref): +def _parse_image_ref(image_href): """Parse an image href into composite parts. - :param image_ref: href of an image + :param image_href: href of an image :returns: a tuple of the form (image_id, host, port) """ - o = urlparse(image_ref) + o = urlparse(image_href) port = o.port or 80 host = o.netloc.split(':', 1)[0] image_id = int(o.path.split('/')[-1]) @@ -47,25 +47,25 @@ def get_default_image_service(): return ImageService() -def get_image_service(image_ref): - """Get the proper image_service and id for the given image_ref. +def get_image_service(image_href): + """Get the proper image_service and id for the given image_href. - The image_ref param can be an href of the form + The image_href param can be an href of the form http://myglanceserver:9292/images/42, or just an int such as 42. If the - image_ref is an int, then the default image service is returned. + image_href is an int, then the default image service is returned. 
- :param image_ref: image ref/id for an image + :param image_href: image ref/id for an image :returns: a tuple of the form (image_service, image_id) """ - image_ref = image_ref or 0 - if str(image_ref).isdigit(): - return (get_default_image_service(), int(image_ref)) + image_href = image_href or 0 + if str(image_href).isdigit(): + return (get_default_image_service(), int(image_href)) try: - (image_id, host, port) = _parse_image_ref(image_ref) + (image_id, host, port) = _parse_image_ref(image_href) except: - raise exception.InvalidImageRef(image_ref=image_ref) + raise exception.InvalidImageRef(image_href=image_href) glance_client = nova.image.glance.GlanceClient(host, port) image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3910f9820..8f8c6b024 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -584,12 +584,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', @@ -611,16 +611,16 @@ class ServersTest(test.TestCase): self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_href, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/asdf' + image_href = 'http://localhost/v1.1/images/asdf' flavor_ref = 'http://localhost/v1.1/flavors/3' body = dict(server=dict( - name='server_test', imageRef=image_ref, flavorRef=flavor_ref, + name='server_test', imageRef=image_href, flavorRef=flavor_ref, metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.1/servers') @@ -633,12 +633,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_ref = 2 + image_id = 2 flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_id, 'flavorRef': flavor_ref, }, } @@ -653,7 +653,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_id, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_with_admin_pass_v1_0(self): @@ -680,12 +680,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': 'testpass', }, @@ -702,12 +702,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_empty_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 
'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': '', }, @@ -1658,7 +1658,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", request = self.deserializer.deserialize(serial_request) self.assertEqual(request, expected) - def test_request_xmlser_with_flavor_image_ref(self): + def test_request_xmlser_with_flavor_image_href(self): serial_request = """ Date: Thu, 26 May 2011 19:46:11 +0900 Subject: creating _take_action_to_instance to nova.virt.libvirt_conn.py --- nova/virt/libvirt_conn.py | 43 +++++++++++++++---------------------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index aa5e42fc8..4e2e2292e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,6 +45,7 @@ import sys import tempfile import time import uuid +import inspect from xml.dom import minidom from xml.etree import ElementTree @@ -548,53 +549,39 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - @exception.wrap_exception - def pause(self, instance, callback): - """Pause VM instance""" + def _take_action_to_instance(self, action, instance, *arg): + """action VM instance""" if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) tmpconn.close() else: dom = self._conn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) + + @exception.wrap_exception + def pause(self, instance, callback): + """Pause VM instance""" + self._take_action_to_instance("suspend", instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.resume() - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.resume() + self._take_action_to_instance("resume", instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.managedSave(0) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.managedSave(0) + self._take_action_to_instance("managedSave", instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.create() + self._take_action_to_instance("create", instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From ce5c7287e06cb7ce1d1a41354a5d6ea073d308d0 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:31:50 +0900 Subject: remove unnecessary import inspect at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4e2e2292e..f9c441505 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,7 
+45,6 @@ import sys import tempfile import time import uuid -import inspect from xml.dom import minidom from xml.etree import ElementTree -- cgit From 87717c33ae78201a24c0f5a3416ae4b0080e4668 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:49:14 +0900 Subject: replace double quatation to single quatation at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f9c441505..8c9a3550a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,23 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance("suspend", instance) + self._take_action_to_instance('suspend', instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance("resume", instance) + self._take_action_to_instance('resume', instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance("managedSave", instance, 0) + self._take_action_to_instance('managedSave', instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance("create", instance) + self._take_action_to_instance('create', instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From ac3348ae282b218a941b33a2d17b7d5ddaeebab6 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 09:16:02 -0400 Subject: Switching back to chown. I'm fine w/ setfacl too but nova already has 'chown' via sudoers so this seems reasonable for now. --- nova/virt/xenapi/vm_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 931f8e2d4..fdf51ff74 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1013,8 +1013,7 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('sudo', 'setfacl', '-m', 'u:%s:rw' % os.getuid(), - '/dev/%s' % dev) + utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) -- cgit From 131d5bcae4e5f0ab48369e2979f16468bd0900a4 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 10:34:17 -0400 Subject: Switch the run_instances call in the EC2 back to 'image_id'. Incoming requests use 'imageId' so we shouldn't modify this for image HREF's. 
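The point of keeping 'image_id' on this side is that it is the EC2-facing name: clients send 'imageId' (an AMI-style id such as 'ami-1'), and only after _get_image() resolves it does an internal image id exist. A rough sketch of that mapping, with an illustrative helper name and assuming the EC2 layer's usual camelCase-to-underscore conversion of request parameters:

    def internal_image_id(cloud, context, kwargs):
        # 'imageId' from the EC2 request arrives here as kwargs['image_id'],
        # e.g. 'ami-1'; resolve it to the internal image record's id.
        ec2_image_id = kwargs['image_id']
        return cloud._get_image(context, ec2_image_id)['id']
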
--- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ed33e2d42..cc2e140b0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_href=self._get_image(context, kwargs['image_href'])['id'], + image_id=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), -- cgit From f37d94428dd0b56632958d5d3a6930531a51cd44 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 10:54:46 -0400 Subject: Restricted image filtering by name and status only --- nova/api/openstack/images.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 553566d58..2e779da79 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,8 +28,7 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', - 'size_min', 'size_max'] +SUPPORTED_FILTERS = ['name', 'status'] class Controller(common.OpenstackController): -- cgit From 995a65ac42b4e36679ad0708a227139cdd3bc06e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 11:21:28 -0400 Subject: Fix test_cloud tests. --- nova/api/ec2/cloud.py | 2 +- nova/tests/test_cloud.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc2e140b0..8580dc79e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_href=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1219d600e..54c0454de 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_href': 'ami-1', + kwargs = {'image_id': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_href': 'ami-1'} + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) -- cgit From 7c0564baf72cbb5c3693ab72c72684a5c6b333c5 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 26 May 2011 10:22:45 -0700 Subject: instance obj returned is not a hash, instead is sqlalchemy obj and hostname attr is what the logic is looking for --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 26c0d776c..51373d282 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 
+536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance']['ec2_id'] + instance = floating_ip['fixed_ip']['instance'].hostname print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) -- cgit From ff75e808eef06a72c0198fe976c19c60256c6b74 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 May 2011 10:28:22 -0700 Subject: log upload errors --- nova/image/s3.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nova/image/s3.py b/nova/image/s3.py index 673cbf56f..bad04e5c0 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,12 +31,14 @@ import eventlet from nova import crypto from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.image import service from nova.api.ec2 import ec2utils +LOG = logging.getLogger("nova.image.s3") FLAGS = flags.FLAGS flags.DEFINE_string('image_decryption_dir', '/tmp', 'parent dir for tempdir used for image decryption') @@ -181,6 +183,8 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: + LOG.exception(_("Failed to download %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -203,6 +207,9 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: + LOG.exception(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), locals()) + LOG.exception(_("Failed to decrypt %s"), enc_filename) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -213,6 +220,8 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: + LOG.exception(_("Failed to untar %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -224,6 +233,8 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: + LOG.exception(_("Failed to upload %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From 75ec3d77d3fa4078bbe7d647377f987d87d97651 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 May 2011 10:30:27 -0700 Subject: exceptions are logged via the raise, so just log an error message --- nova/image/s3.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index bad04e5c0..ec8852f09 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -183,8 +183,8 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: - LOG.exception(_("Failed to download %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to download %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -207,9 +207,8 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: - LOG.exception(_("Failed to decrypt 
%(image_location)s " - "to %(image_path)s"), locals()) - LOG.exception(_("Failed to decrypt %s"), enc_filename) + LOG.error(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -220,8 +219,8 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.exception(_("Failed to untar %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to untar %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -233,8 +232,8 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: - LOG.exception(_("Failed to upload %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to upload %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From b9b16ca71d4bbb9782482bdf5d848bb5b787732f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 13:59:25 -0400 Subject: Expanded tests --- nova/tests/api/openstack/test_images.py | 122 ++++++++++++++++++++++++++++++-- 1 file changed, 116 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index f3f0217d6..9f1f28611 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,23 +709,119 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) - def test_get_image_request_filters(self): + def test_image_filter_with_name(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() - filters = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3'} + filters = {'name': 'testname'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, 
filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_name(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'name': 'testname'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} image_service.detail(context, filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') + '/v1.1/images/detail?property-test=3') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) mocker.VerifyAll() - def test_get_image_request_filters_not_supported(self): + def test_image_detail_filter_not_supported(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() @@ -739,6 +835,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): controller.detail(request) mocker.VerifyAll() + def test_image_detail_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) -- cgit From 2d834fa19078c645e3c36001b5dd34fb8e708f0a Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 14:09:59 -0400 Subject: review fixups --- nova/api/openstack/wsgi.py | 27 ++++++++++++++++----------- nova/tests/api/openstack/test_wsgi.py | 2 +- nova/wsgi.py | 4 ++-- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index bd840a6f7..5577d326f 100644 --- 
a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -62,7 +62,7 @@ class TextDeserializer(object): """Find local deserialization method and parse request body.""" try: action_method = getattr(self, action) - except Exception: + except (AttributeError, TypeError): action_method = self.default return action_method(datastring) @@ -162,7 +162,7 @@ class RequestDeserializer(object): def get_deserializer(self, content_type): try: return self.deserializers[content_type] - except Exception: + except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): @@ -172,16 +172,20 @@ class RequestDeserializer(object): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() + except Exception: + return {} + try: del args['controller'] + except KeyError: + pass - if 'format' in args: - del args['format'] - - return args - + try: + del args['format'] except KeyError: - return {} + pass + + return args class DictSerializer(object): @@ -191,7 +195,7 @@ class DictSerializer(object): """Find local serialization method and encode response body.""" try: action_method = getattr(self, action) - except Exception: + except (AttributeError, TypeError): action_method = self.default return action_method(data) @@ -316,7 +320,7 @@ class ResponseSerializer(object): def get_serializer(self, content_type): try: return self.serializers[content_type] - except Exception: + except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -347,7 +351,8 @@ class Resource(wsgi.Application): def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" - LOG.debug("%s %s" % (request.method, request.url)) + LOG.debug("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) try: action, action_args, accept = self.deserializer.deserialize( diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 89603d82b..ebbdc9409 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -205,7 +205,7 @@ class ResponseSerializerTest(test.TestCase): def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, self.serializer.serialize, - 'application/unknown', {}) + {}, 'application/unknown') class RequestDeserializerTest(test.TestCase): diff --git a/nova/wsgi.py b/nova/wsgi.py index d59d2ee13..33ba852bc 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -260,8 +260,8 @@ class Router(object): Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a controller, who will route - the request to the action method. + well and have your controller be an object that can route + the request to the action-specific method. 
Examples: mapper = routes.Mapper() -- cgit From 3264c18fffa26b1288fc253f2526d9a78fdc9dd4 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 15:01:24 -0400 Subject: cleaning up getattr calls with default param --- nova/api/openstack/wsgi.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 5577d326f..7a747842e 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -58,13 +58,9 @@ class Request(webob.Request): class TextDeserializer(object): """Custom request body deserialization based on controller action name.""" - def deserialize(self, datastring, action=None): + def deserialize(self, datastring, action='default'): """Find local deserialization method and parse request body.""" - try: - action_method = getattr(self, action) - except (AttributeError, TypeError): - action_method = self.default - + action_method = getattr(self, action, self.default) return action_method(datastring) def default(self, datastring): @@ -191,13 +187,9 @@ class RequestDeserializer(object): class DictSerializer(object): """Custom response body serialization based on controller action name.""" - def serialize(self, data, action=None): + def serialize(self, data, action='default'): """Find local serialization method and encode response body.""" - try: - action_method = getattr(self, action) - except (AttributeError, TypeError): - action_method = self.default - + action_method = getattr(self, action, self.default) return action_method(data) def default(self, data): -- cgit From 899642030dd60541153ccee810d082816f92dd49 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 19:27:27 +0000 Subject: Change the return from glance to be a list of dictionaries describing VDIs Fix the rest of the code to account for this Add a test for swap --- nova/tests/test_xenapi.py | 23 +++++ nova/tests/xenapi/stubs.py | 23 +++-- nova/virt/xenapi/fake.py | 5 +- nova/virt/xenapi/vm_utils.py | 49 +++++++---- nova/virt/xenapi/vmops.py | 52 ++++++------ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 97 ++++++++++++++-------- 6 files changed, 168 insertions(+), 81 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index be1e35697..18a267896 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase): os_type="linux") self.check_vm_params_for_linux() + def test_spawn_vhd_glance_swapdisk(self): + # Change the default host_call_plugin to one that'll return + # a swap disk + orig_func = stubs.FakeSessionForVMTests.host_call_plugin + + stubs.FakeSessionForVMTests.host_call_plugin = \ + stubs.FakeSessionForVMTests.host_call_plugin_swap + + try: + # We'll steal the above glance linux test + self.test_spawn_vhd_glance_linux() + finally: + # Make sure to put this back + stubs.FakeSessionForVMTests.host_call_plugin = orig_func + + # We should have 2 VBDs. + self.assertEqual(len(self.vm['VBDs']), 2) + # Now test that we have 1. 
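# Editor's illustration (not part of the diff above): the swap-disk test
# works by swapping a class attribute and restoring it in a finally
# block. A minimal, self-contained sketch of that stub-and-restore
# pattern, using made-up names rather than the real stubs module:
class FakeSession(object):
    def call_plugin(self):
        return 'os disk only'

def call_plugin_swap(self):
    return 'os disk plus swap disk'

orig_func = FakeSession.call_plugin
FakeSession.call_plugin = call_plugin_swap
try:
    assert FakeSession().call_plugin() == 'os disk plus swap disk'
finally:
    # Always restore the original so later tests see the default.
    FakeSession.call_plugin = orig_func
assert FakeSession().call_plugin() == 'os disk only'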
+ self.tearDown() + self.setUp() + self.test_spawn_vhd_glance_linux() + self.assertEqual(len(self.vm['VBDs']), 1) + def test_spawn_vhd_glance_windows(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 9f6f64318..35308d95f 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -38,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return {'primary_vdi_uuid': vdi_uuid} + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -134,16 +134,29 @@ class FakeSessionForVMTests(fake.SessionBase): super(FakeSessionForVMTests, self).__init__(uri) def host_call_plugin(self, _1, _2, plugin, method, _5): + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + ret_str = json.dumps([dict(vdi_type='os', + vdi_uuid=vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str + + def host_call_plugin_swap(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) if plugin == "glance" and method == "download_vhd": swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) - return '%s' % json.dumps( - {'primary_vdi_uuid': vdi_rec['uuid'], - 'swap_vdi_uuid': swap_vdi_rec['uuid']}) - return '%s' % vdi_rec['uuid'] + ret_str = json.dumps( + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e36ef3288..76988b172 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -159,7 +159,10 @@ def after_VBD_create(vbd_ref, vbd_rec): vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] - vm_rec['VBDs'] = [vbd_ref] + if vm_rec.get('VBDs', None): + vm_rec['VBDs'].append(vbd_ref) + else: + vm_rec['VBDs'] = [vbd_ref] vm_name_label = _db_content['VM'][vm_ref]['name_label'] vbd_rec['vm_name_label'] = vm_name_label diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 3d980013a..bee9742a4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -377,6 +377,9 @@ class VMHelper(HelperBase): xenapi_image_service = ['glance', 'objectstore'] glance_address = 'address for glance services' glance_port = 'port for glance services' + + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise """ access = AuthManager().get_access_key(user, project) @@ -391,6 +394,10 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance_vhd(cls, session, instance_id, image, access, image_type): + """Tell glance to download an image and put the VHDs into the SR + + Returns: A list of dictionaries that describe VDIs + """ LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) @@ -410,25 +417,21 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', 
kwargs) result = session.wait_for_task(task, instance_id) - vdi_uuids = json.loads(result) - primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) + vdis = json.loads(result) + for vdi in vdis: + LOG.debug(_("xapi 'download_vhd' returned VDI of " + "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi)) cls.scan_sr(session, instance_id, sr_ref) + # Pull out the UUID of the first VDI + vdi_uuid = vdis[0]['vdi_uuid'] # Set the name-label to ease debugging - primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid) + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) primary_name_label = get_name_label_for_image(image) - session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - - LOG.debug(_("xapi 'download_vhd' returned VDI UUID " - "%(primary_vdi_uuid)s") % locals()) - if swap_vdi_uuid: - LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID " - "%(swap_vdi_uuid)s") % locals()) + session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) - LOG.debug("=" * 100) - return vdi_uuids + return vdis @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, @@ -440,6 +443,8 @@ class VMHelper(HelperBase): plugin; instead, it streams the disks through domU to the VDI directly. + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise """ # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and @@ -486,7 +491,7 @@ class VMHelper(HelperBase): return filename else: vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) - return {'primary_vdi_uuid': vdi_uuid} + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] @classmethod def determine_disk_image_type(cls, instance): @@ -545,6 +550,11 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, image_type): + """Fetch image from glance based on image type. + + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise + """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd( session, instance_id, image, access, image_type) @@ -555,6 +565,11 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, image_type): + """Fetch an image from objectstore. 
+ + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise + """ url = images.image_url(image) LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) if image_type == ImageType.KERNEL_RAMDISK: @@ -572,10 +587,10 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - uuid = session.wait_for_task(task, instance_id) + uuid_or_fn = session.wait_for_task(task, instance_id) if image_type != ImageType.KERNEL_RAMDISK: - return {'primary_vdi_uuid': uuid} - return uuid + return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)] + return uuid_or_fn @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 2a8d97a9d..02e140dcc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -91,7 +91,8 @@ class VMOps(object): def finish_resize(self, instance, disk_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid}) + vm_ref = self._create_vm(instance, + [dict(vdi_type='os', vdi_uuid=vdi_uuid)]) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -105,25 +106,25 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def _create_disk(self, instance): + def _create_disks(self, instance): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuids = VMHelper.fetch_image(self._session, + vdis = VMHelper.fetch_image(self._session, instance.id, instance.image_id, user, project, disk_image_type) - return vdi_uuids + return vdis def spawn(self, instance, network_info=None): - vdi_uuids = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuids, network_info) + vdis = self._create_disks(instance) + vm_ref = self._create_vm(instance, vdis, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuids, network_info=None): + def _create_vm(self, instance, vdis, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -142,15 +143,6 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - # Are we building from a pre-existing disk? 
- primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdi_uuids.get('primary_vdi_uuid')) - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) - if swap_vdi_uuid: - swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) - else: - swap_vdi_ref = None - disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None @@ -165,17 +157,29 @@ class VMOps(object): instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + # Create the VM ref and attach the first disk + first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdis[0]['vdi_uuid']) use_pv_kernel = VMHelper.determine_is_pv(self._session, - instance.id, primary_vdi_ref, disk_image_type, + instance.id, first_vdi_ref, disk_image_type, instance.os_type) - vm_ref = VMHelper.create_vm(self._session, instance, kernel, - ramdisk, use_pv_kernel) - + vm_ref = VMHelper.create_vm(self._session, instance, + kernel, ramdisk, use_pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) - if swap_vdi_ref: + vdi_ref=first_vdi_ref, userdevice=0, bootable=True) + + # Attach any other disks + # userdevice 1 is reserved for rescue + userdevice = 2 + for vdi in vdis[1:]: + # vdi['vdi_type'] is either 'os' or 'swap', but we don't + # really care what it is right here. + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi['vdi_uuid']) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=2, bootable=False) + vdi_ref=vdi_ref, userdevice=userdevice, + bootable=False) + userdevice += 1 # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -185,7 +189,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, - primary_vdi_ref, network_info) + first_vdi_ref, network_info) self.create_vifs(vm_ref, network_info) self.inject_network_info(instance, network_info, vm_ref) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0d02adfe9..039d1b8f6 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -91,8 +91,8 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, conn.close() -def _fixup_vhds(sr_path, staging_path, uuid_stack): - """Fixup the downloaded VHDs before we move them into the SR. +def _import_vhds(sr_path, staging_path, uuid_stack): + """Import the VHDs found in the staging path. We cannot extract VHDs directly into the SR since they don't yet have UUIDs, aren't properly associated with each other, and would be subject to @@ -102,16 +102,25 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): To avoid these we problems, we use a staging area to fixup the VHDs before moving them into the SR. The steps involved are: - 1. Extracting tarball into staging area + 1. Extracting tarball into staging area (done prior to this call) 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') - 3. Linking the two VHDs together + 3. Linking VHDs together if there's a snap.vhd 4. Pseudo-atomically moving the images into the SR. 
(It's not really - atomic because it takes place as two os.rename operations; however, - the chances of an SR.scan occuring between the two rename() + atomic because it takes place as multiple os.rename operations; + however, the chances of an SR.scan occuring between the rename()s invocations is so small that we can safely ignore it) + + Returns: A list of VDIs. Each list element is a dictionary containing + information about the VHD. Dictionary keys are: + 1. "vdi_type" - The type of VDI. Currently they can be "os_disk" or + "swap" + 2. "vdi_uuid" - The UUID of the VDI + + Example return: [{"vdi_type": "os_disk","vdi_uuid": "ffff-aaa..vhd"}, + {"vdi_type": "swap","vdi_uuid": "ffff-bbb..vhd"}] """ def rename_with_uuid(orig_path): """Rename VHD using UUID so that it will be recognized by SR on a @@ -162,39 +171,57 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): "VHD %(path)s is marked as hidden without child" % locals()) - orig_base_copy_path = os.path.join(staging_path, 'image.vhd') - if not os.path.exists(orig_base_copy_path): + def prepare_if_exists(staging_path, vhd_name, parent_path=None): + """ + Check for existance of a particular VHD in the staging path and + preparing it for moving into the SR. + + Returns: Tuple of (Path to move into the SR, VDI_UUID) + None, if the vhd_name doesn't exist in the staging path + + If the VHD exists, we will do the following: + 1. Rename it with a UUID. + 2. If parent_path exists, we'll link up the VHDs. + """ + orig_path = os.path.join(staging_path, vhd_name) + if not os.path.exists(orig_path): + return None + new_path, vdi_uuid = rename_with_uuid(orig_path) + if parent_path: + # NOTE(sirp): this step is necessary so that an SR scan won't + # delete the base_copy out from under us (since it would be + # orphaned) + link_vhds(new_path, parent_path) + return (new_path, vdi_uuid) + + vdi_return_list = [] + paths_to_move = [] + + image_info = prepare_if_exists(staging_path, 'image.vhd') + if not image_info: raise Exception("Invalid image: image.vhd not present") - base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) - - vdi_uuid = base_copy_uuid - orig_snap_path = os.path.join(staging_path, 'snap.vhd') - if os.path.exists(orig_snap_path): - snap_path, snap_uuid = rename_with_uuid(orig_snap_path) - vdi_uuid = snap_uuid - # NOTE(sirp): this step is necessary so that an SR scan won't - # delete the base_copy out from under us (since it would be - # orphaned) - link_vhds(snap_path, base_copy_path) - move_into_sr(snap_path) + paths_to_move.append(image_info[0]) + + snap_info = prepare_if_exists(staging_path, 'snap.vhd', image_path) + if snap_info: + paths_to_move.append(snap_info[0]) + # We return this snap as the VDI instead of image.vhd + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1])) else: - assert_vhd_not_hidden(base_copy_path) + # If there's no snap, we return the image.vhd UUID + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1])) + + swap_info = prepare_if_exists(staging_path, 'swap.vhd') + if swap_info: + paths_to_move.append(swap_info[0]) + vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1])) - # If we find a swap.vhd, go ahead and copy it into the SR - swap_vdi_uuid = None - orig_swap_path = os.path.join(staging_path, 'swap.vhd') - if os.path.exists(orig_swap_path): - swap_path, swap_vdi_uuid = rename_with_uuid(orig_swap_path) - move_into_sr(swap_path) + for path in paths_to_move: + move_into_sr(path) - vdi_uuids = {} - vdi_uuids['primary_vdi_uuid'] = vdi_uuid - if 
swap_vdi_uuid: - vdi_uuids['swap_vdi_uuid'] = swap_vdi_uuid + return vdi_return_list - move_into_sr(base_copy_path) - return vdi_uuids def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): """Hard-link VHDs into staging area with appropriate filename @@ -339,7 +366,9 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - return json.dumps(_fixup_vhds(sr_path, staging_path, uuid_stack)) + # Right now, it's easier to return a single string via XenAPI, + # so we'll json encode the list of VHDs. + return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack)) finally: _cleanup_staging_area(staging_path) -- cgit From 04785db717492c8ba7c2d184924b3773ec944f4c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 19:37:51 +0000 Subject: fix image_path in glance plugin --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 039d1b8f6..fef31a9ff 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -203,7 +203,8 @@ def _import_vhds(sr_path, staging_path, uuid_stack): paths_to_move.append(image_info[0]) - snap_info = prepare_if_exists(staging_path, 'snap.vhd', image_path) + snap_info = prepare_if_exists(staging_path, 'snap.vhd', + image_info[0]) if snap_info: paths_to_move.append(snap_info[0]) # We return this snap as the VDI instead of image.vhd -- cgit From 9e22f51c80cc5f7f5ea60b5b8bb779779a19667c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 20:01:09 +0000 Subject: put back the hidden assert check i accidentally removed from glance plugin --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index fef31a9ff..0c00d168b 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -210,11 +210,13 @@ def _import_vhds(sr_path, staging_path, uuid_stack): # We return this snap as the VDI instead of image.vhd vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1])) else: + assert_vhd_not_hidden(image_info[0]) # If there's no snap, we return the image.vhd UUID vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1])) swap_info = prepare_if_exists(staging_path, 'swap.vhd') if swap_info: + assert_vhd_not_hidden(swap_info[0]) paths_to_move.append(swap_info[0]) vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1])) -- cgit From fc27a0ac4f907282a669e2c9f3e128890907f236 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 20:21:40 +0000 Subject: add a comment when calling glance:download_vhd so it's clear what is returned --- nova/virt/xenapi/vm_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index bee9742a4..06ee8ee9b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -417,6 +417,10 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) result = session.wait_for_task(task, instance_id) + # 'download_vhd' will return a json encoded string containing + # a list of dictionaries describing VDIs. 
The dictionary will + # contain 'vdi_type' and 'vdi_uuid' keys. 'vdi_type' can be + # 'os' or 'swap' right now. vdis = json.loads(result) for vdi in vdis: LOG.debug(_("xapi 'download_vhd' returned VDI of " -- cgit From 613aee2dd146957cb0c040d7a7a1a661b487efbc Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 26 May 2011 16:58:06 -0400 Subject: move udev file so it follows the xen-backend.rules --- plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules | 3 --- .../xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules | 3 +++ .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules create mode 100644 plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules deleted file mode 100644 index b179f0847..000000000 --- a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules +++ /dev/null @@ -1,3 +0,0 @@ -SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" -# is this one needed? -#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules new file mode 100644 index 000000000..b179f0847 --- /dev/null +++ b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" +# is this one needed? 
+#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 2ebc4dd8c..9fde69377 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -172,7 +172,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": if len(sys.argv) != 4: - print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ + print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From a79f01fcea81bb6be233a65670c6a79af8534a10 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 17:27:48 -0400 Subject: adding TODOs per dabo's review --- nova/api/openstack/wsgi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 7a747842e..ddf4e6fa9 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -237,6 +237,7 @@ class XMLDictSerializer(DictSerializer): if xmlns: result.setAttribute('xmlns', xmlns) + #TODO(bcwaldon): accomplish this without a type-check if type(data) is list: collections = metadata.get('list_collections', {}) if nodename in collections: @@ -255,6 +256,7 @@ class XMLDictSerializer(DictSerializer): for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check elif type(data) is dict: collections = metadata.get('dict_collections', {}) if nodename in collections: -- cgit From d7e0b45a9bc415e87beee32f10c8d6bdff9819ed Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 15:08:53 -0700 Subject: changes per review --- nova/rpc.py | 17 ++++++++++------- nova/service.py | 17 ++++++++--------- nova/tests/test_rpc.py | 12 +++--------- nova/tests/test_service.py | 6 ++++-- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 493978e57..1ec495bc8 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -28,6 +28,7 @@ import json import sys import time import traceback +import types import uuid from carrot import connection as carrot_connection @@ -228,7 +229,7 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # Check if the result was a generator - if hasattr(rval, 'send'): + if isinstance(rval, types.GeneratorType): for x in rval: msg_reply(msg_id, x, None) else: @@ -236,7 +237,7 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. 
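# Editor's sketch (not part of the diff above): the switch from
# hasattr(rval, 'send') to isinstance(rval, types.GeneratorType) makes
# the "is this a generator?" check explicit, so only real generator
# objects are iterated for multicall replies. A tiny runnable example:
import types

def echo_three_times_yield(value):
    for i in range(3):
        yield value + i

rval = echo_three_times_yield(42)
assert isinstance(rval, types.GeneratorType)            # generator object
assert not isinstance([42, 43, 44], types.GeneratorType)  # plain list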
msg_reply(msg_id, None, None) - elif hasattr(rval, 'send'): + elif isinstance(rval, types.GeneratorType): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: @@ -281,11 +282,11 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): """Groups consumers to listen on together on a single connection.""" - def __init__(self, conn, consumer_list): + def __init__(self, connection, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None self.enabled = True - self.init(conn) + self.init(connection) def init(self, conn): if not conn: @@ -316,8 +317,7 @@ class ConsumerSet(object): running = False break except Exception as e: - LOG.error(_("Received exception %s " % type(e) + \ - "while processing consumer")) + LOG.exception(_("Exception while processing consumer")) self.reconnect() # Break to outer loop break @@ -534,7 +534,10 @@ def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) # NOTE(vish): return the last result from the multicall - return list(rv)[-1] + rv = list(rv) + if not rv: + return + return rv[-1] def cast(context, topic, msg): diff --git a/nova/service.py b/nova/service.py index 782183322..74f9f04d8 100644 --- a/nova/service.py +++ b/nova/service.py @@ -105,19 +105,18 @@ class Service(object): connection=self.conn, topic=self.topic, proxy=self) - - cset = rpc.ConsumerSet(self.conn, [consumer_all, - consumer_node, - fanout]) + consumer_set = rpc.ConsumerSet( + connection=self.conn, + consumer_list=[consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers def _wait(): try: - cset.wait() + consumer_set.wait() finally: - cset.close() + consumer_set.close() - self.csetthread = greenthread.spawn(_wait) + self.consumer_set_thread = greenthread.spawn(_wait) if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -182,9 +181,9 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.csetthread.kill() + self.consumer_set_thread.kill() try: - self.csetthread.wait() + self.consumer_set_thread.wait() except greenlet.GreenletExit: pass for x in self.timers: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 35f4a64d9..ffd748efe 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -66,12 +66,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): if i > 0: self.fail('should only receive one response') self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times(self): value = 42 @@ -79,10 +77,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times_yield(self): value = 42 @@ -90,10 +86,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call.""" diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 0bba01d92..d1cc8bd61 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -142,7 +142,8 @@ class ServiceTestCase(test.TestCase): mock_cset = 
self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) service_create = {'host': host, @@ -331,7 +332,8 @@ class ServiceTestCase(test.TestCase): mock_cset = self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) self.mox.StubOutWithMock(serv.manager.driver, -- cgit From 2819681b762fe8a23f3af68f1c1cbed0a113c08e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 18:14:38 -0400 Subject: Rename instances.image_id to instances.image_ref. --- nova/api/ec2/cloud.py | 6 +++--- nova/compute/api.py | 2 +- nova/compute/manager.py | 2 +- nova/db/sqlalchemy/api.py | 2 +- nova/db/sqlalchemy/models.py | 4 ++-- nova/tests/test_cloud.py | 6 +++--- nova/tests/test_compute.py | 2 +- nova/virt/libvirt/firewall.py | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8580dc79e..5bbee1afd 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -159,7 +159,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -724,13 +724,13 @@ class CloudController(object): instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.is_admin: - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = self.image_ec2_id(instance['image_id']) + i['imageId'] = self.image_ec2_id(instance['image_ref']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} diff --git a/nova/compute/api.py b/nova/compute/api.py index 432ea1fad..61b45843d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -207,7 +207,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_href, + 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..7c88236ba 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -235,7 +235,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id) + is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id) # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. 
We diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e4dda5c12..4403cd7d9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -955,7 +955,7 @@ def instance_get_project_vpn(context, project_id): options(joinedload('security_groups')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ - filter_by(image_id=str(FLAGS.vpn_image_id)).\ + filter_by(image_ref=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1215448f8..6d4be8767 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -184,11 +184,11 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - image_id = Column(String(255)) + image_ref = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) -# image_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..eefab58d0 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -191,10 +191,10 @@ class CloudTestCase(test.TestCase): def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) inst2 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host2'}) comp1 = db.service_create(self.context, {'host': 'host1', 'availability_zone': 'zone1', @@ -390,7 +390,7 @@ class CloudTestCase(test.TestCase): def test_terminate_instances(self): inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) terminate_instances = self.cloud.terminate_instances # valid instance_id diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4097660f..25454087d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -84,7 +84,7 @@ class ComputeTestCase(test.TestCase): def _create_instance(self, params={}): """Create a test instance""" inst = {} - inst['image_id'] = 1 + inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 7e00662cd..12727f2b1 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -195,7 +195,7 @@ class NWFilterFirewall(FirewallDriver): logging.info('ensuring static filters') self._ensure_static_filters() - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' @@ -336,7 +336,7 @@ class NWFilterFirewall(FirewallDriver): def _create_network_filters(self, instance, network_info, instance_secgroup_filter_name): - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' -- cgit From 103bcae9f172dfee64e7b9235807bcfe1a8aefb3 Mon Sep 17 00:00:00 2001 
From: termie Date: Thu, 26 May 2011 17:06:52 -0700 Subject: fix a minor bug unrelated to this change --- nova/rpc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 1ec495bc8..c5277c6a9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -212,7 +212,9 @@ class AdapterConsumer(Consumer): # we just log the message and send an error string # back to the caller LOG.warn(_('no method for message: %s') % message_data) - msg_reply(msg_id, _('No method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) -- cgit From 6b0ed0cb61838d01b15df26fc32df0de90f1cfbe Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:20:45 +0900 Subject: Fix a description of 'snapshot_name_template'. --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/api.py b/nova/db/api.py index 3597732b9..e85ce9f16 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -48,7 +48,7 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', - 'Template string to be used to generate instance names') + 'Template string to be used to generate snapshot names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], -- cgit From 8b4c91b9f2c28e4809659f199affddbd66482dbb Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:36:59 +0900 Subject: Fix pep8 violations. --- nova/api/ec2/cloud.py | 13 +++++++++---- .../versions/019_add_volume_snapshot_support.py | 3 +-- nova/db/sqlalchemy/models.py | 1 + nova/tests/test_volume.py | 5 +++-- nova/volume/driver.py | 6 +++--- nova/volume/manager.py | 3 ++- 6 files changed, 19 insertions(+), 12 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6927d6774..403b7ab40 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -285,7 +285,9 @@ class CloudController(object): snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshot = self.volume_api.get_snapshot( + context, + snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) @@ -295,7 +297,8 @@ class CloudController(object): def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') - s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], + 'vol-%08x') s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] @@ -308,7 +311,8 @@ class CloudController(object): return s def create_snapshot(self, context, volume_id, **kwargs): - LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + LOG.audit(_("Create snapshot of volume %s"), volume_id, + context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) snapshot = self.volume_api.create_snapshot( context, @@ -629,7 +633,8 @@ class CloudController(object): else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: - v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], + 
'snap-%08x') else: v['snapshotId'] = None diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 5a44bac16..f16d6db56 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -48,8 +48,7 @@ snapshots = Table('snapshots', meta, unicode_error=None, _warn_on_bytestring=False)), Column('display_description', String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) + unicode_error=None, _warn_on_bytestring=False))) def upgrade(migrate_engine): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b887c5bad..480f62399 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -353,6 +353,7 @@ class Snapshot(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..3472b1f59 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -193,8 +193,9 @@ class VolumeTestCase(test.TestCase): self.volume.create_volume(self.context, volume_id) snapshot_id = self._create_snapshot(volume_id) self.volume.create_snapshot(self.context, volume_id, snapshot_id) - self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), - snapshot_id).id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) self.volume.delete_snapshot(self.context, snapshot_id) self.assertRaises(exception.NotFound, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e0e18b9bf..21cc228c9 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -97,7 +97,7 @@ class VolumeDriver(object): def _copy_volume(self, srcstr, deststr, size_in_g): self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % (size_in_g * 1024), 'bs=1M') - + def _volume_not_present(self, volume_name): path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: @@ -115,7 +115,7 @@ class VolumeDriver(object): self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, self._escape_snapshot(volume['name']))) - + def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' @@ -150,7 +150,7 @@ class VolumeDriver(object): out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): raise exception.VolumeIsBusy(volume_name=volume['name']) - + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index fd889633d..40a104d35 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -169,7 +169,8 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) model_update = self.driver.create_snapshot(snapshot_ref) if model_update: - self.db.snapshot_update(context, snapshot_ref['id'], model_update) + self.db.snapshot_update(context, snapshot_ref['id'], + model_update) except Exception: self.db.snapshot_update(context, -- cgit From c229d6e32f5275b2eb10e760f89a52dc31635c47 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 14:13:17 +0900 Subject: Fix pep8 errors. 
--- nova/api/ec2/cloud.py | 7 ++++--- nova/tests/test_volume.py | 10 ++++++---- nova/volume/api.py | 3 ++- nova/volume/driver.py | 4 ++-- nova/volume/manager.py | 5 +++-- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b717a10c0..79cc3b3bf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -666,14 +666,15 @@ class CloudController(object): return v def create_volume(self, context, **kwargs): - size = kwargs.get('size'); + size = kwargs.get('size') if kwargs.get('snapshot_id') != None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) - LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, + context=context) else: snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) - + volume = self.volume_api.create( context, size=size, diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 8d58b3135..4f10ee6af 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -78,10 +78,12 @@ class VolumeTestCase(test.TestCase): self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) volume_dst_id = self._create_volume(0, snapshot_id) self.volume.create_volume(self.context, volume_dst_id, snapshot_id) - self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), - volume_dst_id).id) - self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), - volume_dst_id).snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).snapshot_id) self.volume.delete_volume(self.context, volume_dst_id) self.volume.delete_snapshot(self.context, snapshot_id) diff --git a/nova/volume/api.py b/nova/volume/api.py index 7fa80383b..5804955f7 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -43,7 +43,8 @@ class API(base.Base): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": - raise exception.ApiError(_("Snapshot status must be available")) + raise exception.ApiError( + _("Snapshot status must be available")) size = snapshot['volume_size'] if quota.allowed_volumes(context, 1, size) < 1: diff --git a/nova/volume/driver.py b/nova/volume/driver.py index df9767a79..87e13277f 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -674,10 +674,10 @@ class SheepdogDriver(VolumeDriver): def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" self._try_execute('qemu-img', 'create', '-b', - "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), "sheepdog:%s" % volume['name']) - def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 673771aa7..ff53f0701 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -112,8 +112,9 @@ class VolumeManager(manager.SchedulerDependentManager): model_update = self.driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot(volume_ref, - snapshot_ref) + model_update = self.driver.create_volume_from_snapshot( + volume_ref, + snapshot_ref) if 
model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From ca5a91b3fe6eaa1c2d2b85cb5a11d2bb36e7a436 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 15:14:16 +0900 Subject: fixed read_only check --- nova/virt/libvirt_conn.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8c9a3550a..7982611fa 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,27 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance('suspend', instance) + dom = self._lookup_by_name(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance('resume', instance) + dom = self._lookup_by_name(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance('managedSave', instance, 0) + dom = self._lookup_by_name(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance('create', instance) + dom = self._lookup_by_name(instance.name) + dom.create() except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From a92f2bcbbaa40458e81bad3f6cb21288161322f9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 06:56:50 +0000 Subject: fix calls to openssl properly now. Only append \n to stdin when decoding. Updated the test slightly, also. --- nova/tests/test_xenapi.py | 1 + nova/virt/xenapi/vmops.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a267896..3ba37a762 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -595,6 +595,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encryption(self): msg = "This is a top-secret message" enc = self.alice.encrypt(msg) + self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, msg) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6d516ddbc..1d8678ce2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1195,12 +1195,16 @@ class SimpleDH(object): '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' + # When decoding base64, we need to make sure there's a + # single '\n' at the end of the base64 encoded data. 
+ # It's kinda dumb that openssl wants to see a newline + text = text.strip('\n') + '\n' else: dec_flag = '' shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) - proc.stdin.write(text + '\n') + proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() -- cgit From 34bd57c380c348fa9c60cf6b3371352da6e8853c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 16:03:56 +0900 Subject: remove _take_action_to_instance --- nova/virt/libvirt_conn.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 7982611fa..47a77b3ae 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -548,19 +548,6 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - def _take_action_to_instance(self, action, instance, *arg): - """action VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" -- cgit From bd19bd2edd612dfea09e4230c59422e59c6de181 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 05:01:42 -0700 Subject: fixed docstrings and general tidying --- nova/compute/api.py | 39 ++++++++++++++++---------------- nova/scheduler/host_filter.py | 41 +++++++++++++++++++++------------- nova/scheduler/zone_aware_scheduler.py | 33 +++++++++++++++++---------- 3 files changed, 67 insertions(+), 46 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 7f1fe1b5c..3e5105050 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -88,10 +88,10 @@ class API(base.Base): {"method": "get_network_topic", "args": {'fake': 1}}) def _check_injected_file_quota(self, context, injected_files): - """Enforce quota limits on injected files. + """ + Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. - """ if injected_files is None: return @@ -137,10 +137,10 @@ class API(base.Base): availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None): - """Create the number and type of instances requested. + """ + Create the number and type of instances requested. Verifies that quota and other arguments are valid. - """ if not instance_type: instance_type = instance_types.get_default_instance_type() @@ -293,13 +293,13 @@ class API(base.Base): return False def ensure_default_security_group(self, context): - """Ensure that a context has a security group. + """ + Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context - """ try: db.security_group_get_by_name(context, context.project_id, @@ -328,11 +328,11 @@ class API(base.Base): "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_id): - """Called when a security group gains a new or loses a member. + """ + Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. - """ # First, we get the security group rules that reference this group as # the grantee.. 
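The libvirt changes earlier in this series replace the generic _take_action_to_instance helper with a direct domain lookup followed by the specific libvirt call. A rough standalone illustration of that pattern with the libvirt Python bindings (the URI and domain name are placeholders; error handling is elided and a live hypervisor is required to run it):

    import libvirt

    def pause_and_unpause(uri, name):
        # Look the domain up by name and drive it directly, mirroring the
        # pause()/unpause() bodies in the diff above.
        conn = libvirt.open(uri)
        try:
            dom = conn.lookupByName(name)
            dom.suspend()   # pause
            dom.resume()    # unpause
        finally:
            conn.close()

    # pause_and_unpause('qemu:///system', 'instance-00000001')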
@@ -370,7 +370,8 @@ class API(base.Base): "args": {"security_group_id": group_id}}) def update(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. + """ + Updates the instance in the datastore. :param context: The security context :param instance_id: ID of the instance to update @@ -379,7 +380,6 @@ class API(base.Base): updated :returns: None - """ rv = self.db.instance_update(context, instance_id, kwargs) return dict(rv.iteritems()) @@ -424,22 +424,22 @@ class API(base.Base): @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): - """A version of get with special routing characteristics. + """ + A version of get with special routing characteristics. Use this method instead of get() if this is the only operation you intend to to. It will route to novaclient.get if the instance is not found. - """ return self.get(context, instance_id) def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): - """Get all instances filtered by one of the given parameters. + """ + Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. - """ if reservation_id is not None: return self.db.instance_get_all_by_reservation( @@ -463,13 +463,13 @@ class API(base.Base): def _cast_compute_message(self, method, context, instance_id, host=None, params=None): - """Generic handler for RPC casts to compute. + """ + Generic handler for RPC casts to compute. :param params: Optional dictionary of arguments to be passed to the compute worker :returns: None - """ if not params: params = {} @@ -483,7 +483,8 @@ class API(base.Base): def _call_compute_message(self, method, context, instance_id, host=None, params=None): - """Generic handler for RPC calls to compute. + """ + Generic handler for RPC calls to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -516,10 +517,10 @@ class API(base.Base): % instance_id) def snapshot(self, context, instance_id, name): - """Snapshot the given instance. + """ + Snapshot the given instance. :returns: A dict containing image metadata - """ properties = {'instance_id': str(instance_id), 'user_id': str(context.user_id)} diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index ed76c90bf..89faace45 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,9 +69,11 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """NOP host filter. Returns all hosts in ZoneManager. + """ + NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used - to give us.""" + to give us. + """ def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising @@ -133,8 +135,10 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts.""" + """ + Host Filter to allow simple JSON-based grammar for + selecting hosts. 
+ """ def _equals(self, args): """First term is == all the other terms.""" @@ -229,8 +233,10 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]'""" + """ + Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]' + """ if not string: return None if string[0] != '$': @@ -277,22 +283,25 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters.""" + """ + Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. + """ if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in FILTERS: - if "%s.%s" % (filter_class.__module__, filter_class.__name__) == \ - filter_name: + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter + """ + The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. @@ -313,6 +322,8 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index dc18fc427..236907626 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -40,13 +40,15 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """This method is called from nova.compute.api to provision + """ + This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or 2. Use the Build Plan information in the request parameters to simply create the instance (either in this zone or - a child zone).""" + a child zone). + """ # TODO(sandy): We'll have to look for richer specs at some point. @@ -79,15 +81,16 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) else: # TODO(sandy) Provision in child zone ... 
- LOG.warning(_("Provision to Child Zone not supported (yet)") - % locals()) + LOG.warning(_("Provision to Child Zone not supported (yet)")) pass def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information + """ + Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal - anything about the children.""" + anything about the children. + """ return self._schedule(context, "compute", request_spec, *args, **kwargs) @@ -95,13 +98,15 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one + """ + The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, + """ + Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -137,11 +142,15 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format.""" + """ + Derived classes must override this method and return + a list of hosts in [(hostname, capability_dict)] format. + """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ raise NotImplemented() -- cgit From 299cadb9ce2e2600b18e2befbed967ca2941256d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 08:15:56 -0400 Subject: Commit the migration script. --- .../migrate_repo/versions/019_rename_image_ids.py | 39 ++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py new file mode 100644 index 000000000..6838f1ea6 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') -- cgit From e5d89198b188b9ae62ff0ac2bd72fd321f541713 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 09:43:10 -0400 Subject: Libvirt updates for image_ref. --- nova/virt/libvirt/connection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 1cedd1fe3..62c40a022 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -392,7 +392,7 @@ class LibvirtConnection(driver.ComputeDriver): elevated = context.get_admin_context() (image_service, image_id) = nova.image.get_image_service( - instance['image_id']) + instance['image_ref']) base = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], @@ -779,7 +779,7 @@ class LibvirtConnection(driver.ComputeDriver): project = manager.AuthManager().get_project(inst['project_id']) if not disk_images: - disk_images = {'image_id': inst['image_id'], + disk_images = {'image_id': inst['image_ref'], 'kernel_id': inst['kernel_id'], 'ramdisk_id': inst['ramdisk_id']} @@ -875,7 +875,7 @@ class LibvirtConnection(driver.ComputeDriver): if key or net: inst_name = inst['name'] - img_id = inst.image_id + img_id = inst.image_ref if key: LOG.info(_('instance %(inst_name)s: injecting key into' ' image %(img_id)s') % locals()) -- cgit From 107b15d2dd7d554d9cca177343ab45c51029d484 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:15:33 -0400 Subject: fix encryption handling of newlines again and restructure the code a bit --- nova/tests/test_xenapi.py | 22 ++++++++++++++++++---- nova/virt/xenapi/vmops.py | 24 +++++++----------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3ba37a762..0632d05a5 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -592,12 +592,26 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): bob_shared = self.bob.compute_shared(alice_pub) self.assertEquals(alice_shared, bob_shared) - def test_encryption(self): - msg = "This is a top-secret message" - enc = self.alice.encrypt(msg) + def _test_encryption(self, message): + enc = self.alice.encrypt(message) self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) - self.assertEquals(dec, msg) + self.assertEquals(dec, message) + + def test_encrypt_simple_message(self): + self._test_encryption('This is a simple message.') + + def test_encrypt_message_with_newlines_at_end(self): + self._test_encryption('This message has a newline at the end.\n') + + def test_encrypt_many_newlines_at_end(self): + self._test_encryption('Message with lotsa newlines.\n\n\n') + + def test_encrypt_newlines_inside_message(self): + self._test_encryption('Message\nwith\ninterior\nnewlines.') + + def test_encrypt_with_leading_newlines(self): + self._test_encryption('\n\nMessage with leading newlines.') def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py 
b/nova/virt/xenapi/vmops.py index 1d8678ce2..ce84c8652 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,30 +1190,20 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, text, which): - base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' - '-nosalt %(dec_flag)s') - if which.lower()[0] == 'd': - dec_flag = ' -d' - # When decoding base64, we need to make sure there's a - # single '\n' at the end of the base64 encoded data. - # It's kinda dumb that openssl wants to see a newline - text = text.strip('\n') + '\n' - else: - dec_flag = '' - shared = self._shared - cmd = base_cmd % locals() - proc = _runproc(cmd) + def _run_ssl(self, subcommand, text): + proc = _runproc('openssl %s' % subcommand) proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read().strip('\n') + return proc.stdout.read() def encrypt(self, text): - return self._run_ssl(text, 'enc') + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): - return self._run_ssl(text, 'dec') + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared + return self._run_ssl(cmd, text) -- cgit From 3f911877a2a9facdf153f173b3fb76a18e44a2ac Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 07:24:02 -0700 Subject: docstrings again and import ordering --- nova/compute/api.py | 30 ++++++++++-------------------- nova/scheduler/host_filter.py | 18 ++++++------------ nova/scheduler/zone_aware_scheduler.py | 20 +++++++------------- 3 files changed, 23 insertions(+), 45 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3e5105050..35d60446c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -88,8 +88,7 @@ class API(base.Base): {"method": "get_network_topic", "args": {'fake': 1}}) def _check_injected_file_quota(self, context, injected_files): - """ - Enforce quota limits on injected files. + """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. """ @@ -137,8 +136,7 @@ class API(base.Base): availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None): - """ - Create the number and type of instances requested. + """Create the number and type of instances requested. Verifies that quota and other arguments are valid. """ @@ -293,8 +291,7 @@ class API(base.Base): return False def ensure_default_security_group(self, context): - """ - Ensure that a context has a security group. + """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. @@ -328,8 +325,7 @@ class API(base.Base): "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_id): - """ - Called when a security group gains a new or loses a member. + """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. @@ -370,8 +366,7 @@ class API(base.Base): "args": {"security_group_id": group_id}}) def update(self, context, instance_id, **kwargs): - """ - Updates the instance in the datastore. + """Updates the instance in the datastore. 
:param context: The security context :param instance_id: ID of the instance to update @@ -424,8 +419,7 @@ class API(base.Base): @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): - """ - A version of get with special routing characteristics. + """A version of get with special routing characteristics. Use this method instead of get() if this is the only operation you intend to to. It will route to novaclient.get if the instance is not @@ -435,8 +429,7 @@ class API(base.Base): def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): - """ - Get all instances filtered by one of the given parameters. + """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. @@ -463,8 +456,7 @@ class API(base.Base): def _cast_compute_message(self, method, context, instance_id, host=None, params=None): - """ - Generic handler for RPC casts to compute. + """Generic handler for RPC casts to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -483,8 +475,7 @@ class API(base.Base): def _call_compute_message(self, method, context, instance_id, host=None, params=None): - """ - Generic handler for RPC calls to compute. + """Generic handler for RPC calls to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -517,8 +508,7 @@ class API(base.Base): % instance_id) def snapshot(self, context, instance_id, name): - """ - Snapshot the given instance. + """Snapshot the given instance. :returns: A dict containing image metadata """ diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 89faace45..4260cbf42 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,8 +69,7 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """ - NOP host filter. Returns all hosts in ZoneManager. + """ NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used to give us. """ @@ -135,8 +134,7 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """ - Host Filter to allow simple JSON-based grammar for + """Host Filter to allow simple JSON-based grammar for selecting hosts. """ @@ -233,8 +231,7 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """ - Strings prefixed with $ are capability lookups in the + """Strings prefixed with $ are capability lookups in the form '$service.capability[.subcap*]' """ if not string: @@ -283,8 +280,7 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """ - Since the caller may specify which filter to use we need + """Since the caller may specify which filter to use we need to have an authoritative list of what is permissible. This function checks the filter name against a predefined set of acceptable filters. @@ -300,8 +296,7 @@ def choose_host_filter(filter_name=None): class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """ - The HostFilterScheduler uses the HostFilter to filter + """The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. 
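choose_host_filter(), reworked above, selects a filter by comparing each candidate's fully qualified 'module.ClassName' string against the requested name, falling back to a flag-defined default. A compact standalone version of that selection logic (with placeholder filter classes and a plain default string standing in for FLAGS.default_host_filter):

    class AllHostsFilter(object):
        pass

    class JsonFilter(object):
        pass

    FILTERS = [AllHostsFilter, JsonFilter]
    DEFAULT_HOST_FILTER = '%s.%s' % (AllHostsFilter.__module__,
                                     AllHostsFilter.__name__)

    def choose_host_filter(filter_name=None):
        # Fall back to the default, then match on 'module.ClassName'.
        if not filter_name:
            filter_name = DEFAULT_HOST_FILTER
        for filter_class in FILTERS:
            full_name = '%s.%s' % (filter_class.__module__,
                                   filter_class.__name__)
            if full_name == filter_name:
                return filter_class()
        raise LookupError('no such host filter: %s' % filter_name)

    print choose_host_filter().__class__.__name__   # AllHostsFilter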
@@ -322,8 +317,7 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. """ return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 236907626..bc67c7794 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -23,8 +23,8 @@ across zones. There are two expansion points to this class for: import operator from nova import db -from nova import rpc from nova import log as logging +from nova import rpc from nova.scheduler import api from nova.scheduler import driver @@ -40,8 +40,7 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """ - This method is called from nova.compute.api to provision + """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or @@ -85,8 +84,7 @@ class ZoneAwareScheduler(driver.Scheduler): pass def select(self, context, request_spec, *args, **kwargs): - """ - Select returns a list of weights and zone/host information + """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children. @@ -98,15 +96,13 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """ - The schedule() contract requires we return the one + """The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """ - Returns a list of hosts that meet the required specs, + """Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -142,15 +138,13 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format. """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. 
""" raise NotImplemented() -- cgit From 28b19b9e20100236f98e04cc43bcf106768ff2bb Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Fri, 27 May 2011 15:28:10 +0100 Subject: nova/auth/novarc.template: Changed NOVA_KEY_DIR to allow symlink support --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index cda2ecc28..8170fcafe 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null) +NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From f6d847cd867c09319f9fc451c09dc7322542e26b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:40:50 -0400 Subject: prevent encryption from adding newlines on long messages --- nova/tests/test_xenapi.py | 4 ++++ nova/virt/xenapi/vmops.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 0632d05a5..fe37f0ebe 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,6 +594,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) + print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) @@ -613,6 +614,9 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encrypt_with_leading_newlines(self): self._test_encryption('\n\nMessage with leading newlines.') + def test_encrypt_really_long_message(self): + self._test_encryption(''.join(['abcd' for i in xrange(1024)])) + def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ce84c8652..1fcaaeede 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1201,7 +1201,7 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): -- cgit From 60a291747eeded09ade608088eae47fdb300a56b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:41:12 -0400 Subject: remove errant print statement --- nova/tests/test_xenapi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index fe37f0ebe..9d56c1644 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,7 +594,6 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) - print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) -- cgit From c26be56d63a9d263ea8632514be03607713c754d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 15:48:40 -0400 Subject: Glance client updates for xenapi and vmware API to work with image refs. 
--- nova/image/__init__.py | 42 +++++++++++++++++++++++++++--------- nova/tests/glance/stubs.py | 12 +++++++---- nova/tests/test_vmwareapi.py | 5 ++--- nova/tests/test_xenapi.py | 29 +++++++++++-------------- nova/tests/vmwareapi/db_fakes.py | 2 +- nova/virt/vmwareapi/vmops.py | 12 +++++------ nova/virt/vmwareapi/vmware_images.py | 16 ++++++-------- nova/virt/xenapi/vm_utils.py | 14 ++++++------ nova/virt/xenapi/vmops.py | 2 +- 9 files changed, 78 insertions(+), 56 deletions(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 9b9108bd2..011d79d61 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -18,16 +18,16 @@ from urlparse import urlparse - from nova import exception -import nova.image.glance -from nova.utils import import_class +from nova import utils from nova import flags - FLAGS = flags.FLAGS +GlanceClient = utils.import_class('glance.client.Client') + + def _parse_image_ref(image_href): """Parse an image href into composite parts. @@ -43,10 +43,36 @@ def _parse_image_ref(image_href): def get_default_image_service(): - ImageService = import_class(FLAGS.image_service) + ImageService = utils.import_class(FLAGS.image_service) return ImageService() +def get_glance_client(image_href): + """Get the correct glance client and id for the given image_href. + + The image_href param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_href is an int, then flags are used to create the default + glance client. + + :param image_href: image ref/id for an image + :returns: a tuple of the form (glance_client, image_id) + + """ + image_href = image_href or 0 + if str(image_href).isdigit(): + glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) + return (glance_client, int(image_href)) + + try: + (image_id, host, port) = _parse_image_ref(image_href) + except: + raise exception.InvalidImageRef(image_href=image_href) + glance_client = GlanceClient(host, port) + #glance_client = client.Client(host, port) + return (glance_client, image_id) + + def get_image_service(image_href): """Get the proper image_service and id for the given image_href. 
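get_glance_client() above branches on whether the image reference is a bare integer id or a full href, and for hrefs it relies on _parse_image_ref to split out the id, host and port. A simplified version of that parsing, for illustration only (the real helper may differ in its error and edge-case handling):

    from urlparse import urlparse

    def parse_image_ref(image_href):
        # 'http://myglanceserver:9292/images/42' -> (42, 'myglanceserver', 9292)
        parsed = urlparse(str(image_href))
        image_id = int(parsed.path.rstrip('/').split('/')[-1])
        return (image_id, parsed.hostname, parsed.port)

    def split_image_ref(image_href):
        # Bare integers (or digit strings) identify images on the default server.
        if str(image_href).isdigit():
            return (int(image_href), None, None)
        return parse_image_ref(image_href)

    print split_image_ref(42)                                      # (42, None, None)
    print split_image_ref('http://myglanceserver:9292/images/42')  # (42, 'myglanceserver', 9292)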
@@ -62,10 +88,6 @@ def get_image_service(image_href): if str(image_href).isdigit(): return (get_default_image_service(), int(image_href)) - try: - (image_id, host, port) = _parse_image_ref(image_href) - except: - raise exception.InvalidImageRef(image_href=image_href) - glance_client = nova.image.glance.GlanceClient(host, port) + (glance_client, image_id) = get_glance_client(image_href) image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 5872552ec..8611fef29 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,13 +16,17 @@ import StringIO -import glance.client +from nova import images -def stubout_glance_client(stubs, cls): + +def get_mock_glance_client(): + return FakeGlance() + + +def stubout_glance_client(stubs): """Stubs out glance.client.Client""" - stubs.Set(glance.client, 'Client', - lambda *args, **kwargs: cls(*args, **kwargs)) + stubs.Set(images, 'get_glance_client', get_mock_glance_client) class FakeGlance(object): diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 22b66010a..e5ebd1600 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase): vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) stubs.set_stubs(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) self.conn = vmwareapi_conn.get_connection(False) def _create_instance_in_the_db(self): @@ -64,7 +63,7 @@ class VMWareAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': "1", + 'image_ref': "1", 'kernel_id': "1", 'ramdisk_id': "1", 'instance_type': 'm1.large', diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a267896..56e1e47af 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.values = {'id': 1, 'project_id': 'fake', 'user_id': 'fake', - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) stubs.stub_out_vm_methods(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) self.context = context.RequestContext('fake', 'fake', False) self.conn = xenapi_conn.get_connection(False) @@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': id, 'project_id': proj, 'user_id': user, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') - def _test_spawn(self, image_id, kernel_id, ramdisk_id, + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", instance_id=1, check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': image_id, + 'image_ref': image_ref, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': 
instance_type_id, @@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -623,7 +622,7 @@ class XenAPIMigrateInstance(test.TestCase): self.values = {'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, 'local_gb': 5, @@ -634,8 +633,7 @@ class XenAPIMigrateInstance(test.TestCase): fake_utils.stub_out_utils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) def tearDown(self): super(XenAPIMigrateInstance, self).tearDown() @@ -661,8 +659,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): """Unit tests for code that detects the ImageType.""" def setUp(self): super(XenAPIDetermineDiskImageTestCase, self).setUp() - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) class FakeInstance(object): pass @@ -679,7 +676,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -689,7 +686,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): DISK_RAW is assumed. """ FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -699,7 +696,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'raw'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -709,7 +706,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'vhd'. 
""" FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index 0addd5573..764de42d8 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs): 'name': values['name'], 'id': values['id'], 'reservation_id': utils.generate_uid('r'), - 'image_id': values['image_id'], + 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'state_description': 'scheduling', diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index c3e79a92f..d1bf2de2c 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -150,7 +150,7 @@ class VMWareVMOps(object): """ image_size, image_properties = \ vmware_images.get_vmdk_size_and_properties( - instance.image_id, instance) + instance.image_ref, instance) vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") adapter_type = image_properties.get("vmware_adaptertype", @@ -265,23 +265,23 @@ class VMWareVMOps(object): def _fetch_image_on_esx_datastore(): """Fetch image from Glance to ESX datastore.""" - LOG.debug(_("Downloading image file data %(image_id)s to the ESX " + LOG.debug(_("Downloading image file data %(image_ref)s to the ESX " "data store %(data_store_name)s") % - ({'image_id': instance.image_id, + ({'image_ref': instance.image_ref, 'data_store_name': data_store_name})) # Upload the -flat.vmdk file whose meta-data file we just created # above vmware_images.fetch_image( - instance.image_id, + instance.image_ref, instance, host=self._session._host_ip, data_center_name=self._get_datacenter_name_and_ref()[1], datastore_name=data_store_name, cookies=cookies, file_path=flat_uploaded_vmdk_name) - LOG.debug(_("Downloaded image file data %(image_id)s to the ESX " + LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX " "data store %(data_store_name)s") % - ({'image_id': instance.image_id, + ({'image_ref': instance.image_ref, 'data_store_name': data_store_name})) _fetch_image_on_esx_datastore() diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 50c6baedf..11f4fe06a 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -18,10 +18,9 @@ Utility functions for Image transfer. 
""" -from glance import client - from nova import exception from nova import flags +import nova.image from nova import log as logging from nova.virt.vmwareapi import io_util from nova.virt.vmwareapi import read_write_util @@ -117,8 +116,8 @@ def upload_image(image, instance, **kwargs): def _get_glance_image(image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image) - glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port) - metadata, read_iter = glance_client.get_image(image) + glance_client, image_id = nova.image.get_glance_client(image) + metadata, read_iter = glance_client.get_image(image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) file_size = int(metadata['size']) write_file_handle = read_write_util.VMWareHTTPWriteFile( @@ -153,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs): kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() - glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port) + glance_client, image_id = nova.image.get_glance_client(image) # The properties and other fields that we need to set for the image. image_metadata = {"is_public": True, "disk_format": "vmdk", @@ -165,7 +164,7 @@ def _put_glance_image(image, instance, **kwargs): "vmware_image_version": kwargs.get("image_version")}} start_transfer(read_file_handle, file_size, glance_client=glance_client, - image_id=image, image_meta=image_metadata) + image_id=image_id, image_meta=image_metadata) LOG.debug(_("Uploaded image %s to the Glance image server") % image) @@ -188,9 +187,8 @@ def get_vmdk_size_and_properties(image, instance): LOG.debug(_("Getting image size for the image %s") % image) if FLAGS.image_service == "nova.image.glance.GlanceImageService": - glance_client = client.Client(FLAGS.glance_host, - FLAGS.glance_port) - meta_data = glance_client.get_image_meta(image) + glance_client, image_id = nova.image.get_glance_client(image) + meta_data = glance_client.get_image_meta(image_id) size, properties = meta_data["size"], meta_data["properties"] elif FLAGS.image_service == "nova.image.s3.S3ImageService": raise NotImplementedError diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 06ee8ee9b..3b1209da8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from xml.dom import minidom import glance.client from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import utils from nova.auth.manager import AuthManager @@ -455,8 +456,8 @@ class VMHelper(HelperBase): # DISK restores sr_ref = safe_find_sr(session) - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta, image_file = client.get_image(image) + glance_client, image_id = nova.image.get_glance_client(image) + meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) @@ -515,10 +516,10 @@ class VMHelper(HelperBase): ImageType.DISK_RAW: 'DISK_RAW', ImageType.DISK_VHD: 'DISK_VHD'} disk_format = pretty_format[image_type] - image_id = instance.image_id + image_ref = instance.image_ref instance_id = instance.id LOG.debug(_("Detected %(disk_format)s format for image " - "%(image_id)s, instance %(instance_id)s") % locals()) + "%(image_ref)s, instance %(instance_id)s") % locals()) def determine_from_glance(): 
glance_disk_format2nova_type = { @@ -527,8 +528,9 @@ class VMHelper(HelperBase): 'ari': ImageType.KERNEL_RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD} - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta = client.get_image_meta(instance.image_id) + image_ref = instance.image_ref + glance_client, image_id = nova.image.get_glance_client(image_ref) + meta = glance_client.get_image_meta(image_id) disk_format = meta['disk_format'] try: return glance_disk_format2nova_type[disk_format] diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6d516ddbc..183d29470 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -111,7 +111,7 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, + instance.id, instance.image_ref, user, project, disk_image_type) return vdis -- cgit From 1af3ac5f60bb9a4ad201f0bd84a355235be2f354 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 19:50:57 +0000 Subject: fixed so all the new encryption tests pass.. including data with newlines and so forth --- nova/virt/xenapi/vmops.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1fcaaeede..e116ef2d1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,8 +1190,12 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, subcommand, text): - proc = _runproc('openssl %s' % subcommand) + def _run_ssl(self, text, extra_args=None): + if not extra_args: + extra_args = '' + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + self._shared, extra_args) + proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) proc.stdin.close() proc.wait() @@ -1201,9 +1205,9 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared - return self._run_ssl(cmd, text).strip('\n') + return self._run_ssl(text).strip('\n') def decrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared - return self._run_ssl(cmd, text) + if text[len(text)-1:] != '\n': + text = text + '\n' + return self._run_ssl(text, '-d') -- cgit From cb42d3ec2c358a1666fde06d4252d1d76baeffff Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:29:48 +0000 Subject: added -A back in to pass to openssl --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e116ef2d1..389c27598 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1193,7 +1193,7 @@ class SimpleDH(object): def _run_ssl(self, text, extra_args=None): if not extra_args: extra_args = '' - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + cmd = 'enc -aes-128-cbc -A -a -pass pass:%s -nosalt %s' % ( self._shared, extra_args) proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) -- cgit From 2e4fca0b2a8dc4295d14a337ffa2771fab857420 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 27 May 2011 16:31:18 -0400 Subject: now pip-requires mox version 0.5.3 --- tools/pip-requires | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/pip-requires b/tools/pip-requires index 8f8018765..f1c5b2003 100644 --- 
a/tools/pip-requires +++ b/tools/pip-requires @@ -17,8 +17,7 @@ redis==2.0.0 routes==1.12.3 WebOb==0.9.8 wsgiref==0.1.2 -mox==0.5.0 --f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz +mox==0.5.3 greenlet==0.3.1 nose bzr -- cgit From 132d0579a11b5f3b0be930e5a9369205cb282e35 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:48:57 +0000 Subject: added \n is not needed with -A --- nova/virt/xenapi/vmops.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 389c27598..2b3fb6a39 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1208,6 +1208,4 @@ class SimpleDH(object): return self._run_ssl(text).strip('\n') def decrypt(self, text): - if text[len(text)-1:] != '\n': - text = text + '\n' return self._run_ssl(text, '-d') -- cgit From e75bbc348c713775af11293fc6e5e05667279234 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 02:18:48 -0400 Subject: More image_id to image_ref stuff. Also fixed tests in test_servers. --- nova/api/openstack/servers.py | 2 +- nova/api/openstack/views/servers.py | 8 ++++---- nova/tests/api/openstack/test_servers.py | 16 +++++++++------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 76800795c..7593694bd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -189,7 +189,7 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPBadRequest(msg)) inst['instance_type'] = inst_type - inst['image_id'] = image_href + inst['image_ref'] = image_href builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ddd17ab93..dd1d68ff0 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -112,8 +112,8 @@ class ViewBuilderV10(ViewBuilder): """Model an Openstack API V1.0 server response.""" def _build_image(self, response, inst): - if 'image_id' in dict(inst): - response['imageId'] = int(inst['image_id']) + if 'image_ref' in dict(inst): + response['imageId'] = int(inst['image_ref']) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): @@ -130,8 +130,8 @@ class ViewBuilderV11(ViewBuilder): self.base_url = base_url def _build_image(self, response, inst): - if 'image_id' in dict(inst): - image_href = inst['image_id'] + if 'image_ref' in dict(inst): + image_href = inst['image_ref'] if str(image_href).isdigit(): image_href = int(image_href) response['imageRef'] = image_href diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a6ab9c0c8..1ce0e8e84 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -98,7 +98,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "admin_pass": "", "user_id": user_id, "project_id": "", - "image_id": "10", + "image_ref": "10", "kernel_id": "", "ramdisk_id": "", "launch_index": 0, @@ -475,12 +475,13 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - def fake_image_service(*args): - return nova.image.fake.FakeImageService() + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) - FLAGS.image_service = 'nova.image.fake.FakeImageService' - self.stubs.Set( - nova.image.glance, 
'GlanceImageService', fake_image_service) + self.stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) + self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -1685,6 +1686,8 @@ class TestServerInstanceCreation(test.TestCase): fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.allow_admin = FLAGS.allow_admin_api + self.stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) def tearDown(self): self.stubs.UnsetAll() @@ -1714,7 +1717,6 @@ class TestServerInstanceCreation(test.TestCase): return stub_method compute_api = MockComputeAPI() - FLAGS.image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) self.stubs.Set(nova.api.openstack.servers.Controller, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) -- cgit From 1fced8f7a527f25abde457cfcf056a9a082a79c3 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 04:29:35 -0400 Subject: Fixing integration tests by correctly stubbing image service. --- nova/tests/integrated/integrated_helpers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 3b4c49c93..5eacc829d 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -152,10 +152,10 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) - def fake_image_service(*args): - return nova.image.fake.FakeImageService() - self.stubs.Set( - nova.image.glance, 'GlanceImageService', fake_image_service) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) # set up services self.start_service('compute') -- cgit From bceac9e68021959c8711a0be4ed7ac13352a4623 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 06:04:19 -0400 Subject: Fixing xen and vmware tests by correctly mocking glance client. --- nova/tests/glance/stubs.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 8611fef29..fdd9ad4da 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,19 +16,16 @@ import StringIO - -from nova import images - - -def get_mock_glance_client(): - return FakeGlance() +import nova.image def stubout_glance_client(stubs): - """Stubs out glance.client.Client""" - stubs.Set(images, 'get_glance_client', get_mock_glance_client) - + def fake_get_glance_client(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (FakeGlance('foo'), image_id) + stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) + class FakeGlance(object): IMAGE_MACHINE = 1 IMAGE_KERNEL = 2 -- cgit From 9ce5728a0d800374a76cacf935daf2c032f1c33d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 06:25:04 -0400 Subject: Fixing nova.tests.api.openstack.fakes.stub_out_image_service. It now stubs out the get_image_service and get_default_image_service functions. Also some pep8 whitespace fixes. 
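The helper consolidated in the diff below follows the convention used throughout this series: the image id is whatever trails the last `/` of an image href, and a bare id such as `10` parses the same way. A self-contained sketch of that convention (the FakeImageService class here is a stand-in, not the real `nova.image.fake` implementation):

::

    # Sketch only: FakeImageService stands in for nova.image.fake.FakeImageService.
    class FakeImageService(object):
        def show(self, context, image_id):
            return {'id': image_id, 'name': 'fakeimage'}

    def fake_get_image_service(image_href):
        # Works for a bare id ("10") and a full href
        # ("http://localhost/v1.1/images/2"): take the last path segment.
        image_id = int(str(image_href).split('/')[-1])
        return (FakeImageService(), image_id)

    assert fake_get_image_service('10')[1] == 10
    assert fake_get_image_service('http://localhost/v1.1/images/2')[1] == 2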
--- .../sqlalchemy/migrate_repo/versions/019_rename_image_ids.py | 3 ++- nova/tests/api/openstack/fakes.py | 11 +++++++---- nova/tests/api/openstack/test_servers.py | 10 +--------- nova/tests/glance/stubs.py | 2 +- nova/virt/vmwareapi/vmware_images.py | 6 +++--- 5 files changed, 14 insertions(+), 18 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py index 6838f1ea6..73a5e8477 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py @@ -15,10 +15,11 @@ # under the License. from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging + meta = MetaData() + def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..01b42d00c 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -38,6 +38,7 @@ from nova.api.openstack import auth from nova.api.openstack import versions from nova.api.openstack import limits from nova.auth.manager import User, Project +import nova.image.fake from nova.image import glance from nova.image import local from nova.image import service @@ -104,10 +105,12 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): - def fake_image_show(meh, context, id): - return dict(kernelId=1, ramdiskId=1) - - stubs.Set(local.LocalImageService, 'show', fake_image_show) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + stubs.Set(nova.image, 'get_image_service', fake_get_image_service) + stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) def stub_out_auth(stubs): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 1ce0e8e84..9f3b53cdf 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -475,13 +475,6 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - def fake_get_image_service(image_href): - image_id = int(str(image_href).split('/')[-1]) - return (nova.image.fake.FakeImageService(), image_id) - - self.stubs.Set(nova.image, 'get_default_image_service', - lambda: nova.image.fake.FakeImageService()) - self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -1684,10 +1677,9 @@ class TestServerInstanceCreation(test.TestCase): fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) + fakes.stub_out_image_service(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.allow_admin = FLAGS.allow_admin_api - self.stubs.Set(nova.image, 'get_default_image_service', - lambda: nova.image.fake.FakeImageService()) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index fdd9ad4da..1e0b90d82 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -25,7 +25,7 @@ def stubout_glance_client(stubs): return (FakeGlance('foo'), image_id) 
stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) - + class FakeGlance(object): IMAGE_MACHINE = 1 IMAGE_KERNEL = 2 diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 11f4fe06a..48edc5384 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -116,7 +116,7 @@ def upload_image(image, instance, **kwargs): def _get_glance_image(image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image) - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) metadata, read_iter = glance_client.get_image(image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) file_size = int(metadata['size']) @@ -152,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs): kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) # The properties and other fields that we need to set for the image. image_metadata = {"is_public": True, "disk_format": "vmdk", @@ -187,7 +187,7 @@ def get_vmdk_size_and_properties(image, instance): LOG.debug(_("Getting image size for the image %s") % image) if FLAGS.image_service == "nova.image.glance.GlanceImageService": - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) meta_data = glance_client.get_image_meta(image_id) size, properties = meta_data["size"], meta_data["properties"] elif FLAGS.image_service == "nova.image.s3.S3ImageService": -- cgit From b0636780291fc6531d89a69e164e82203414a875 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 07:49:31 -0400 Subject: Another image_id location in hyperv. --- nova/virt/hyperv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 1142e97a4..05b4775c1 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -151,7 +151,7 @@ class HyperVConnection(driver.ComputeDriver): base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - images.fetch(instance['image_id'], vhdfile, user, project) + images.fetch(instance['image_ref'], vhdfile, user, project) try: self._create_vm(instance) -- cgit From 29387999d6befc29dddfb7dfd5d543607676e106 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 14:18:25 -0400 Subject: Added missing nova import to image/__init__.py. --- nova/image/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 011d79d61..f42332a29 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -18,6 +18,7 @@ from urlparse import urlparse +import nova from nova import exception from nova import utils from nova import flags -- cgit From 5976b50299b31292d578dcdd8576607e175fca44 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 21:10:57 -0400 Subject: Cleanup instances_path in test_libvirt test_spawn_with_network_info test. 
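The change below gives `rebuild_instance` a `**kwargs` signature, so whatever extra parameters the compute API sends (for example `image_id` and `injected_files`, as seen in the rebuild work later in this series) are simply picked out with `kwargs.get()`. A toy illustration of that hand-off, not the real RPC plumbing:

::

    # Illustrative only: rebuild_params mirrors the dict built by
    # nova.compute.api.API.rebuild in a later commit in this series.
    def rebuild_instance(context, instance_id, **kwargs):
        image_id = kwargs.get('image_id')
        injected_files = kwargs.get('injected_files', [])
        return image_id, injected_files

    rebuild_params = {'image_id': 42, 'injected_files': []}
    assert rebuild_instance(None, 1, **rebuild_params) == (42, [])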
--- nova/tests/test_libvirt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 4efdd6ae9..1fac4e4e6 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -18,6 +18,7 @@ import eventlet import mox import os import re +import shutil import sys from xml.etree.ElementTree import fromstring as xml_to_tree @@ -645,6 +646,8 @@ class LibvirtConnTestCase(test.TestCase): except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) + shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + self.assertTrue(count) def test_get_host_ip_addr(self): -- cgit From a9278909cbb6d5ea9283231dbd6efc67b812abff Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:10:42 -0400 Subject: Update the rebuild_instance function in the compute manager so that it accepts the arguments that our current compute API sends. --- nova/compute/manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..3897b3a9e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and @@ -349,7 +349,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + instance_ref.image_id = kwargs.get('image_id') + instance_ref.injected_files = kwargs.get('injected_files', []) self.driver.spawn(instance_ref) self._update_image_id(context, instance_id, image_id) -- cgit From ccf522daaca0d4136c072c1905dd9fbaa1dfb2e9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:12:07 -0400 Subject: Fixes to the SQLAlchmeny API such that metadata is saved on an instance_update. Added integration test to test that instance metadata is updated on a rebuild. --- nova/db/sqlalchemy/api.py | 22 +++++++++++++--------- nova/tests/integrated/api/client.py | 10 ++++++++-- nova/tests/integrated/test_servers.py | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 11 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e4dda5c12..1a7cae6e9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -771,6 +771,15 @@ def fixed_ip_update(context, address, values): ################### +def _metadata_refs(metadata_dict): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = models.InstanceMetadata() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs @require_context @@ -780,15 +789,7 @@ def instance_create(context, values): context - request context object values - dict containing column values. 
""" - metadata = values.get('metadata') - metadata_refs = [] - if metadata: - for k, v in metadata.iteritems(): - metadata_ref = models.InstanceMetadata() - metadata_ref['key'] = k - metadata_ref['value'] = v - metadata_refs.append(metadata_ref) - values['metadata'] = metadata_refs + values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() instance_ref.update(values) @@ -1010,6 +1011,9 @@ def instance_set_state(context, instance_id, state, description=None): @require_context def instance_update(context, instance_id, values): session = get_session() + metadata = values.get('metadata') + if metadata: + values['metadata'] = _metadata_refs(values.get('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 7e20c9b00..eb9a3056e 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -152,7 +152,10 @@ class TestOpenStackClient(object): def _decode_json(self, response): body = response.read() LOG.debug(_("Decoding JSON: %s") % (body)) - return json.loads(body) + if body: + return json.loads(body) + else: + return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) @@ -166,7 +169,7 @@ class TestOpenStackClient(object): headers['Content-Type'] = 'application/json' kwargs['body'] = json.dumps(body) - kwargs.setdefault('check_response_status', [200]) + kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) @@ -185,6 +188,9 @@ class TestOpenStackClient(object): def post_server(self, server): return self.api_post('/servers', server)['server'] + def post_server_action(self, server_id, data): + return self.api_post('/servers/%s/action' % server_id, data) + def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..604faf59f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,40 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + + # Cleanup + self._delete_server(created_server_id) if __name__ == "__main__": unittest.main() -- cgit From 394b37f8c944fbd3ca683d7752cd751bc69cce51 Mon Sep 17 
00:00:00 2001 From: Dan Prince Date: Sun, 29 May 2011 00:00:02 -0400 Subject: Implement the v1.1 style resize action with support for flavorRef. --- nova/api/openstack/servers.py | 32 ++++++++++++++++++++++++++++++++ nova/tests/api/openstack/test_servers.py | 19 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..a3066e578 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -332,6 +332,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() def _action_resize(self, input_dict, req, id): + return exc.HTTPNotImplemented() """ Resizes a given instance to the flavor size requested """ try: if 'resize' in input_dict and 'flavorId' in input_dict['resize']: @@ -610,6 +611,21 @@ class ControllerV10(Controller): self.compute_api.set_admin_password(context, server_id, inst_dict['server']['adminPass']) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) @@ -695,6 +711,22 @@ class ControllerV11(Controller): LOG.info(msg) raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorRef' in input_dict['resize']: + flavor_ref = input_dict['resize']['flavorRef'] + flavor_id = common.get_id_from_href(flavor_ref) + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..e0910fed6 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1267,6 +1267,25 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) + def test_resize_server_v11(self): + + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict(flavorRef="http://localhost/3")) + req.body = json.dumps(body_dict) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + def test_resize_bad_flavor_fails(self): req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) -- cgit From 
833481d796db557dddde6b4b9e75b7cf518b88fa Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sun, 29 May 2011 07:51:44 -0400 Subject: Use metadata variable when calling _metadata_refs. --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1a7cae6e9..a678ebedd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,7 +1013,7 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata: - values['metadata'] = _metadata_refs(values.get('metadata')) + values['metadata'] = _metadata_refs(metadata) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) -- cgit From c9926b12f4c554d9a21c6e77fc657e54a2dd4888 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 29 May 2011 18:01:46 -0700 Subject: starting --- doc/source/devref/distributed_scheduler.rst | 164 ++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 doc/source/devref/distributed_scheduler.rst diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst new file mode 100644 index 000000000..75a4d57ce --- /dev/null +++ b/doc/source/devref/distributed_scheduler.rst @@ -0,0 +1,164 @@ +.. + Copyright 2011 OpenStack LLC + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Distributed Scheduler +===== + +The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). + +But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone. + +This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found. + +So, how does this all work? + +This document will explain the strategy employed by the ZoneAwareScheduler and its derivations. + +Costs & Weights +---------- +When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. 
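As a rough sketch of that rule (the individual cost functions here are invented for illustration; only the sum-of-costs weighting comes from the scheduler):

::

    # Illustrative only: each cost is a non-negative integer, lower is better,
    # and a host's overall weight is simply the sum of its costs.
    def ram_cost(free_ram_mb, requested_ram_mb):
        return max(0, requested_ram_mb - free_ram_mb)

    def disk_cost(free_disk_gb, requested_disk_gb):
        return max(0, requested_disk_gb - free_disk_gb)

    def weight(host, request):
        return (ram_cost(host['free_ram_mb'], request['memory_mb']) +
                disk_cost(host['free_disk_gb'], request['local_gb']))

    # A host with 4G free is a poor (high-weight) home for an 8G instance.
    assert weight({'free_ram_mb': 4096, 'free_disk_gb': 100},
                  {'memory_mb': 8192, 'local_gb': 20}) == 4096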
Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost. + +Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost. + +An example of some other costs might include selecting: +* a GPU-based host over a standard CPU +* a host with fast ethernet over a 10mbps line +* a host than can run Windows instances +* a host in the EU vs North America +* etc + +This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly. + +nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler +----------- +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. + +Here is how it works: + +1. The Compute nodes are Filtered and the remaining set are Weighted. +1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. +1b. Weighing of the remaining Compute nodes is performed +2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. +3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. +4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. + +Filtering and Weighing +------------ +Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. + + + + + + +- +Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: + +:: + + key=value;value;value, key=value;value;value + +Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. + +Flow within a Zone +------------------ +The brunt of the work within a Zone is done in the Scheduler Service. 
The Scheduler is responsible for: +- collecting capability messages from the Compute, Volume and Network nodes, +- polling the child Zones for their status and +- providing data to the Distributed Scheduler for performing load balancing calculations + +Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. + +These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. + +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). + +Zone administrative functions +----------------------------- +Zone administrative operations are usually done using python-novaclient_ + +.. _python-novaclient: https://github.com/rackspace/python-novaclient + +In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. + +Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. + +Find out about this Zone +------------------------ +In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. + +:: + + alice@novadev:~$ nova zone-info + +-----------------+---------------+ + | Property | Value | + +-----------------+---------------+ + | compute_cpu | 0.7,0.7 | + | compute_disk | 123000,123000 | + | compute_network | 800,800 | + | hypervisor | xenserver | + | name | nova | + | network_cpu | 0.7,0.7 | + | network_disk | 123000,123000 | + | network_network | 800,800 | + | os | linux | + +-----------------+---------------+ + +This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. + +Adding a child Zone +------------------- +Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command: + +:: + + nova zone-add + +You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: + +:: + + export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" + export NOVA_USERNAME="alice" + export NOVA_URL="http://192.168.2.120:8774/v1.0/" + + +This equates to a POST operation to `.../zones/` to add a new zone. 
No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. + +Getting a list of child Zones +----------------------------- + +:: + + nova zone-list + + alice@novadev:~$ nova zone-list + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | ID | Name | Is Active | Capabilities | API URL | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | + | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + +This equates to a GET operation to `.../zones`. + +Removing a child Zone +--------------------- +:: + + nova zone-delete + +This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. -- cgit From 5aa54545486ffe9d9988761576f497de9a957d47 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 29 May 2011 23:42:46 -0300 Subject: lots more --- doc/source/devref/distributed_scheduler.rst | 44 +++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 75a4d57ce..7599f2cc5 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -25,7 +25,7 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab So, how does this all work? -This document will explain the strategy employed by the ZoneAwareScheduler and its derivations. +This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this. Costs & Weights ---------- @@ -48,19 +48,51 @@ As we explained in the Zones documentation, each Scheduler has a `ZoneManager` o Here is how it works: -1. The Compute nodes are Filtered and the remaining set are Weighted. +1. The Compute nodes are Filtered and the nodes remaining are Weighed. 1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes is performed +1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. 2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. 3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. 4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. Filtering and Weighing ------------ -Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. 
- - +Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. +Requesting a new instance +------------ +To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later. + +`nova.compute.api.create()` performs the following actions: +1. it validates all the fields passed into it. +2. it creates an entry in the `Instance` table for each instance requested +3. it puts one `run_instance` message in the scheduler queue for each instance requested +4. the schedulers pick off the messages and decide which Compute node should handle the request. +5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created. +6. it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid. + +Generally, the standard schedulers (like `ChangeScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones. + +The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. + +For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: +1. it validates all the fields passed into it. +2. it creates a single `request_id` for all of instances created. This is a UUID. +3. it creates a single `run_instance` request in the scheduler queue +4. a scheduler picks the message off the queue and works on it. +5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. +6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. +7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. +8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. +9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. 
The parameters to the Child Zone call are the same as what was passed in by the user. + +The Catch +------------- +This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. + +When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. + +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. -- cgit From c3c2c1a63c126f046457d0d61306ebe9c46af700 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 00:00:28 -0300 Subject: basic flow done --- doc/source/devref/distributed_scheduler.rst | 105 ++++------------------------ 1 file changed, 13 insertions(+), 92 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 7599f2cc5..c9aaf8c01 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -55,6 +55,8 @@ Here is how it works: 3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. 4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. +`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later. + Filtering and Weighing ------------ Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. @@ -77,7 +79,7 @@ The problem with this approach is that each request is scattered amongst each of For the `ZoneAwareScheduler` we need to use the All-at-Once approach. 
We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. -2. it creates a single `request_id` for all of instances created. This is a UUID. +2. it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue 4. a scheduler picks the message off the queue and works on it. 5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. @@ -85,6 +87,7 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to 7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. 9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user. +10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. The Catch ------------- @@ -92,105 +95,23 @@ This all seems pretty straightforward but, like most things, there's a catch. Zo When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. -Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. - - - -- -Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: - -:: - - key=value;value;value, key=value;value;value - -Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. 
These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. - -Flow within a Zone ------------------- -The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for: -- collecting capability messages from the Compute, Volume and Network nodes, -- polling the child Zones for their status and -- providing data to the Distributed Scheduler for performing load balancing calculations - -Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. - -These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. - -The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). - -Zone administrative functions ------------------------------ -Zone administrative operations are usually done using python-novaclient_ - -.. _python-novaclient: https://github.com/rackspace/python-novaclient - -In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. - -Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. - -Find out about this Zone ------------------------- -In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. - -:: - - alice@novadev:~$ nova zone-info - +-----------------+---------------+ - | Property | Value | - +-----------------+---------------+ - | compute_cpu | 0.7,0.7 | - | compute_disk | 123000,123000 | - | compute_network | 800,800 | - | hypervisor | xenserver | - | name | nova | - | network_cpu | 0.7,0.7 | - | network_disk | 123000,123000 | - | network_network | 800,800 | - | os | linux | - +-----------------+---------------+ - -This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. - -Adding a child Zone -------------------- -Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. 
From a parent zone you can add a child zone with the following command: - -:: - - nova zone-add - -You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: - -:: +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. - export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" - export NOVA_USERNAME="alice" - export NOVA_URL="http://192.168.2.120:8774/v1.0/" +In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. +Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use. -This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. +Reservation ID's +--------------- -Getting a list of child Zones ------------------------------ -:: - nova zone-list - alice@novadev:~$ nova zone-list - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | ID | Name | Is Active | Capabilities | API URL | - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | - | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | - +----+-------+-----------+--------------------------------------------+---------------------------------+ +Host Filter +-------------- -This equates to a GET operation to `.../zones`. -Removing a child Zone ---------------------- -:: +Cost Scheduler Weighing +-------------- - nova zone-delete -This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. -- cgit From 5101aa300b087bf57f22cb128649679e8b11051d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 00:45:15 -0300 Subject: reservation_id's done --- doc/source/devref/distributed_scheduler.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index c9aaf8c01..402f50bee 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -104,8 +104,17 @@ Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.c Reservation ID's --------------- +NOTE: The features described in this section are related to the up-coming 'merge-4' branch. 
+The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created. +NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be bad. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. + +We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/servers` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. + +Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter. + +`python-novaclient` will be extended to support both of these changes. Host Filter -------------- -- cgit From 45818393a20a56d5e0aab23f3c78e430e0c1167a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 30 May 2011 14:17:00 +0900 Subject: fixed nova.virt.libvirt_conn.resume() method - removing try-catch --- nova/virt/libvirt_conn.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 47a77b3ae..32f374955 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -569,12 +569,8 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" - try: - dom = self._lookup_by_name(instance.name) - dom.create() - except libvirt.LibvirtError: - xml = self.to_xml(instance, None) - self._create_new_domain(xml) + dom = self._lookup_by_name(instance.name) + dom.create() @exception.wrap_exception def rescue(self, instance): -- cgit From 0cf5316131aecbac5e843282e2e2eb2acd3fc9e3 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 05:03:45 -0700 Subject: first cut complete --- doc/source/devref/distributed_scheduler.rst | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 402f50bee..a45505640 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -119,8 +119,49 @@ Finally, we need to give the user a way to get information on each of the instan Host Filter -------------- +As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. 
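As a rough illustration of what such a plug-in might look like (the method names and return shapes follow the descriptions later in this document; treat this as a sketch, not the exact base-class API):

::

    # Sketch of a deployment-specific filter plus a trivial weighing function.
    # The capability key 'host_memory_free' and the data shapes are assumptions.
    class EnoughRamFilter(object):
        def instance_type_to_filter(self, instance_type):
            # reduce the InstanceType record to the one field this filter needs
            return {'memory_mb': instance_type['memory_mb']}

        def filter_hosts(self, all_hosts, query):
            # all_hosts: iterable of (hostname, capabilities) pairs
            return [(name, caps) for name, caps in all_hosts
                    if caps.get('host_memory_free', 0) >= query['memory_mb']]

    def weigh_hosts(request, filtered_hosts):
        # every surviving host is equally suitable (lower weight is better)
        return [{'weight': 1, 'hostname': name} for name, _caps in filtered_hosts]

    hosts = [('host1', {'host_memory_free': 16384}),
             ('host2', {'host_memory_free': 512})]
    f = EnoughRamFilter()
    survivors = f.filter_hosts(hosts, f.instance_type_to_filter({'memory_mb': 4096}))
    assert weigh_hosts(None, survivors) == [{'weight': 1, 'hostname': 'host1'}]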
+The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others. + +`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. +`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. + +To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be. + Cost Scheduler Weighing -------------- +Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled. + +Simple Zone Aware Scheduling +-------------- +The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. + +The `--scheduler_driver` flag is how you specify the Scheduler class name. + +Flags +-------------- + +All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file: + +:: + --allow_admin_api=true + --enable_zone_routing=true + --zone_name=zone1 + --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b + --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler + --default_host_filter=nova.scheduler.host_filter.AllHostsFilter + +`--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. +`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. +`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. 
+`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. +`scheduler_driver` is the real work horse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler` +`default_host_filter` is the host filter to be used for filtering candidate Compute nodes. + +Some optional flags which are handy for debugging are: +:: + --connection_type=fake + --verbose +Using the `Fake` virtualization driver is handy when you're setting this stuff up so you're not dealing with a million possible issues at once. When things seem to working correctly, switch back to whatever hypervisor your deployment uses. -- cgit From 2155f2b1ab22c6183ab5266e16a675f1469fca50 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 30 May 2011 11:29:55 -0400 Subject: Updates so that 'name' can be updated when doing a OS API v1.1 rebuild. Fixed issue where metadata wasn't getting deleted when an empty dict was POST'd on a rebuild. --- nova/api/openstack/servers.py | 10 +++-- nova/compute/api.py | 13 ++++--- nova/db/sqlalchemy/api.py | 17 +++++++-- nova/tests/integrated/test_servers.py | 72 +++++++++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..8e191c232 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -708,14 +708,16 @@ class ControllerV11(Controller): image_id = common.get_id_from_href(image_ref) personalities = info["rebuild"].get("personality", []) - metadata = info["rebuild"].get("metadata", {}) + metadata = info["rebuild"].get("metadata") + name = info["rebuild"].get("name") - self._validate_metadata(metadata) + if metadata: + self._validate_metadata(metadata) self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, metadata, - personalities) + self.compute_api.rebuild(context, instance_id, image_id, name, + metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id LOG.debug(msg) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4f2363387..151679521 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -530,7 +530,7 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, + def rebuild(self, context, instance_id, image_id, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -539,13 +539,16 @@ class API(base.Base): msg = _("Instance already building") raise exception.BuildInProgress(msg) - metadata = metadata or {} - self._check_metadata_properties_quota(context, metadata) - files_to_inject = files_to_inject or [] self._check_injected_file_quota(context, files_to_inject) - self.db.instance_update(context, instance_id, {"metadata": metadata}) + values = {} + if metadata is not None: + self._check_metadata_properties_quota(context, metadata) + values['metadata'] = metadata + if name is not None: + values['display_name'] = name + self.db.instance_update(context, instance_id, values) rebuild_params = { "image_id": image_id, diff --git a/nova/db/sqlalchemy/api.py 
b/nova/db/sqlalchemy/api.py index a678ebedd..ea84e96e7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1012,8 +1012,9 @@ def instance_set_state(context, instance_id, state, description=None): def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') - if metadata: - values['metadata'] = _metadata_refs(metadata) + if metadata is not None: + instance_metadata_update_or_create(context, instance_id, + values.pop('metadata'), True) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2570,8 +2571,12 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata): +def instance_metadata_update_or_create(context, instance_id, metadata, + purge=False): session = get_session() + + original_metadata = instance_metadata_get(context, instance_id) + meta_ref = None for key, value in metadata.iteritems(): try: @@ -2583,4 +2588,10 @@ def instance_metadata_update_or_create(context, instance_id, metadata): "instance_id": instance_id, "deleted": 0}) meta_ref.save(session=session) + + if purge: + for key in original_metadata.keys(): + if not key in metadata.keys(): + instance_metadata_delete(context, instance_id, key) + return metadata diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 604faf59f..a67fa1bb5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,36 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server(self): + """Rebuild a server.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual({}, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" @@ -210,9 +240,51 @@ class ServersTest(integrated_helpers._IntegratedTestBase): found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + + def test_create_and_rebuild_server_with_metadata_removal(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + server_post['server']['metadata'] = metadata + + created_server = 
self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) # Cleanup self._delete_server(created_server_id) + if __name__ == "__main__": unittest.main() -- cgit From 4f8c995bbeca903319bcc1f314b25be0150eea2f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 30 May 2011 22:47:10 -0400 Subject: Updated compute api and manager to support image_refs in rebuild. --- nova/compute/api.py | 4 ++-- nova/compute/manager.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 61b45843d..e0f9ec8f3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -533,7 +533,7 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, + def rebuild(self, context, instance_id, image_ref, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -551,7 +551,7 @@ class API(base.Base): self.db.instance_update(context, instance_id, {"metadata": metadata}) rebuild_params = { - "image_id": image_id, + "image_ref": image_ref, "injected_files": files_to_inject, } diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7c88236ba..055d15c43 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -162,9 +162,9 @@ class ComputeManager(manager.SchedulerDependentManager): data = {'launched_at': launched_at or datetime.datetime.utcnow()} self.db.instance_update(context, instance_id, data) - def _update_image_id(self, context, instance_id, image_id): + def _update_image_ref(self, context, instance_id, image_ref): """Update the image_id for the given instance.""" - data = {'image_id': image_id} + data = {'image_ref': image_ref} self.db.instance_update(context, instance_id, data) def get_console_topic(self, context, **kwargs): @@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, image_ref): """Destroy and re-make this instance. 
A 'rebuild' effectively purges all existing data from the system and @@ -339,7 +339,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_id: Image identifier (integer) + :param image_ref: Image identifier (href or integer) """ context = context.elevated() @@ -349,10 +349,10 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + instance_ref.image_ref = image_ref self.driver.spawn(instance_ref) - self._update_image_id(context, instance_id, image_id) + self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) -- cgit From be9113bc5c08cbafb7af9f83bd61f318d1ba6145 Mon Sep 17 00:00:00 2001 From: "Vivek YS vivek.ys@gmail.com" <> Date: Tue, 31 May 2011 09:49:06 +0530 Subject: Fixed the typo of APIError with ApiError --- nova/virt/vmwareapi/vmops.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index c3e79a92f..6d7149841 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -590,11 +590,11 @@ class VMWareVMOps(object): def pause(self, instance, callback): """Pause a VM instance.""" - raise exception.APIError("pause not supported for vmwareapi") + raise exception.ApiError("pause not supported for vmwareapi") def unpause(self, instance, callback): """Un-Pause a VM instance.""" - raise exception.APIError("unpause not supported for vmwareapi") + raise exception.ApiError("unpause not supported for vmwareapi") def suspend(self, instance, callback): """Suspend the specified instance.""" @@ -673,7 +673,7 @@ class VMWareVMOps(object): def get_diagnostics(self, instance): """Return data about VM diagnostics.""" - raise exception.APIError("get_diagnostics not implemented for " + raise exception.ApiError("get_diagnostics not implemented for " "vmwareapi") def get_console_output(self, instance): -- cgit From d6cd02a07ab3b66a53689fb8edbf55db03b4bff2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 08:20:40 -0400 Subject: Actually remove the _action_resize code from the base Servers controller. The V11 and V10 controllers implement these now. 
--- nova/api/openstack/servers.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a3066e578..4bd7ddb14 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -333,19 +333,6 @@ class Controller(common.OpenstackController): def _action_resize(self, input_dict, req, id): return exc.HTTPNotImplemented() - """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing arguments for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() def _action_reboot(self, input_dict, req, id): if 'reboot' in input_dict and 'type' in input_dict['reboot']: -- cgit From 2bd6e5561339a6755709461dab9aa6cad4a1cf81 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 31 May 2011 09:51:20 -0400 Subject: pep8 fixes --- .../etc/xensource/scripts/ovs_configure_base_flows.py | 11 ++++++----- .../etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 0186a3c8b..514a43a2d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -34,13 +34,13 @@ def main(command, phys_dev_name, bridge_name): ovs_ofctl('del-flows', bridge_name) if command in ('online', 'reset'): - pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport') + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', + 'Interface', phys_dev_name, 'ofport') # these flows are lower priority than all VM-specific flows. - # allow all traffic from the physical NIC, as it is trusted (i.e., from a - # filtered vif, or from the physical infrastructure + # allow all traffic from the physical NIC, as it is trusted (i.e., + # from a filtered vif, or from the physical infrastructure) ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=%s,actions=normal" % pnic_ofport) @@ -53,7 +53,8 @@ if __name__ == "__main__": print sys.argv script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." 
- print "usage: %s [online|offline|reset] phys-dev-name bridge-name" % script_name + print "usage: %s [online|offline|reset] phys-dev-name bridge-name" \ + % script_name print " ex: %s online eth0 xenbr0" % script_name sys.exit(1) else: diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 9fde69377..accd08b91 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -52,7 +52,7 @@ def main(command, vif_raw, net_type): vif_name, dom_id, vif_index = vif_raw.split('-') vif = "%s%s.%s" % (vif_name, dom_id, vif_index) bridge = "xenbr%s" % vif_index - + xsls = execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] -- cgit From 95f103f276f6eb7decd6ebd17ff4ac106bc7222f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 11:17:35 -0400 Subject: More specific error messages for resize requests. --- nova/api/openstack/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4bd7ddb14..1ec74bc2e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -606,7 +606,7 @@ class ControllerV10(Controller): self.compute_api.resize(req.environ['nova.context'], id, flavor_id) else: - LOG.exception(_("Missing arguments for resize")) + LOG.exception(_("Missing 'flavorId' argument for resize")) return faults.Fault(exc.HTTPUnprocessableEntity()) except Exception, e: LOG.exception(_("Error in resize %s"), e) @@ -707,7 +707,7 @@ class ControllerV11(Controller): self.compute_api.resize(req.environ['nova.context'], id, flavor_id) else: - LOG.exception(_("Missing arguments for resize")) + LOG.exception(_("Missing 'flavorRef' argument for resize")) return faults.Fault(exc.HTTPUnprocessableEntity()) except Exception, e: LOG.exception(_("Error in resize %s"), e) -- cgit From 1adb96550640a65a723635f2dc98e4595f95fd52 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 31 May 2011 08:26:11 -0700 Subject: edits based on ed's feedback --- doc/source/devref/distributed_scheduler.rst | 85 +++++++++++++++-------------- 1 file changed, 45 insertions(+), 40 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index a45505640..28ba20af7 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -1,3 +1,7 @@ + + + + .. Copyright 2011 OpenStack LLC All Rights Reserved. @@ -17,7 +21,7 @@ Distributed Scheduler ===== -The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). +The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. 
In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone. @@ -29,7 +33,7 @@ This document will explain the strategy employed by the `ZoneAwareScheduler` and Costs & Weights ---------- -When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost. +When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to putting a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost. Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost. @@ -44,73 +48,73 @@ This Weight is computed for each Instance requested. If the customer asked for 1 nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler ----------- -As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. Here is how it works: -1. The Compute nodes are Filtered and the nodes remaining are Weighed. -1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. -2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. -3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. -4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. +1. The compute nodes are filtered and the nodes remaining are weighed. +1a. 
Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request. +1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request. +2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent. +3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed. +4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed. -`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later. +`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used. Filtering and Weighing ------------ -Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. +The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. Requesting a new instance ------------ -To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later. +Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table. -`nova.compute.api.create()` performs the following actions: -1. it validates all the fields passed into it. -2. it creates an entry in the `Instance` table for each instance requested -3. it puts one `run_instance` message in the scheduler queue for each instance requested -4. the schedulers pick off the messages and decide which Compute node should handle the request. -5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created. -6. 
it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid. +`nova.compute.api.create()` performed the following actions: +1. it validated all the fields passed into it. +2. it created an entry in the `Instance` table for each instance requested +3. it put one `run_instance` message in the scheduler queue for each instance requested +4. the schedulers picked off the messages and decided which compute node should handle the request. +5. the `run_instance` message was forwarded to the compute node for processing and the instance is created. +6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid. -Generally, the standard schedulers (like `ChangeScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones. +Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones. -The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. +The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. 2. it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue 4. a scheduler picks the message off the queue and works on it. -5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. -6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. -7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. +5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`. +6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. +7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well. 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. -9. Now the instances can be created. 
The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user. +9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user. 10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. The Catch ------------- -This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. +This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. -When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. +When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates. -Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. 
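To make that round trip concrete, here is a minimal sketch of the idea using the third-party `cryptography` package; it is purely illustrative and is not Nova's actual cipher, key handling, or blob format:

::

    # Illustration only: an opaque, encrypted "hint" leaves the child Zone
    # and later comes back unchanged.  Not Nova's real implementation.
    import json
    from cryptography.fernet import Fernet

    zone_key = Fernet.generate_key()   # stand-in for the per-Zone secret
    cipher = Fernet(zone_key)

    # Child Zone: reply with a weight plus an encrypted blob of internals.
    blob = cipher.encrypt(json.dumps(
        {'host': 'compute-07', 'free_ram_mb': 4096}).encode())
    answer = {'weight': 42, 'blob': blob}

    # Parent Zone: keeps 'blob' opaque and hands it back verbatim in the
    # eventual POST /servers call.

    # Child Zone, later: recover the hint and go straight to the chosen host.
    hint = json.loads(cipher.decrypt(answer['blob']).decode())
    assert hint['host'] == 'compute-07'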
The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key` In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use. -Reservation ID's +Reservation IDs --------------- NOTE: The features described in this section are related to the up-coming 'merge-4' branch. The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created. -NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be bad. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. +NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. -We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/servers` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. +We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. 
Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter. @@ -119,14 +123,15 @@ Finally, we need to give the user a way to get information on each of the instan Host Filter -------------- -As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. +As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. + +The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others: -The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others. + * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. -`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. -`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. + * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. -To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be. +To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. 
Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (``, ``) where `` is whatever you want it to be. Cost Scheduler Weighing -------------- @@ -134,9 +139,9 @@ Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` metho Simple Zone Aware Scheduling -------------- -The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. +The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. -The `--scheduler_driver` flag is how you specify the Scheduler class name. +The `--scheduler_driver` flag is how you specify the scheduler class name. Flags -------------- @@ -153,9 +158,9 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf `--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. `--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. -`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. +`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. `build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. -`scheduler_driver` is the real work horse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler` +`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`. `default_host_filter` is the host filter to be used for filtering candidate Compute nodes. 
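Pulling the two contracts described above together (`filter_hosts` returning host tuples, `weigh_hosts` returning weight dicts), a minimal custom pairing might look roughly like the sketch below. It is written against the contracts as stated in this document, not against actual Nova base classes:

::

    # Sketch only: a RAM-based filter plus a "fullest host that still fits"
    # weigher.  Real implementations derive from Nova's base classes.
    class RamHostFilter(object):
        def instance_type_to_filter(self, instance_type):
            # Keep just the piece of the InstanceType this filter cares about.
            return {'memory_mb': instance_type['memory_mb']}

        def filter_hosts(self, hosts, query):
            # hosts: {hostname: capabilities}; return (hostname, data) tuples.
            return [(name, caps) for name, caps in hosts.items()
                    if caps.get('host_memory_free', 0) >= query['memory_mb']]

    def weigh_hosts(request_spec, filtered_hosts):
        # Lower weight is better: prefer the viable host with least free RAM.
        return [{'weight': caps.get('host_memory_free', 0), 'hostname': name}
                for name, caps in filtered_hosts]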
Some optional flags which are handy for debugging are: -- cgit From 05c21ed52da0a9505000505120c70dffd6e730e2 Mon Sep 17 00:00:00 2001 From: "Vivek YS vivek.ys@gmail.com" <> Date: Tue, 31 May 2011 21:04:01 +0530 Subject: Added myself to Authors file --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 50f4680a9..8dbdbfdb6 100644 --- a/Authors +++ b/Authors @@ -83,6 +83,7 @@ Trey Morris Tushar Patil Vasiliy Shlykov Vishvananda Ishaya +Vivek Y S William Wolf Yoshiaki Tamura Youcef Laribi -- cgit From 099c29549a70cb88a6266a5e4145f855e1862d99 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 31 May 2011 11:58:15 -0400 Subject: Handle the case when a v1.0 api tries to list servers that contain image hrefs. --- nova/api/openstack/servers.py | 12 ++++++++++-- nova/api/openstack/views/servers.py | 6 +++++- nova/exception.py | 7 ++++++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7593694bd..33b677ffd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -69,11 +69,19 @@ class Controller(common.OpenstackController): def index(self, req): """ Returns a list of server names and ids for a given user """ - return self._items(req, is_detail=False) + try: + servers = self._items(req, is_detail=False) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers def detail(self, req): """ Returns a list of server details for a given user """ - return self._items(req, is_detail=True) + try: + servers = self._items(req, is_detail=True) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers def _image_ref_from_req_data(self, data): raise NotImplementedError() diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index dd1d68ff0..595a54790 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -18,6 +18,7 @@ import hashlib import os +from nova import exception from nova.compute import power_state import nova.compute import nova.context @@ -113,7 +114,10 @@ class ViewBuilderV10(ViewBuilder): def _build_image(self, response, inst): if 'image_ref' in dict(inst): - response['imageId'] = int(inst['image_ref']) + image_ref = inst['image_ref'] + if str(image_ref).startswith('http'): + raise exception.ListingImageRefsNotSupported(); + response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): diff --git a/nova/exception.py b/nova/exception.py index 5b91e1cde..6ea6c3620 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -284,7 +284,12 @@ class DiskNotFound(NotFound): class InvalidImageRef(Invalid): - message = _("Invalid image ref %(image_href)s.") + message = _("Invalid image href %(image_href)s.") + + +class ListingImageRefsNotSupported(Invalid): + message = _("Some images have been stored via hrefs." 
+ + " This version of the api does not support displaying image hrefs.") class ImageNotFound(NotFound): -- cgit From 770c0a5ecd2e19318e5b581de1f23e4e1d3f5f9d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 31 May 2011 12:37:36 -0400 Subject: removing semicolon --- nova/api/openstack/views/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 595a54790..b2352e3fd 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -116,7 +116,7 @@ class ViewBuilderV10(ViewBuilder): if 'image_ref' in dict(inst): image_ref = inst['image_ref'] if str(image_ref).startswith('http'): - raise exception.ListingImageRefsNotSupported(); + raise exception.ListingImageRefsNotSupported() response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): -- cgit From 1eee07811f9fb5fd29192b17610a6b2d2e6c3578 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 31 May 2011 13:34:33 -0400 Subject: added get_pagination_params function in common with tests, allow fake and local image services to accept filters, markers, and limits (but ignore them for now) --- nova/api/openstack/common.py | 31 ++++++++++++++++++++++ nova/api/openstack/images.py | 25 +++++++++++++++--- nova/image/fake.py | 4 +-- nova/image/glance.py | 10 ++++--- nova/image/local.py | 4 +-- nova/tests/api/openstack/fakes.py | 5 ++-- nova/tests/api/openstack/test_common.py | 46 +++++++++++++++++++++++++++++++++ nova/tests/image/test_glance.py | 2 +- 8 files changed, 113 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..69877cbce 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -36,6 +36,37 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +def get_pagination_params(request): + """ + Return marker, limit tuple from request + + @param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. + """ + try: + marker = int(request.GET.get('marker', 0)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) + + try: + limit = int(request.GET.get('limit', 0)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) + + if limit < 0: + raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + + if marker < 0: + raise webob.exc.HTTPBadRequest(_('marker param must be positive')) + + return(marker, limit) + + def limited(items, request, max_limit=FLAGS.osapi_max_limit): """ Return a slice of items according to requested offset and limit. 
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index c96b1c3e3..afe0f79de 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -74,7 +74,7 @@ class Controller(common.OpenstackController): """ context = req.environ['nova.context'] images = self._image_service.detail(context) - images = self._limited_items(images, req) + images = self._limit_items(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) @@ -157,5 +157,24 @@ class ControllerV11(Controller): def get_default_xmlns(self, req): return common.XML_NS_V11 - def _limit_items(self, items, req): - return common.limited_by_marker(items, req) + def index(self, req): + """Return an index listing of images available to the request. + + :param req: `wsgi.Request` object + """ + context = req.environ['nova.context'] + (marker, limit) = common.get_pagination_params(req) + images = self._image_service.index(context, marker, limit) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=False) for image in images]) + + def detail(self, req): + """Return a detailed index listing of images available to the request. + + :param req: `wsgi.Request` object. + """ + context = req.environ['nova.context'] + (marker, limit) = common.get_pagination_params(req) + images = self._image_service.detail(context, marker, limit) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=True) for image in images]) diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..4aa4219fe 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -52,11 +52,11 @@ class FakeImageService(service.BaseImageService): self.create(None, image) super(FakeImageService, self).__init__() - def index(self, context): + def index(self, context, filters=None, marker=None, limit=None): """Returns list of images.""" return copy.deepcopy(self.images.values()) - def detail(self, context): + def detail(self, context, filters=None, marker=None, limit=None): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/image/glance.py b/nova/image/glance.py index 193e37273..e084ed8ae 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,23 +58,25 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context): + def index(self, context, marker=None, limit=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed( + marker=marker, limit=limit) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) filtered.append(meta_subset) return filtered - def detail(self, context): + def detail(self, context, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed( + marker=marker, limit=limit) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/image/local.py b/nova/image/local.py index 918180bae..f320cc60c 100644 
--- a/nova/image/local.py +++ b/nova/image/local.py @@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService): images.append(unhexed_image_id) return images - def index(self, context): + def index(self, context, filters=None, marker=None, limit=None): filtered = [] image_metas = self.detail(context) for image_meta in image_metas: @@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService): filtered.append(meta) return filtered - def detail(self, context): + def detail(self, context, filters=None, marker=None, limit=None): images = [] for image_id in self._ids(): try: diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..2e28e421c 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -166,11 +166,12 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self): + def fake_get_images(self, filters=None, marker=None, limit=None): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_images_detailed(self): + def fake_get_images_detailed(self, filters=None, + marker=None, limit=None): return copy.deepcopy(self.fixtures) def fake_get_image_meta(self, image_id): diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 8f57c5b67..34597c7ac 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -25,6 +25,7 @@ from webob import Request from nova import test from nova.api.openstack.common import limited +from nova.api.openstack.common import get_pagination_params class LimiterTest(test.TestCase): @@ -169,3 +170,48 @@ class LimiterTest(test.TestCase): """ req = Request.blank('/?offset=-30') self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + + +class PaginationParamsTest(test.TestCase): + """ + Unit tests for the `nova.api.openstack.common.get_pagination_params` + method which takes in a request object and returns 'marker' and 'limit' + GET params. + """ + + def test_no_params(self): + """ + Test no params. + """ + req = Request.blank('/') + self.assertEqual(get_pagination_params(req), (0, 0)) + + def test_valid_marker(self): + """ + Test valid marker param. + """ + req = Request.blank('/?marker=1') + self.assertEqual(get_pagination_params(req), (1, 0)) + + def test_invalid_marker(self): + """ + Test invalid marker param. + """ + req = Request.blank('/?marker=-2') + self.assertRaises(webob.exc.HTTPBadRequest, + get_pagination_params, req) + + def test_valid_limit(self): + """ + Test valid limit param. + """ + req = Request.blank('/?limit=10') + self.assertEqual(get_pagination_params(req), (0, 10)) + + def test_invalid_limit(self): + """ + Test invalid limit param. 
+ """ + req = Request.blank('/?limit=-2') + self.assertRaises(webob.exc.HTTPBadRequest, + get_pagination_params, req) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 109905ded..041da1e13 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -34,7 +34,7 @@ class StubGlanceClient(object): def get_image_meta(self, image_id): return self.images[image_id] - def get_images_detailed(self): + def get_images_detailed(self, filters=None, marker=None, limit=None): return self.images.itervalues() def get_image(self, image_id): -- cgit From f668339effa089360c1989082c83afc35489f71e Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 31 May 2011 14:21:15 -0400 Subject: added tests for GlanceImageService --- nova/tests/api/openstack/fakes.py | 38 ++++++++++++++++-- nova/tests/api/openstack/test_images.py | 68 +++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 2e28e421c..e7006debe 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -167,12 +167,44 @@ def stub_out_glance(stubs, initial_fixtures=None): self.fixtures = initial_fixtures or [] def fake_get_images(self, filters=None, marker=None, limit=None): - return [dict(id=f['id'], name=f['name']) - for f in self.fixtures] + found = True + if marker: found = False + if limit == 0: limit = None + + fixtures = [] + count = 0 + for f in self.fixtures: + if limit and count >= limit: + break + if found: + fixtures.append(f) + count = count + 1 + if f['id'] == marker: + found = True + + return [dict(id=f['id'], name=f['name']) + for f in fixtures] def fake_get_images_detailed(self, filters=None, marker=None, limit=None): - return copy.deepcopy(self.fixtures) + found = True + if marker: found = False + if limit == 0: limit = None + + fixtures = [] + count = 0 + for f in self.fixtures: + if limit and count >= limit: + break + if found: + fixtures.append(f) + count = count + 1 + if f['id'] == marker: + found = True + + + return fixtures + def fake_get_image_meta(self, image_id): image = self._find_image(image_id) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 82bf66e49..310fbd5b4 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -226,6 +226,74 @@ class GlanceImageServiceTest(_BaseImageServiceTests): expected = {'name': 'test image', 'properties': {}} self.assertDictMatch(self.sent_to_glance['metadata'], expected) + def test_index_default_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context) + i = 0 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_index_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, marker=ids[1]) + self.assertEquals(len(image_metas), 8) + i = 2 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def 
test_index_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, limit=3) + self.assertEquals(len(image_metas), 3) + + def test_index_marker_and_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, marker=ids[3], limit=1) + self.assertEquals(len(image_metas), 1) + i = 4 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail(self): + fixture = self._make_fixture('test image') + image_id = self.service.create(self.context, fixture)['id'] + image_metas = self.service.index(self.context) + expected = [{'id': 'DONTCARE', 'name': 'test image'}] + self.assertDictListMatch(image_metas, expected) + class ImageControllerWithGlanceServiceTest(test.TestCase): """ -- cgit From 7beafb1aafac97e6dfc28108062785465cc8f577 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 14:38:12 -0400 Subject: Use a new instance_metadata_delete_all DB api call to delete existing metadata when updating a server. --- nova/db/sqlalchemy/api.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ea84e96e7..8df96cbf4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,8 +1013,9 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: + instance_metadata_delete_all(context, instance_id) instance_metadata_update_or_create(context, instance_id, - values.pop('metadata'), True) + values.pop('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2554,6 +2555,17 @@ def instance_metadata_delete(context, instance_id, key): 'updated_at': literal_column('updated_at')}) +@require_context +def instance_metadata_delete_all(context, instance_id): + session = get_session() + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + @require_context def instance_metadata_get_item(context, instance_id, key): session = get_session() @@ -2571,8 +2583,7 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata, - purge=False): +def instance_metadata_update_or_create(context, instance_id, metadata): session = get_session() original_metadata = instance_metadata_get(context, instance_id) @@ -2589,9 +2600,4 @@ def instance_metadata_update_or_create(context, instance_id, metadata, "deleted": 0}) meta_ref.save(session=session) - if purge: - for key in original_metadata.keys(): - if not key in metadata.keys(): - instance_metadata_delete(context, instance_id, key) - return metadata -- cgit From f2da479b8988cd55d39e89935b10e0b348df43c9 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 31 May 2011 23:36:49 +0400 Subject: Moved everything from thread-local storage to class attributes --- 
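For context, a minimal sketch of the pattern this change moves to: state that
used to live in threading.local now sits in class attributes, shared by every
instance in the process and initialized lazily on first construction. The
names below are illustrative only, not the actual driver API.

    class Driver(object):
        # Shared by all instances in the process; no per-thread storage.
        conn = None

        def __init__(self):
            # Create the shared connection exactly once, on first use.
            if Driver.conn is None:
                Driver.conn = self._connect()

        @staticmethod
        def _connect():
            # Stand-in for the real setup (e.g. ldap.initialize + bind).
            return object()

    a, b = Driver(), Driver()
    assert a.conn is b.conn   # every instance sees the same connection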
nova/auth/ldapdriver.py | 38 +++++++++++--------------------------- nova/auth/manager.py | 14 +++----------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 9fe0165a1..91f412baa 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -26,7 +26,6 @@ public methods. import functools import sys -import threading from nova import exception from nova import flags @@ -106,7 +105,8 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' - __local = threading.local() + conn = None + mc = None def __init__(self): """Imports the LDAP module""" @@ -117,15 +117,22 @@ class LdapDriver(object): LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + if LdapDriver.mc is None: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" # TODO(yorik-sar): Should be per-request cache, not per-driver-request self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" self.__cache = None return False @@ -149,29 +156,6 @@ class LdapDriver(object): return inner return do_wrap - @property - def conn(self): - try: - return self.__local.conn - except AttributeError: - conn = self.ldap.initialize(FLAGS.ldap_url) - conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) - self.__local.conn = conn - return conn - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc - @sanitize @__local_cache('uid_user-%s') def get_user(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c71f0f161..c887297f3 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,6 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 -import threading import tempfile import uuid import zipfile @@ -207,7 +206,7 @@ class AuthManager(object): """ _instance = None - __local = threading.local() + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -224,19 +223,12 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: + if AuthManager.mc is None: if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From f16f55a08038c78200a490055183104fc6a9348d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 31 May 2011 16:43:25 -0400 Subject: added tests for image detail requests --- 
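For context, a standalone sketch of the marker/limit semantics these detail
tests exercise; it mirrors the fake Glance client used by the test suite, not
the real Glance client: items up to and including the one whose id equals the
marker are skipped, then at most limit items are returned (no cap when limit
is 0 or None).

    def paginate(items, marker=None, limit=None):
        # Skip until the marker id has been seen, then collect the rest,
        # honouring the limit if one was given.
        found = marker is None
        results = []
        for item in items:
            if limit and len(results) >= limit:
                break
            if found:
                results.append(item)
            if item['id'] == marker:
                found = True
        return results

For ten fixtures with ids 1..10, paginate(fixtures, marker=2, limit=3) returns
the items with ids 3, 4 and 5, which is the skip-then-take shape the
assertions below check.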
nova/tests/api/openstack/test_images.py | 56 +++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 310fbd5b4..e8657683a 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -287,12 +287,56 @@ class GlanceImageServiceTest(_BaseImageServiceTests): self.assertDictMatch(meta, expected) i = i + 1 - def test_detail(self): - fixture = self._make_fixture('test image') - image_id = self.service.create(self.context, fixture)['id'] - image_metas = self.service.index(self.context) - expected = [{'id': 'DONTCARE', 'name': 'test image'}] - self.assertDictListMatch(image_metas, expected) + def test_detail_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, marker=ids[1]) + self.assertEquals(len(image_metas), 8) + i = 2 + for meta in image_metas: + expected = {'id': 'DONTCARE', 'status': None, + 'is_public': True, 'properties':{ + 'updated': None, 'created': None + }, + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, limit=3) + self.assertEquals(len(image_metas), 3) + + def test_detail_marker_and_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, marker=ids[3], limit=3) + self.assertEquals(len(image_metas), 3) + i = 4 + for meta in image_metas: + expected = {'id': 'DONTCARE', 'status': None, + 'is_public': True, 'properties':{ + 'updated': None, 'created': None + }, + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 class ImageControllerWithGlanceServiceTest(test.TestCase): -- cgit From 5922b5dc166476adf550abbbacc21e4585e53a37 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 31 May 2011 21:23:36 +0000 Subject: Fixing Scheduler Tests --- nova/tests/scheduler/test_scheduler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 54b3f80fb..b0f0e882a 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase): """Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + driver = 'nova.tests.scheduler.test_scheduler.TestDriver' + self.flags(scheduler_driver=driver) def _create_compute_service(self): """Create compute-manager(ComputeNode and Service record).""" -- cgit From b0c43e57ad6a7e5be8a749e70da39b7f7ba547bd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 May 2011 14:49:47 -0700 Subject: switch to using webob exception --- nova/api/ec2/metadatarequesthandler.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/metadatarequesthandler.py 
b/nova/api/ec2/metadatarequesthandler.py index 720f264a4..9c8e52270 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -76,12 +76,10 @@ class MetadataRequestHandler(wsgi.Application): meta_data = cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) - resp = webob.Response() - resp.status = 500 - message = _('An unknown error has occurred. ' - 'Please try your request again.') - resp.body = str(utils.utf8(message)) - return resp + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + return exc if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 81f40ed1ca284bc9a8ee948ae23fdff93d632cb0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 May 2011 15:50:33 -0700 Subject: pep8 --- nova/api/ec2/metadatarequesthandler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 9c8e52270..b70266a20 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -75,7 +75,8 @@ class MetadataRequestHandler(wsgi.Application): try: meta_data = cc.get_metadata(remote_address) except Exception: - LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) + LOG.exception(_('Failed to get metadata for ip: %s'), + remote_address) msg = _('An unknown error has occurred. ' 'Please try your request again.') exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) -- cgit From 16bd0ff62dccda5eba800b2762437d5e86faaafd Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 1 Jun 2011 09:20:15 -0400 Subject: Renamed migration to 020. --- .../migrate_repo/versions/019_rename_image_ids.py | 40 ---------------------- .../migrate_repo/versions/021_rename_image_ids.py | 40 ++++++++++++++++++++++ 2 files changed, 40 insertions(+), 40 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py deleted file mode 100644 index 73a5e8477..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, Integer, MetaData, String, Table - - -meta = MetaData() - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - image_id_column = instances.c.image_id - image_id_column.alter(name='image_ref') - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - image_ref_column = instances.c.image_ref - image_ref_column.alter(name='image_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py new file mode 100644 index 000000000..73a5e8477 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +meta = MetaData() + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') -- cgit From b8f2f8d63608d76af41fd218dddb955bdc656354 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:00:15 -0400 Subject: fix filtering tests --- nova/api/openstack/images.py | 8 ++++++-- nova/image/glance.py | 4 ++-- nova/tests/api/openstack/test_images.py | 30 ++++++++++++++++++++---------- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 20e6f38ce..8afd38a4f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -181,8 +181,10 @@ class ControllerV11(Controller): :param req: `wsgi.Request` object """ context = req.environ['nova.context'] + filters = self._get_filters(req) (marker, limit) = common.get_pagination_params(req) - images = self._image_service.index(context, marker, limit) + images = self._image_service.index( + context, filters=filters, marker=marker, limit=limit) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -192,7 +194,9 @@ class ControllerV11(Controller): :param req: `wsgi.Request` object. 
""" context = req.environ['nova.context'] + filters = self._get_filters(req) (marker, limit) = common.get_pagination_params(req) - images = self._image_service.detail(context, marker, limit) + images = self._image_service.detail( + context, filters=filters, marker=marker, limit=limit) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) diff --git a/nova/image/glance.py b/nova/image/glance.py index 09b2240ab..06f546027 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,7 +58,7 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context, marker=None, limit=None, filters=None): + def index(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` @@ -73,7 +73,7 @@ class GlanceImageService(service.BaseImageService): filtered.append(meta_subset) return filtered - def detail(self, context, marker=None, limit=None, filters=None): + def detail(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] image_metas = self.client.get_images_detailed(marker=marker, diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index d6b01400e..c859a31de 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -826,7 +826,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.index(context, filters).AndReturn([]) + image_service.index( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?name=testname') @@ -840,7 +841,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.index(context, filters).AndReturn([]) + image_service.index( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?status=ACTIVE') @@ -854,7 +856,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.index(context, filters).AndReturn([]) + image_service.index( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?property-test=3') @@ -868,7 +871,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.index(context, filters).AndReturn([]) + image_service.index( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') @@ -882,7 +886,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {} - image_service.index(context, filters).AndReturn([]) + image_service.index( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images') @@ -896,7 +901,8 @@ class 
ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.detail(context, filters).AndReturn([]) + image_service.detail( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?name=testname') @@ -910,7 +916,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.detail(context, filters).AndReturn([]) + image_service.detail( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE') @@ -924,7 +931,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.detail(context, filters).AndReturn([]) + image_service.detail( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?property-test=3') @@ -938,7 +946,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.detail(context, filters).AndReturn([]) + image_service.detail( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') @@ -952,7 +961,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_service = mocker.CreateMockAnything() context = object() filters = {} - image_service.detail(context, filters).AndReturn([]) + image_service.detail( + context, filters=filters, marker=0, limit=0).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail') -- cgit From 9fc8e71f1b201adc0a5e49ac3a94e22bf47596fb Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 1 Jun 2011 10:17:00 -0400 Subject: pep8 fixes --- nova/log.py | 2 +- nova/tests/test_notifier.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 960598b14..6909916a1 100644 --- a/nova/log.py +++ b/nova/log.py @@ -272,7 +272,7 @@ class PublishErrorsHandler(logging.Handler): def emit(self, record): nova.notifier.api.notify('nova.error.publisher', 'error_notification', nova.notifier.api.ERROR, dict(error=record.msg)) - + def handle_exception(type, value, tb): extra = {} diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 523f38f24..64b799a2c 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -122,12 +122,13 @@ class NotifierTestCase(test.TestCase): self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) LOG = log.getLogger('nova') LOG.setup_from_flags() - msgs = [] + def mock_cast(context, topic, data): msgs.append(data) - self.stubs.Set(nova.rpc, 'cast', mock_cast) - LOG.error('foo'); + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo') self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_type'], 'error_notification') -- cgit From 3fa4ece45eea12f0923c55d87130c668bafd2751 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:31:53 -0400 Subject: fix pep8 issues --- nova/api/openstack/common.py | 4 ++-- nova/tests/api/openstack/fakes.py | 13 ++++++++----- 
nova/tests/api/openstack/test_common.py | 6 +++--- nova/tests/api/openstack/test_images.py | 16 +++++++--------- nova/tests/api/openstack/test_servers.py | 3 +-- nova/tests/integrated/test_servers.py | 9 +++------ 6 files changed, 24 insertions(+), 27 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 69877cbce..b0d368dfa 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -39,13 +39,13 @@ XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' def get_pagination_params(request): """ Return marker, limit tuple from request - + @param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values - for either marker or limit will cause + for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. """ try: diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 6395280fd..bc21d66b4 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -168,8 +168,10 @@ def stub_out_glance(stubs, initial_fixtures=None): def fake_get_images(self, filters=None, marker=None, limit=None): found = True - if marker: found = False - if limit == 0: limit = None + if marker: + found = False + if limit == 0: + limit = None fixtures = [] count = 0 @@ -188,8 +190,10 @@ def stub_out_glance(stubs, initial_fixtures=None): def fake_get_images_detailed(self, filters=None, marker=None, limit=None): found = True - if marker: found = False - if limit == 0: limit = None + if marker: + found = False + if limit == 0: + limit = None fixtures = [] count = 0 @@ -202,7 +206,6 @@ def stub_out_glance(stubs, initial_fixtures=None): if f['id'] == marker: found = True - return fixtures def fake_get_image_meta(self, image_id): diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 34597c7ac..55142ffe1 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -174,8 +174,8 @@ class LimiterTest(test.TestCase): class PaginationParamsTest(test.TestCase): """ - Unit tests for the `nova.api.openstack.common.get_pagination_params` - method which takes in a request object and returns 'marker' and 'limit' + Unit tests for the `nova.api.openstack.common.get_pagination_params` + method which takes in a request object and returns 'marker' and 'limit' GET params. """ @@ -185,7 +185,7 @@ class PaginationParamsTest(test.TestCase): """ req = Request.blank('/') self.assertEqual(get_pagination_params(req), (0, 0)) - + def test_valid_marker(self): """ Test valid marker param. 
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index c859a31de..667f2866b 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -238,7 +238,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): image_metas = self.service.index(self.context) i = 0 for meta in image_metas: - expected = {'id': 'DONTCARE', + expected = {'id': 'DONTCARE', 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -255,7 +255,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): self.assertEquals(len(image_metas), 8) i = 2 for meta in image_metas: - expected = {'id': 'DONTCARE', + expected = {'id': 'DONTCARE', 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -283,7 +283,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): self.assertEquals(len(image_metas), 1) i = 4 for meta in image_metas: - expected = {'id': 'DONTCARE', + expected = {'id': 'DONTCARE', 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -301,9 +301,8 @@ class GlanceImageServiceTest(_BaseImageServiceTests): i = 2 for meta in image_metas: expected = {'id': 'DONTCARE', 'status': None, - 'is_public': True, 'properties':{ - 'updated': None, 'created': None - }, + 'is_public': True, 'properties': { + 'updated': None, 'created': None}, 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -332,9 +331,8 @@ class GlanceImageServiceTest(_BaseImageServiceTests): i = 4 for meta in image_metas: expected = {'id': 'DONTCARE', 'status': None, - 'is_public': True, 'properties':{ - 'updated': None, 'created': None - }, + 'is_public': True, 'properties': { + 'updated': None, 'created': None}, 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..20379e2bd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -774,8 +774,7 @@ class ServersTest(test.TestCase): def server_update(context, id, params): filtered_dict = dict( - display_name='server_test' - ) + display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index a67fa1bb5..35c6bb34f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,8 +194,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -224,8 +223,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} for i in range(30): @@ -267,8 +265,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} post['rebuild']['metadata'] = metadata -- cgit From a26e21040681fd6db5a6ae862ca18ee17689854c Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:32:49 +0400 Subject: Moved memcached driver import to the top of modules. 
--- nova/auth/ldapdriver.py | 10 ++++++---- nova/auth/manager.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 91f412baa..e26a360af 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -69,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor @@ -121,10 +127,6 @@ class LdapDriver(object): LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c887297f3..98c7dd263 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -224,10 +230,6 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) if AuthManager.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', -- cgit From 4d1271821f782d4e11934d69b4ffe3aced6072eb Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:34:54 +0400 Subject: PEP8 fix. --- nova/auth/ldapdriver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e26a360af..95e31ae3b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,8 @@ class LdapDriver(object): self.__cache = None if LdapDriver.conn is None: LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) -- cgit From 8b716bc23ac4e5e5398db9557757621fccb08204 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:37:54 -0400 Subject: fix pep8 issues --- nova/compute/api.py | 3 +-- .../migrate_repo/versions/016_make_quotas_key_and_value.py | 3 +-- nova/scheduler/host_filter.py | 3 +-- nova/tests/api/openstack/test_servers.py | 3 +-- nova/tests/integrated/test_servers.py | 9 +++------ nova/tests/test_host_filter.py | 14 +++++--------- nova/tests/test_zone_aware_scheduler.py | 10 +++------- tools/install_venv.py | 2 +- 8 files changed, 16 insertions(+), 31 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index de774e807..3e991e68a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -270,8 +270,7 @@ class API(base.Base): 'instance_type': instance_type, 'filter': 'nova.scheduler.host_filter.' 
- 'InstanceTypeFilter' - }, + 'InstanceTypeFilter'}, "availability_zone": availability_zone, "injected_files": injected_files, "admin_password": admin_password}}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index a2d8192ca..1a2a6d7ce 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -160,8 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): 'project_id': quota.project_id, 'created_at': quota.created_at, 'updated_at': quota.updated_at, - quota.resource: quota.hard_limit - } + quota.resource: quota.hard_limit} else: quotas[quota.project_id]['created_at'] = earliest( quota.created_at, quotas[quota.project_id]['created_at']) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4260cbf42..8827db4d4 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,8 +226,7 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk] - ] + ['>=', '$compute.disk_available', required_disk]] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..20379e2bd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -774,8 +774,7 @@ class ServersTest(test.TestCase): def server_update(context, id, params): filtered_dict = dict( - display_name='server_test' - ) + display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index a67fa1bb5..35c6bb34f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,8 +194,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -224,8 +223,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} for i in range(30): @@ -267,8 +265,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} post['rebuild']['metadata'] = metadata diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 07817cc5a..098ebff3d 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,13 +133,11 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], + ['<', '$compute.disk_available', 300]], ['and', ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] + ['>', '$compute.disk_available', 700]]] + cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) @@ -183,13 +181,11 @@ class 
HostFilterTestCase(test.TestCase): self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) + ['not', True, False, True, False]))) try: hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) + 'not', True, False, True, False)) self.fail("Should give KeyError") except KeyError, e: pass diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 37169fb97..90ae427e3 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -39,15 +39,11 @@ class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000} - }, + 'compute': {'ram': 1000}}, 'host2': { - 'compute': {'ram': 2000} - }, + 'compute': {'ram': 2000}}, 'host3': { - 'compute': {'ram': 3000} - } - } + 'compute': {'ram': 3000}}} class FakeEmptyZoneManager(zone_manager.ZoneManager): diff --git a/tools/install_venv.py b/tools/install_venv.py index 812b1dd0f..f4b6583ed 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -36,7 +36,7 @@ PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) def die(message, *args): - print >>sys.stderr, message % args + print >> sys.stderr, message % args sys.exit(1) -- cgit From c80fedead72456c18c3a0e63348e1a4d40c7e7c5 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:58:17 -0400 Subject: updates to keep things looking better --- nova/compute/api.py | 7 +++++-- .../versions/016_make_quotas_key_and_value.py | 3 ++- nova/scheduler/host_filter.py | 3 ++- nova/tests/api/openstack/test_servers.py | 3 +-- nova/tests/integrated/test_servers.py | 9 ++++++--- nova/tests/test_host_filter.py | 7 +++++-- nova/tests/test_zone_aware_scheduler.py | 16 ++++++++++------ 7 files changed, 31 insertions(+), 17 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3e991e68a..263e44bab 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -270,10 +270,13 @@ class API(base.Base): 'instance_type': instance_type, 'filter': 'nova.scheduler.host_filter.' 
- 'InstanceTypeFilter'}, + 'InstanceTypeFilter', + }, "availability_zone": availability_zone, "injected_files": injected_files, - "admin_password": admin_password}}) + "admin_password": admin_password, + }, + }) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 1a2a6d7ce..5d0593f2e 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -160,7 +160,8 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): 'project_id': quota.project_id, 'created_at': quota.created_at, 'updated_at': quota.updated_at, - quota.resource: quota.hard_limit} + quota.resource: quota.hard_limit, + } else: quotas[quota.project_id]['created_at'] = earliest( quota.created_at, quotas[quota.project_id]['created_at']) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 8827db4d4..7d6ee0ee3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,7 +226,8 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] + ['>=', '$compute.disk_available', required_disk], + ] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 20379e2bd..ee27d24eb 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -773,8 +773,7 @@ class ServersTest(test.TestCase): self.body = json.dumps(dict(server=inst_dict)) def server_update(context, id, params): - filtered_dict = dict( - display_name='server_test') + filtered_dict = dict(display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 35c6bb34f..fcb517cf5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,7 +194,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -223,7 +224,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} for i in range(30): @@ -265,7 +267,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} post['rebuild']['metadata'] = metadata diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 098ebff3d..3361c7b73 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,10 +133,13 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300]], + ['<', '$compute.disk_available', 300], + ], ['and', ['>', 
'$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700]]] + ['>', '$compute.disk_available', 700], + ], + ] cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 90ae427e3..72b74be20 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -38,12 +38,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { - 'host1': { - 'compute': {'ram': 1000}}, - 'host2': { - 'compute': {'ram': 2000}}, - 'host3': { - 'compute': {'ram': 3000}}} + 'host1': { + 'compute': {'ram': 1000}, + }, + 'host2': { + 'compute': {'ram': 2000}, + }, + 'host3': { + 'compute': {'ram': 3000}, + }, + } class FakeEmptyZoneManager(zone_manager.ZoneManager): -- cgit From ad964ef8934a14329a9100946bed26bcf37b1d52 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 1 Jun 2011 14:56:06 -0400 Subject: Updates to the 018_rename_server_management_url to avoid adding and dropping a column. Just simply rename the column. --- .../versions/018_rename_server_management_url.py | 29 ++++------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py index a169afb40..73c76f666 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -14,23 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging +from sqlalchemy import MetaData, Table meta = MetaData() -c_manageent = Column('server_manageent_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -c_management = Column('server_management_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; @@ -40,11 +27,8 @@ def upgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_management) - migrate_engine.execute(tokens.update() - .values(server_management_url=tokens.c.server_manageent_url)) - - tokens.c.server_manageent_url.drop() + c_manageent = tokens.c.server_manageent_url + c_manageent.alter(name='server_management_url') def downgrade(migrate_engine): @@ -53,8 +37,5 @@ def downgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_manageent) - migrate_engine.execute(tokens.update() - .values(server_manageent_url=tokens.c.server_management_url)) - - tokens.c.server_management_url.drop() + c_management = tokens.c.server_management_url + c_management.alter(name='server_manageent_url') -- cgit From 15257606e5346f0bf9a67145e5d4df7ba57c386a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 14:58:17 -0400 Subject: touch ups --- nova/image/glance.py | 12 ++++++------ nova/tests/api/openstack/test_common.py | 3 +-- nova/tests/integrated/test_servers.py | 9 ++++++--- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index 06f546027..61308431d 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -64,9 +64,9 @@ class GlanceImageService(service.BaseImageService): # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed(marker=marker, - limit=limit, - filters=filters) + image_metas = self.client.get_images_detailed(filters=filters, + marker=marker, + limit=limit) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) @@ -76,9 +76,9 @@ class GlanceImageService(service.BaseImageService): def detail(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed(marker=marker, - limit=limit, - filters=filters) + image_metas = self.client.get_images_detailed(filters=filters, + marker=marker, + limit=limit) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 55142ffe1..c4a6e3ebf 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -24,8 +24,7 @@ import webob.exc from webob import Request from nova import test -from nova.api.openstack.common import limited -from nova.api.openstack.common import get_pagination_params +from nova.api.openstack.common import limited, get_pagination_params class LimiterTest(test.TestCase): diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 35c6bb34f..fcb517cf5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,7 +194,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -223,7 +224,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): 
post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} for i in range(30): @@ -265,7 +267,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} post['rebuild']['metadata'] = metadata -- cgit From d77aa5862762bc6efda46d92940143a1b9cbccf5 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 1 Jun 2011 14:46:05 -0500 Subject: Allow SSL AMQP connections. --- nova/flags.py | 1 + nova/rpc.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 9eaac5596..d5090edba 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') +DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') diff --git a/nova/rpc.py b/nova/rpc.py index c5277c6a9..2e78a31e7 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection): if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, userid=FLAGS.rabbit_userid, password=FLAGS.rabbit_password, virtual_host=FLAGS.rabbit_virtual_host) -- cgit From ced79009e6555eb75f3862184834a883d37b2062 Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 1 Jun 2011 16:01:41 -0700 Subject: fixed as per peer review to make more consistent --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 51373d282..5de4d9e81 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 +536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance'].hostname + instance = floating_ip['fixed_ip']['instance']['hostname'] print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) -- cgit From 5d89721f5fa3212146749236c666f0e584c8590f Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 1 Jun 2011 16:27:51 -0700 Subject: merged, with trunk, fixed the test failure, and split the test into 3 as per peer review. 
--- nova/tests/test_cloud.py | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 04fd02ba3..02b7c8a38 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -445,38 +445,41 @@ class CloudTestCase(test.TestCase): self.cloud.delete_key_pair(self.context, 'test') def test_run_instances(self): - all_instances = db.instance_get_all(context.get_admin_context()) - self.assertEqual(0, len(all_instances)) - - def fake_show_decrypt(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine', 'image_state': 'decrypting'}} - - def fake_show_no_state(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}} - - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} run_instances = self.cloud.run_instances - # when image has valid image_state result = run_instances(self.context, **kwargs) instance = result['instancesSet'][0] self.assertEqual(instance['imageId'], 'ami-00000001') self.assertEqual(instance['displayName'], 'Server 1') self.assertEqual(instance['instanceId'], 'i-00000001') - self.assertEqual(instance['instanceState']['name'], 'scheduling') + self.assertEqual(instance['instanceState']['name'], 'networking') self.assertEqual(instance['instanceType'], 'm1.small') - # when image doesn't have 'image_state' attr at all + + def test_run_instances_image_state_none(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + def fake_show_no_state(self, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}} + self.stubs.UnsetAll() self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state) self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) - # when image has 'image_state' yet not 'available' + + def test_run_instances_image_state_invalid(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + def fake_show_decrypt(self, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine', 'image_state': 'decrypting'}} + self.stubs.UnsetAll() self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) self.assertRaises(exception.ApiError, run_instances, -- cgit From ef1f5b3aadde2fedb4b2d197af0f1c0f07375714 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 16:51:26 -0700 Subject: fix novarc to work on mac and zsh --- nova/auth/novarc.template | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 8170fcafe..4a1f41802 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,5 @@ -NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From 
8ee41f679bd72af6aab098f9d9735e342b281635 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 18:55:41 -0700 Subject: missed a couple chars --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 4a1f41802..92eed3520 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" -- cgit From 4fb46873ef4332c6570d3ac5559557745056dee6 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 23:09:37 -0400 Subject: cleanup based on waldon's comments, also caught a few other issues --- nova/api/openstack/common.py | 18 +--- nova/api/openstack/images.py | 8 +- nova/api/openstack/servers.py | 1 + nova/tests/api/openstack/fakes.py | 25 ++--- nova/tests/api/openstack/test_common.py | 180 +++++++++++++------------------ nova/tests/api/openstack/test_images.py | 33 ++++-- nova/tests/api/openstack/test_servers.py | 1 + nova/tests/integrated/test_servers.py | 3 + 8 files changed, 115 insertions(+), 154 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 342cc8b2e..c9e3dbb64 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -50,7 +50,7 @@ def get_pagination_params(request): try: marker = int(request.GET.get('marker', 0)) except ValueError: - raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) + raise webob.exc.HTTPBadRequest(_('marker param must be an integer')) try: limit = int(request.GET.get('limit', 0)) @@ -102,19 +102,11 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" + print "TEST LIMIT" + (marker, limit) = get_pagination_params(request) - try: - marker = int(request.GET.get('marker', 0)) - except ValueError: - raise webob.exc.HTTPBadRequest(_('marker param must be an integer')) - - try: - limit = int(request.GET.get('limit', max_limit)) - except ValueError: - raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) - - if limit < 0: - raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + if limit == 0: + limit = max_limit limit = min(max_limit, limit) start_index = 0 diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index dcedd3db2..4ef9a5974 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -46,9 +46,6 @@ class Controller(object): self._compute_service = compute_service or compute.API() self._image_service = image_service or _default_service - def _limit_items(self, items, req): - return common.limited(items, req) - def index(self, req): """Return an index listing of images available to the request. @@ -162,13 +159,11 @@ class ControllerV11(Controller): base_url = request.application_url return images_view.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 - def index(self, req): """Return an index listing of images available to the request. 
:param req: `wsgi.Request` object + """ context = req.environ['nova.context'] filters = self._get_filters(req) @@ -182,6 +177,7 @@ class ControllerV11(Controller): """Return a detailed index listing of images available to the request. :param req: `wsgi.Request` object. + """ context = req.environ['nova.context'] filters = self._get_filters(req) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f2ce64e78..ad556ca84 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -55,6 +55,7 @@ class Controller(object): def detail(self, req): """ Returns a list of server details for a given user """ + print "DETAIL" return self._items(req, is_detail=True) def _image_id_from_req_data(self, data): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bc21d66b4..67cd395ad 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -166,7 +166,7 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self, filters=None, marker=None, limit=None): + def _filter_images(self, filters=None, marker=None, limit=None): found = True if marker: found = False @@ -184,29 +184,16 @@ def stub_out_glance(stubs, initial_fixtures=None): if f['id'] == marker: found = True + return fixtures + + def fake_get_images(self, filters=None, marker=None, limit=None): + fixtures = self._filter_images(filters, marker, limit) return [dict(id=f['id'], name=f['name']) for f in fixtures] def fake_get_images_detailed(self, filters=None, marker=None, limit=None): - found = True - if marker: - found = False - if limit == 0: - limit = None - - fixtures = [] - count = 0 - for f in self.fixtures: - if limit and count >= limit: - break - if found: - fixtures.append(f) - count = count + 1 - if f['id'] == marker: - found = True - - return fixtures + return self._filter_images(filters, marker, limit) def fake_get_image_meta(self, image_id): image = self._find_image(image_id) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index c4a6e3ebf..9a9d9125c 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -24,7 +24,7 @@ import webob.exc from webob import Request from nova import test -from nova.api.openstack.common import limited, get_pagination_params +from nova.api.openstack import common class LimiterTest(test.TestCase): @@ -35,9 +35,7 @@ class LimiterTest(test.TestCase): """ def setUp(self): - """ - Run before each test. - """ + """ Run before each test. """ super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) @@ -45,130 +43,112 @@ class LimiterTest(test.TestCase): self.large = range(10000) def test_limiter_offset_zero(self): - """ - Test offset key works with 0. - """ + """ Test offset key works with 0. 
""" req = Request.blank('/?offset=0') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_offset_medium(self): - """ - Test offset key works with a medium sized number. - """ + """ Test offset key works with a medium sized number. """ req = Request.blank('/?offset=10') - self.assertEqual(limited(self.tiny, req), []) - self.assertEqual(limited(self.small, req), self.small[10:]) - self.assertEqual(limited(self.medium, req), self.medium[10:]) - self.assertEqual(limited(self.large, req), self.large[10:1010]) + self.assertEqual(common.limited(self.tiny, req), []) + self.assertEqual(common.limited(self.small, req), self.small[10:]) + self.assertEqual(common.limited(self.medium, req), self.medium[10:]) + self.assertEqual(common.limited(self.large, req), self.large[10:1010]) def test_limiter_offset_over_max(self): - """ - Test offset key works with a number over 1000 (max_limit). - """ + """ Test offset key works with a number over 1000 (max_limit). """ req = Request.blank('/?offset=1001') - self.assertEqual(limited(self.tiny, req), []) - self.assertEqual(limited(self.small, req), []) - self.assertEqual(limited(self.medium, req), []) - self.assertEqual(limited(self.large, req), self.large[1001:2001]) + self.assertEqual(common.limited(self.tiny, req), []) + self.assertEqual(common.limited(self.small, req), []) + self.assertEqual(common.limited(self.medium, req), []) + self.assertEqual( + common.limited(self.large, req), self.large[1001:2001]) def test_limiter_offset_blank(self): - """ - Test offset key works with a blank offset. - """ + """ Test offset key works with a blank offset. """ req = Request.blank('/?offset=') - self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): - """ - Test offset key works with a BAD offset. - """ + """ Test offset key works with a BAD offset. """ req = Request.blank(u'/?offset=\u0020aa') - self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): - """ - Test request with no offset or limit - """ + """ Test request with no offset or limit """ req = Request.blank('/') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_zero(self): - """ - Test limit of zero. - """ + """ Test limit of zero. 
""" req = Request.blank('/?limit=0') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_medium(self): - """ - Test limit of 10. - """ + """ Test limit of 10. """ req = Request.blank('/?limit=10') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium[:10]) - self.assertEqual(limited(self.large, req), self.large[:10]) + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium[:10]) + self.assertEqual(common.limited(self.large, req), self.large[:10]) def test_limiter_limit_over_max(self): - """ - Test limit of 3000. - """ + """ Test limit of 3000. """ req = Request.blank('/?limit=3000') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_and_offset(self): - """ - Test request with both limit and offset. - """ + """ Test request with both limit and offset. """ items = range(2000) req = Request.blank('/?offset=1&limit=3') - self.assertEqual(limited(items, req), items[1:4]) + self.assertEqual(common.limited(items, req), items[1:4]) req = Request.blank('/?offset=3&limit=0') - self.assertEqual(limited(items, req), items[3:1003]) + self.assertEqual(common.limited(items, req), items[3:1003]) req = Request.blank('/?offset=3&limit=1500') - self.assertEqual(limited(items, req), items[3:1003]) + self.assertEqual(common.limited(items, req), items[3:1003]) req = Request.blank('/?offset=3000&limit=10') - self.assertEqual(limited(items, req), []) + self.assertEqual(common.limited(items, req), []) def test_limiter_custom_max_limit(self): - """ - Test a max_limit other than 1000. - """ + """ Test a max_limit other than 1000. """ items = range(2000) req = Request.blank('/?offset=1&limit=3') - self.assertEqual(limited(items, req, max_limit=2000), items[1:4]) + self.assertEqual( + common.limited(items, req, max_limit=2000), items[1:4]) req = Request.blank('/?offset=3&limit=0') - self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + self.assertEqual( + common.limited(items, req, max_limit=2000), items[3:]) req = Request.blank('/?offset=3&limit=2500') - self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + self.assertEqual( + common.limited(items, req, max_limit=2000), items[3:]) req = Request.blank('/?offset=3000&limit=10') - self.assertEqual(limited(items, req, max_limit=2000), []) + self.assertEqual(common.limited(items, req, max_limit=2000), []) def test_limiter_negative_limit(self): - """ - Test a negative limit. 
- """ + """ Test a negative limit. """ req = Request.blank('/?limit=-3000') - self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): - """ - Test a negative offset. - """ + """ Test a negative offset. """ req = Request.blank('/?offset=-30') - self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class PaginationParamsTest(test.TestCase): @@ -179,38 +159,28 @@ class PaginationParamsTest(test.TestCase): """ def test_no_params(self): - """ - Test no params. - """ + """ Test no params. """ req = Request.blank('/') - self.assertEqual(get_pagination_params(req), (0, 0)) + self.assertEqual(common.get_pagination_params(req), (0, 0)) def test_valid_marker(self): - """ - Test valid marker param. - """ + """ Test valid marker param. """ req = Request.blank('/?marker=1') - self.assertEqual(get_pagination_params(req), (1, 0)) + self.assertEqual(common.get_pagination_params(req), (1, 0)) def test_invalid_marker(self): - """ - Test invalid marker param. - """ + """ Test invalid marker param. """ req = Request.blank('/?marker=-2') - self.assertRaises(webob.exc.HTTPBadRequest, - get_pagination_params, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit(self): - """ - Test valid limit param. - """ + """ Test valid limit param. """ req = Request.blank('/?limit=10') - self.assertEqual(get_pagination_params(req), (0, 10)) + self.assertEqual(common.get_pagination_params(req), (0, 10)) def test_invalid_limit(self): - """ - Test invalid limit param. - """ + """ Test invalid limit param. """ req = Request.blank('/?limit=-2') - self.assertRaises(webob.exc.HTTPBadRequest, - get_pagination_params, req) + self.assertRaises( + webob.exc.HTTPBadRequest, common.get_pagination_params, req) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 667f2866b..38823c377 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -239,7 +239,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): i = 0 for meta in image_metas: expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} + 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -256,7 +256,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): i = 2 for meta in image_metas: expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} + 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -284,7 +284,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): i = 4 for meta in image_metas: expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} + 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -300,10 +300,17 @@ class GlanceImageServiceTest(_BaseImageServiceTests): self.assertEquals(len(image_metas), 8) i = 2 for meta in image_metas: - expected = {'id': 'DONTCARE', 'status': None, - 'is_public': True, 'properties': { - 'updated': None, 'created': None}, - 'name': 'TestImage %d' % (i)} + expected = { + 'id': 'DONTCARE', + 'status': None, + 'is_public': True, + 'name': 'TestImage %d' % (i), + 'properties': { + 'updated': None, + 'created': None, + }, + } + self.assertDictMatch(meta, expected) i = i + 1 @@ -330,10 +337,14 @@ class GlanceImageServiceTest(_BaseImageServiceTests): 
self.assertEquals(len(image_metas), 3) i = 4 for meta in image_metas: - expected = {'id': 'DONTCARE', 'status': None, - 'is_public': True, 'properties': { - 'updated': None, 'created': None}, - 'name': 'TestImage %d' % (i)} + expected = { + 'id': 'DONTCARE', + 'status': None, + 'is_public': True, + 'name': 'TestImage %d' % (i), + 'properties': { + 'updated': None, 'created': None}, + } self.assertDictMatch(meta, expected) i = i + 1 diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 51def1980..3de7865cd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -448,6 +448,7 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + print "BODY",res.body self.assertTrue(res.body.find('marker param') > -1) def _setup_for_create_instance(self): diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index fcb517cf5..89987b645 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -88,12 +88,15 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Check it's there found_server = self.api.get_server(created_server_id) + print "FOUND_SERVER:", found_server self.assertEqual(created_server_id, found_server['id']) # It should also be in the all-servers list servers = self.api.get_servers() + print "SERVERS:", servers server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) + return # Wait (briefly) for creation retries = 0 -- cgit From 5ded1f2c1d0d14b3c04df137f7cc6a0b65e53fda Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 23:11:50 -0400 Subject: got rid of print debugs --- nova/api/openstack/common.py | 1 - nova/api/openstack/servers.py | 1 - nova/tests/api/openstack/test_images.py | 2 +- nova/tests/api/openstack/test_servers.py | 1 - 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index c9e3dbb64..559b44ef5 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -102,7 +102,6 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" - print "TEST LIMIT" (marker, limit) = get_pagination_params(request) if limit == 0: diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ad556ca84..f2ce64e78 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -55,7 +55,6 @@ class Controller(object): def detail(self, req): """ Returns a list of server details for a given user """ - print "DETAIL" return self._items(req, is_detail=True) def _image_id_from_req_data(self, data): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 38823c377..c2b03c281 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -306,7 +306,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): 'is_public': True, 'name': 'TestImage %d' % (i), 'properties': { - 'updated': None, + 'updated': None, 'created': None, }, } diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3de7865cd..51def1980 100644 --- 
a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -448,7 +448,6 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) - print "BODY",res.body self.assertTrue(res.body.find('marker param') > -1) def _setup_for_create_instance(self): -- cgit From 0e419c00ef9a463acc704f034e4c37929f0ef2eb Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 1 Jun 2011 23:37:51 -0400 Subject: image href should be passed through the rebuild pipeline, not the image id. --- nova/api/openstack/servers.py | 3 +-- nova/compute/api.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 55fed408c..0ef1a83da 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -714,7 +714,6 @@ class ControllerV11(Controller): LOG.debug(msg) return faults.Fault(exc.HTTPBadRequest(explanation=msg)) - image_id = common.get_id_from_href(image_href) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata") name = info["rebuild"].get("name") @@ -724,7 +723,7 @@ class ControllerV11(Controller): self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, name, + self.compute_api.rebuild(context, instance_id, image_href, name, metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id diff --git a/nova/compute/api.py b/nova/compute/api.py index 2b353cebb..6a1c68561 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -529,8 +529,8 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, name=None, metadata=None, - files_to_inject=None): + def rebuild(self, context, instance_id, image_href, name=None, + metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -550,7 +550,7 @@ class API(base.Base): self.db.instance_update(context, instance_id, values) rebuild_params = { - "image_ref": image_ref, + "image_ref": image_href, "injected_files": files_to_inject, } -- cgit From b39b0e66f16d49890189c63fba528734ef476068 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Thu, 2 Jun 2011 10:29:58 +0400 Subject: Refactored after review, fixed merge. --- nova/image/fake.py | 5 +---- nova/tests/test_libvirt.py | 10 +++++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index 4bf25d9af..019d683f4 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -82,12 +82,9 @@ class _FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. 
""" - #image_id = int(metadata['id']) - # metadata['id'] may not exists, and since image_id is - # randomly generated in local.py, let us do the same here try: image_id = int(metadata['id']) - except: + except KeyError: image_id = random.randint(0, 2 ** 31 - 1) if self.images.get(image_id): diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d9316ab4f..d008a149e 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -328,14 +328,14 @@ class LibvirtConnTestCase(test.TestCase): # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup - self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') - libvirt_conn.utils.execute = fake_execute + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = fake_execute self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) -- cgit From 7b24750057cfef1d0f14b21cb83b1ac9c0869836 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 2 Jun 2011 08:53:13 -0400 Subject: got rid of prints --- nova/tests/integrated/test_servers.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 89987b645..1733896fd 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -88,12 +88,10 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Check it's there found_server = self.api.get_server(created_server_id) - print "FOUND_SERVER:", found_server self.assertEqual(created_server_id, found_server['id']) # It should also be in the all-servers list servers = self.api.get_servers() - print "SERVERS:", servers server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) return -- cgit From e28a6e96ec45439ed24a363f27d0421d720add0b Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 2 Jun 2011 09:34:01 -0400 Subject: move index and detail functions to v10 controller --- nova/api/openstack/images.py | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 4ef9a5974..7f06c53df 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -46,30 +46,6 @@ class Controller(object): self._compute_service = compute_service or compute.API() self._image_service = image_service or _default_service - def index(self, req): - """Return an index listing of images available to the request. - - :param req: `wsgi.Request` object - """ - context = req.environ['nova.context'] - filters = self._get_filters(req) - images = self._image_service.index(context, filters) - images = common.limited(images, req) - builder = self.get_builder(req).build - return dict(images=[builder(image, detail=False) for image in images]) - - def detail(self, req): - """Return a detailed index listing of images available to the request. - - :param req: `wsgi.Request` object. 
- """ - context = req.environ['nova.context'] - filters = self._get_filters(req) - images = self._image_service.detail(context, filters) - images = common.limited(images, req) - builder = self.get_builder(req).build - return dict(images=[builder(image, detail=True) for image in images]) - def _get_filters(self, req): """ Return a dictionary of query param filters from the request @@ -150,6 +126,30 @@ class ControllerV10(Controller): base_url = request.application_url return images_view.ViewBuilderV10(base_url) + def index(self, req): + """Return an index listing of images available to the request. + + :param req: `wsgi.Request` object + """ + context = req.environ['nova.context'] + filters = self._get_filters(req) + images = self._image_service.index(context, filters) + images = common.limited(images, req) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=False) for image in images]) + + def detail(self, req): + """Return a detailed index listing of images available to the request. + + :param req: `wsgi.Request` object. + """ + context = req.environ['nova.context'] + filters = self._get_filters(req) + images = self._image_service.detail(context, filters) + images = common.limited(images, req) + builder = self.get_builder(req).build + return dict(images=[builder(image, detail=True) for image in images]) + class ControllerV11(Controller): """Version 1.1 specific controller logic.""" -- cgit From b380e0de4b7c24607c16734a46b3e11d64947b01 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 11:12:35 -0400 Subject: Remove a rogue comment. --- nova/image/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index f42332a29..93d83df24 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -70,7 +70,6 @@ def get_glance_client(image_href): except: raise exception.InvalidImageRef(image_href=image_href) glance_client = GlanceClient(host, port) - #glance_client = client.Client(host, port) return (glance_client, image_id) -- cgit From 052f08256d2be2dda5ed792be48aa4f97cb93a93 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 11:38:20 -0400 Subject: Remove comment about imageRef not being implemented. 
--- nova/tests/integrated/integrated_helpers.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 5eacc829d..522c7cb0e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -207,10 +207,7 @@ class _IntegratedTestBase(test.TestCase): if 'imageRef' in image: image_href = image['imageRef'] else: - # NOTE(justinsb): The imageRef code hasn't yet landed - LOG.warning("imageRef not yet in images output") image_href = image['id'] - image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId -- cgit From 9f1027069c47ea83e1dfca9bed48b2a403463689 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 2 Jun 2011 11:58:17 -0400 Subject: got rid of more test debugging stuff that shouldnt have made it in --- nova/tests/integrated/test_servers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 1733896fd..fcb517cf5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -94,7 +94,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase): servers = self.api.get_servers() server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) - return # Wait (briefly) for creation retries = 0 -- cgit From 7ca707c1cbfb3164d4b6f706a4e9720e54bcc35f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 2 Jun 2011 12:02:16 -0400 Subject: Minor comment formatting changes. --- nova/api/openstack/common.py | 6 +++--- nova/api/openstack/images.py | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 559b44ef5..40fb59765 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -36,16 +36,16 @@ XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' def get_pagination_params(request): - """ - Return marker, limit tuple from request + """Return marker, limit tuple from request. - @param request: `wsgi.Request` possibly containing 'marker' and 'limit' + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. + """ try: marker = int(request.GET.get('marker', 0)) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 7f06c53df..73249b485 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -130,6 +130,7 @@ class ControllerV10(Controller): """Return an index listing of images available to the request. :param req: `wsgi.Request` object + """ context = req.environ['nova.context'] filters = self._get_filters(req) @@ -142,6 +143,7 @@ class ControllerV10(Controller): """Return a detailed index listing of images available to the request. :param req: `wsgi.Request` object. + """ context = req.environ['nova.context'] filters = self._get_filters(req) -- cgit From 9034bb2fcd5f03df2b25d6114adc4e7d5f3549fe Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 13:00:17 -0400 Subject: Remove some of the extra image service calls from the OS API images controller. 
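The controller already holds self._image_service, so show() and delete() can call it directly instead of re-resolving a service from the id via nova.image.get_image_service(). The Glance stub's _find_image() now compares ids as strings, since an id parsed from the request URL arrives as a string while the fixtures may store integers. A minimal sketch of that normalized lookup (illustrative only; find_image and the fixtures list are invented for the example):

    def find_image(fixtures, image_id):
        # Normalize both sides before comparing: route ids come in as
        # strings, fixture ids may be ints.
        for fixture in fixtures:
            if str(fixture['id']) == str(image_id):
                return fixture
        return None
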
--- nova/api/openstack/images.py | 6 ++---- nova/tests/api/openstack/fakes.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 87cbef791..59d9e3082 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -94,8 +94,7 @@ class Controller(object): context = req.environ['nova.context'] try: - (image_service, image_id) = nova.image.get_image_service(id) - image = image_service.show(context, image_id) + image = self._image_service.show(context, id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -109,8 +108,7 @@ class Controller(object): :param id: Image identifier (integer) """ context = req.environ['nova.context'] - (image_service, image_id) = nova.image.get_image_service(id) - image_service.delete(context, image_id) + self._image_service.delete(context, id) return webob.exc.HTTPNoContent() def create(self, req, body): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 97fc3900d..17d6d591c 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -211,7 +211,7 @@ def stub_out_glance(stubs, initial_fixtures=None): def _find_image(self, image_id): for f in self.fixtures: - if f['id'] == image_id: + if str(f['id']) == str(image_id): return f return None -- cgit From be2f5e986e41f8f8d63c0ef7a5c03916c70ba455 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 10:20:26 -0700 Subject: don't use python if readlink is available --- nova/auth/novarc.template | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 92eed3520..d30bd849c 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,5 +1,6 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) -NOVA_KEY_DIR=$(dirname ${NOVARC}) +NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || + NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") +NOVA_KEY_DIR=${NOVARC%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From 28320ced7afb2c224ab4e1cfb8a607646a2bd2e3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 11:28:41 -0700 Subject: use %% because % is a replacement string character --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index d30bd849c..eba3a8537 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,6 +1,6 @@ NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") -NOVA_KEY_DIR=${NOVARC%/*} +NOVA_KEY_DIR=${NOVARC%%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From ae1842174f4b079c8d84b32ddad4df1b7ff29bec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 12:01:49 -0700 Subject: Tests to assure all exceptions can be raised as well as fixing NotAuthorized --- nova/exception.py | 4 ++-- nova/tests/test_misc.py | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git 
a/nova/exception.py b/nova/exception.py index d3d58f3b2..5b824bba6 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -65,7 +65,7 @@ class BuildInProgress(Error): class DBError(Error): """Wraps an implementation specific exception.""" - def __init__(self, inner_exception): + def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) @@ -122,7 +122,7 @@ class NotAuthorized(NovaException): message = _("Not authorized.") def __init__(self, *args, **kwargs): - super(NotFound, self).__init__(**kwargs) + super(NotAuthorized, self).__init__(**kwargs) class AdminRequired(NotAuthorized): diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index cf8f4c05e..c5875a843 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -21,11 +21,24 @@ import select from eventlet import greenpool from eventlet import greenthread +from nova import exception from nova import test from nova import utils from nova.utils import parse_mailmap, str_dict_replace +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') -- cgit From b2fb1738db489206557abccb631b13991c31fd4e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:23:05 -0700 Subject: make all uses of utcnow use our testable utils.utcnow --- bin/nova-manage | 3 +-- nova/api/ec2/admin.py | 3 +-- nova/api/ec2/cloud.py | 5 ++-- nova/api/openstack/auth.py | 5 ++-- nova/api/openstack/contrib/__init__.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/ratelimiting/__init__.py | 2 +- nova/compute/api.py | 3 +-- nova/compute/manager.py | 3 +-- nova/compute/monitor.py | 2 +- nova/context.py | 1 - nova/db/sqlalchemy/api.py | 29 +++++++++++----------- .../versions/016_make_quotas_key_and_value.py | 10 ++++---- nova/db/sqlalchemy/models.py | 9 +++---- nova/network/manager.py | 2 +- nova/notifier/api.py | 7 +++--- nova/scheduler/driver.py | 3 ++- nova/scheduler/simple.py | 11 ++++---- nova/test.py | 4 +-- nova/tests/api/openstack/fakes.py | 3 +-- nova/tests/api/openstack/test_images.py | 1 - nova/tests/api/openstack/test_servers.py | 8 +++--- nova/tests/test_compute.py | 5 ++-- nova/tests/test_console.py | 2 -- nova/tests/test_middleware.py | 1 - nova/tests/test_scheduler.py | 16 ++++++------ nova/utils.py | 2 +- nova/virt/xenapi/fake.py | 4 +-- nova/volume/api.py | 5 ++-- nova/volume/manager.py | 4 +-- 30 files changed, 69 insertions(+), 88 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..b545c4246 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,7 +53,6 @@ CLI interface for nova management. """ -import datetime import gettext import glob import json @@ -689,7 +688,7 @@ class ServiceCommands(object): """Show a list of all running services. Filter by host & service name. 
args: [host] [service]""" ctxt = context.get_admin_context() - now = datetime.datetime.utcnow() + now = utils.utcnow() services = db.service_get_all(ctxt) if host: services = [s for s in services if s['host'] == host] diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..aeebd86fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. """ import base64 -import datetime from nova import db from nova import exception @@ -305,7 +304,7 @@ class AdminController(object): * Volume Count """ services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] rv = [] for host in [service['host'] for service in services]: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 79cc3b3bf..04675174f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,6 @@ datastore. """ import base64 -import datetime import IPy import os import urllib @@ -235,7 +234,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -595,7 +594,7 @@ class CloudController(object): instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) - now = datetime.datetime.utcnow() + now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c6ee22a2..b49bf449b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -13,9 +13,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import hashlib import time @@ -127,7 +126,7 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.utcnow() - token['created_at'] + delta = utils.utcnow() - token['created_at'] if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py index b42a1d89d..acb5eb280 100644 --- a/nova/api/openstack/contrib/__init__.py +++ b/nova/api/openstack/contrib/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Contrib contains extensions that are shipped with nova. diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 4d46b92df..dc2bc6bbc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. 
""" Module dedicated functions/classes dealing with rate limiting requests. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 88ffc3246..9ede548c2 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Rate limiting of arbitrary actions.""" diff --git a/nova/compute/api.py b/nova/compute/api.py index 7122ebe67..de87ddd88 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -18,7 +18,6 @@ """Handles all requests relating to instances (guest vms).""" -import datetime import eventlet import re import time @@ -405,7 +404,7 @@ class API(base.Base): instance['id'], state_description='terminating', state=0, - terminated_at=datetime.datetime.utcnow()) + terminated_at=utils.utcnow()) host = instance['host'] if host: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3897b3a9e..a57d6e246 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,7 +35,6 @@ terminating it. """ -import datetime import os import socket import sys @@ -159,7 +158,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or datetime.datetime.utcnow()} + data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) def _update_image_id(self, context, instance_id, image_id): diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 3bb54a382..613734bef 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -86,7 +86,7 @@ RRD_VALUES = { ]} -utcnow = datetime.datetime.utcnow +utcnow = utils.utcnow LOG = logging.getLogger('nova.compute.monitor') diff --git a/nova/context.py b/nova/context.py index c113f7ea7..99085ed75 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,7 +18,6 @@ """RequestContext: context for requests that persist through all of nova.""" -import datetime import random from nova import exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c3a971a82..bb7bf5b89 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
""" -import datetime import warnings from nova import db @@ -674,7 +673,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter_by(allocated=0).\ update({'instance_id': None, 'leased': 0, - 'updated_at': datetime.datetime.utcnow()}, + 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -820,17 +819,17 @@ def instance_destroy(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1123,7 +1122,7 @@ def key_pair_destroy_all_by_user(context, user_id): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1655,7 +1654,7 @@ def volume_destroy(context, volume_id): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ @@ -1813,7 +1812,7 @@ def snapshot_destroy(context, snapshot_id): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1968,17 +1967,17 @@ def security_group_destroy(context, security_group_id): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1989,11 +1988,11 @@ def security_group_destroy_all(context, session=None): with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2627,7 +2626,7 @@ def instance_metadata_delete(context, instance_id, key): filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2638,7 +2637,7 @@ def instance_metadata_delete_all(context, instance_id): 
filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 5d0593f2e..a4fe3e482 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -17,7 +17,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table -import datetime +from nova import utils meta = MetaData() @@ -35,9 +35,9 @@ def old_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -57,9 +57,9 @@ def new_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 22a1a84e8..dbe72efd9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -19,8 +19,6 @@ SQLAlchemy models for nova data. 
""" -import datetime - from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text @@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import utils FLAGS = flags.FLAGS @@ -43,8 +42,8 @@ class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False - created_at = Column(DateTime, default=datetime.datetime.utcnow) - updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -64,7 +63,7 @@ class NovaBase(object): def delete(self, session=None): """Delete this object.""" self.deleted = True - self.deleted_at = datetime.datetime.utcnow() + self.deleted_at = utils.utcnow() self.save(session=session) def __setitem__(self, key, value): diff --git a/nova/network/manager.py b/nova/network/manager.py index 5a6fdde5a..f726c4b26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -235,7 +235,7 @@ class NetworkManager(manager.SchedulerDependentManager): inst_addr = instance_ref['mac_address'] raise exception.Error(_('IP %(address)s leased to bad mac' ' %(inst_addr)s vs %(mac)s') % locals()) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039e..d49517c8b 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 2094e3565..0b257c5d8 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from nova.compute import power_state FLAGS = flags.FLAGS @@ -61,7 +62,7 @@ class Scheduler(object): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
- elapsed = datetime.datetime.utcnow() - last_heartbeat + elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..87cdef11d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/test.py b/nova/test.py index 80b2d0a74..60b599ce4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,7 +23,6 @@ inline callbacks. """ -import datetime import functools import os import shutil @@ -37,6 +36,7 @@ from eventlet import greenthread from nova import fakerabbit from nova import flags from nova import rpc +from nova import utils from nova import service from nova import wsgi from nova.virt import fake @@ -69,7 +69,7 @@ class TestCase(unittest.TestCase): # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. - self.start = datetime.datetime.utcnow() + self.start = utils.utcnow() shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8e0156afa..4fb0613fc 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
import copy -import datetime import json import random import string @@ -253,7 +252,7 @@ class FakeAuthDatabase(object): @staticmethod def auth_token_create(context, token): - fake_token = FakeToken(created_at=datetime.datetime.now(), **token) + fake_token = FakeToken(created_at=utils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 9f1f28611..93b402081 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import copy import json -import datetime import os import shutil import tempfile diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 11dcaaade..50d5fe980 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -16,7 +16,6 @@ # under the License. import base64 -import datetime import json import unittest from xml.dom import minidom @@ -29,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -114,9 +114,9 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "user_data": "", "reservation_id": "", "mac_address": "", - "scheduled_at": datetime.datetime.now(), - "launched_at": datetime.datetime.now(), - "terminated_at": datetime.datetime.now(), + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "server%s" % id, "display_description": "", diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..c726080ee 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867ee..831e7670f 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a6955..40d117c45 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 54b3f80fb..1cf6bbfbf 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -196,7 +196,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..b1638e72c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ 
-307,7 +307,7 @@ def get_my_linklocal(interface): def utcnow(): - """Overridable version of datetime.datetime.utcnow.""" + """Overridable version of utils.utcnow.""" if utcnow.override_time: return utcnow.override_time return datetime.datetime.utcnow() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 76988b172..165888cb2 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,13 +51,13 @@ A fake XenAPI SDK. """ -import datetime import uuid from pprint import pformat from nova import exception from nova import log as logging +from nova import utils _CLASSES = ['host', 'network', 'session', 'SR', 'VBD', @@ -540,7 +540,7 @@ class SessionBase(object): except Failure, exc: task['error_info'] = exc.details task['status'] = 'failed' - task['finished'] = datetime.datetime.now() + task['finished'] = utils.utcnow() return task_ref def _check_session(self, params): diff --git a/nova/volume/api.py b/nova/volume/api.py index 5804955f7..b07f2e94b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -20,14 +20,13 @@ Handles all requests relating to volumes. """ -import datetime -from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import rpc +from nova import utils from nova.db import base FLAGS = flags.FLAGS @@ -78,7 +77,7 @@ class API(base.Base): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) host = volume['host'] diff --git a/nova/volume/manager.py b/nova/volume/manager.py index ff53f0701..798bd379a 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,8 +42,6 @@ intact. """ -import datetime - from nova import context from nova import exception @@ -127,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'error'}) raise - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) -- cgit From 4762aebe4ddc57d8502ed3b5aec56b613d0ec93b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:51:30 -0700 Subject: switch zones to use utcnow --- nova/scheduler/zone_manager.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3ddf6f3c3..3f483adff 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -17,16 +17,17 @@ ZoneManager oversees all communications with child Zones. 
""" +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { : ZoneState } self.service_states = {} # { : { : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) -- cgit From d7d628d58612b94491310a1a03727e1afa9d5ad5 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 20:45:36 -0500 Subject: Added paramiko to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index f1c5b2003..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -33,3 +33,4 @@ suds==0.4 coverage nosexcover GitPython +paramiko -- cgit From a3b8b3467d836463dda806c93756841a52c055d3 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 21:18:09 -0500 Subject: added nova_adminclient to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index e81ef944a..035e4347d 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,3 +34,4 @@ coverage nosexcover GitPython paramiko +nova_adminclient -- cgit From 9ee103a91fe3bed03c3f4c6c1a6e89fa474e1aae Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 12:37:58 +0400 Subject: Fixed FakeLdapDriver, made it call LdapDriver.__init__ --- nova/auth/ldapdriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95e31ae3b..183f7a985 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -676,6 +676,7 @@ class LdapDriver(object): class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: 
disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() -- cgit From 72a47784dc09d9b840db146d58ea71f6af30a8ea Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 13:39:22 +0400 Subject: Flush AuthManager's cache before each test. --- nova/tests/test_auth.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f02dd94b7..7d00bddfe 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase): super(_AuthManagerBaseTestCase, self).setUp() self.flags(connection_type='fake') self.manager = manager.AuthManager(new=True) + self.manager.mc.cache = {} def test_create_and_find_user(self): with user_generator(self.manager): -- cgit From 29eec21f6752ef2c03412213a74aa12745286c82 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 3 Jun 2011 05:23:43 -0700 Subject: little tweaks --- doc/source/devref/distributed_scheduler.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 28ba20af7..eb6a1a03e 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -1,7 +1,3 @@ - - - - .. Copyright 2011 OpenStack LLC All Rights Reserved. @@ -40,7 +36,7 @@ Some Costs are more esoteric. Consider a rule that says we should prefer Hosts t An example of some other costs might include selecting: * a GPU-based host over a standard CPU * a host with fast ethernet over a 10mbps line -* a host than can run Windows instances +* a host that can run Windows instances * a host in the EU vs North America * etc -- cgit From 8739529368cb755d33c3d8c532dd1c5d86f0bf85 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 08:50:30 -0400 Subject: Implement OSAPI v1.1 style image create. 
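The v1.0 and v1.1 APIs name the source server differently in the image-create request body, so the shared create() handler now delegates the lookup to a per-version helper. A rough sketch of the two payload shapes the controllers below accept; the field values are made up for illustration, only the serverId/serverRef key names come from this patch:

    # Hypothetical request bodies for the two API versions.
    v10_body = {"image": {"serverId": "123", "name": "Backup 1"}}
    v11_body = {"image": {"serverRef": "123", "name": "Backup 1"}}

    def server_id_from_v10(body):
        # mirrors ControllerV10._server_id_from_req_data below
        return body["image"]["serverId"]

    def server_id_from_v11(body):
        # mirrors ControllerV11._server_id_from_req_data below
        return body["image"]["serverRef"]

    assert server_id_from_v10(v10_body) == server_id_from_v11(v11_body) == "123"

A missing key in either version raises KeyError, which create() turns into HTTP 400 (HTTPBadRequest).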
--- nova/api/openstack/images.py | 11 ++++++++- nova/tests/api/openstack/fakes.py | 2 +- nova/tests/api/openstack/test_images.py | 41 +++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 59d9e3082..48ea04248 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -123,7 +123,7 @@ class Controller(object): raise webob.exc.HTTPBadRequest() try: - server_id = body["image"]["serverId"] + server_id = self._server_id_from_req_data(body) image_name = body["image"]["name"] except KeyError: raise webob.exc.HTTPBadRequest() @@ -135,6 +135,9 @@ class Controller(object): """Indicates that you must use a Controller subclass.""" raise NotImplementedError + def _server_id_from_req_data(self, data): + raise NotImplementedError() + class ControllerV10(Controller): """Version 1.0 specific controller logic.""" @@ -144,6 +147,9 @@ class ControllerV10(Controller): base_url = request.application_url return images_view.ViewBuilderV10(base_url) + def _server_id_from_req_data(self, data): + return data['image']['serverId'] + class ControllerV11(Controller): """Version 1.1 specific controller logic.""" @@ -153,6 +159,9 @@ class ControllerV11(Controller): base_url = request.application_url return images_view.ViewBuilderV11(base_url) + def _server_id_from_req_data(self, data): + return data['image']['serverRef'] + def create_resource(version='1.0'): controller = { diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 17d6d591c..e9b46f933 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -143,7 +143,7 @@ def stub_out_networking(stubs): def stub_out_compute_api_snapshot(stubs): def snapshot(self, context, instance_id, name): - return 123 + return dict(id='123') stubs.Set(nova.compute.API, 'snapshot', snapshot) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 9f1f28611..961c271ca 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -249,6 +249,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) self.fixtures = self._make_image_fixtures() fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures) + fakes.stub_out_compute_api_snapshot(self.stubs) def tearDown(self): """Run after each test.""" @@ -871,6 +872,46 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) + def test_create_image(self): + + body = dict(image=dict(serverId='123', name='Backup 1')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + + def test_create_image_no_server_id(self): + + body = dict(image=dict(name='Backup 1')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_v1_1(self): + + body = dict(image=dict(serverRef='123', name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = 
req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + + def test_create_image_v1_1_no_server_ref(self): + + body = dict(image=dict(name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + @classmethod def _make_image_fixtures(cls): image_id = 123 -- cgit From a9f21962a9e1e703730fbfae120129618b7a79ca Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 3 Jun 2011 09:24:46 -0400 Subject: Fixed pylint: no metadata member in models.py --- nova/db/sqlalchemy/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index edb7ffe4b..82b521e77 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -47,6 +47,7 @@ class NovaBase(object): updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) + metadata = None def save(self, session=None): """Save this object.""" -- cgit From 0ef4a127e9539f90ac1d2f2846832ecc48b51e05 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 09:31:43 -0400 Subject: Add serverRef to image metadata serialization list. --- nova/api/openstack/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 48ea04248..1fa3267dc 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -177,7 +177,7 @@ def create_resource(version='1.0'): metadata = { "attributes": { "image": ["id", "name", "updated", "created", "status", - "serverId", "progress"], + "serverId", "progress", "serverRef"], "link": ["rel", "type", "href"], }, } -- cgit From b45d07ded9db7c92e03cea1427413d4dda95d869 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 10:23:38 -0400 Subject: Make libvirt snapshotting work with images that don't have an 'architecture' property. 
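Glance does not require images to carry an 'architecture' property, and the old code read base['properties']['architecture'] unconditionally, so snapshotting such an image died with a KeyError. A minimal sketch of the guard added below; the image dicts here are stand-ins, not real glance records:

    def snapshot_properties(base):
        # Copy 'architecture' into the snapshot metadata only when the
        # base image actually has it, as the patch below does.
        properties = {'image_state': 'available'}
        if 'architecture' in base['properties']:
            properties['architecture'] = base['properties']['architecture']
        return properties

    assert 'architecture' not in snapshot_properties({'properties': {}})
    assert snapshot_properties(
        {'properties': {'architecture': 'x86_64'}})['architecture'] == 'x86_64'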
--- nova/virt/libvirt/connection.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index c491418ae..98cdff311 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -403,8 +403,7 @@ class LibvirtConnection(driver.ComputeDriver): 'is_public': False, 'status': 'active', 'name': snapshot['name'], - 'properties': {'architecture': - base['properties']['architecture'], + 'properties': { 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', @@ -412,6 +411,9 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': instance['ramdisk_id'], } } + if 'architecture' in base['properties']: + arch = base['properties']['architecture'] + metadata['properties']['architecture'] = arch # Make the snapshot snapshot_name = uuid.uuid4().hex -- cgit From 5b00ca3ac874d0fff1eb2835cd4219f49d8a169f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 3 Jun 2011 11:08:43 -0400 Subject: Set pylint to ignore correct lines that it could not determine were correct, due to the means by which eventlet.green imported subprocess Minimized the number of these lines to ignore --- nova/utils.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..4e1b7c26a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -142,24 +142,26 @@ def execute(*cmd, **kwargs): env = os.environ.copy() if addl_env: env.update(addl_env) + _PIPE = subprocess.PIPE #pylint: disable=E1101 obj = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, env=env) result = None if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() - obj.stdin.close() - if obj.returncode: - LOG.debug(_('Result was %s') % obj.returncode) + obj.stdin.close() #pylint: disable=E1101 + _returncode = obj.returncode #pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) if type(check_exit_code) == types.IntType \ - and obj.returncode != check_exit_code: + and _returncode != check_exit_code: (stdout, stderr) = result raise exception.ProcessExecutionError( - exit_code=obj.returncode, + exit_code=_returncode, stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) -- cgit From 24a90512f20310007f4ca8ab01da8e19a6b5bf6f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 3 Jun 2011 11:28:49 -0400 Subject: Removed unused and erroneous (yes, it was both) function --- nova/api/ec2/admin.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..4d981f70b 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -325,7 +325,3 @@ class AdminController(object): rv.append(host_dict(host, compute, instances, volume, volumes, now)) return {'hosts': rv} - - def describe_host(self, _context, name, **_kwargs): - """Returns status info for single node.""" - return host_dict(db.host_get(name)) -- cgit From eadabab8b70bdc4789615844e2263cbed7aa283c Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 11:34:49 -0400 Subject: Added a test case for XML serialization. 
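The new test requests an application/xml response and compares parsed documents instead of raw strings. A small illustration, not taken from the patch, of why both sides go through minidom before the comparison:

    from xml.dom import minidom

    # Superficial formatting differences disappear once both documents
    # are parsed and re-serialized with toxml().
    a = minidom.parseString('<image id="123"></image>')
    b = minidom.parseString('<image id="123"/>')
    assert a.toxml() == b.toxml()

The test itself also strips literal spaces from the response body and the expected document before parsing, so indentation in the expected string cannot affect the result.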
--- nova/tests/api/openstack/fakes.py | 3 ++- nova/tests/api/openstack/test_images.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index e9b46f933..601c1e9e4 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -143,7 +143,8 @@ def stub_out_networking(stubs): def stub_out_compute_api_snapshot(stubs): def snapshot(self, context, instance_id, name): - return dict(id='123') + return dict(id='123', status='ACTIVE', + properties=dict(instance_id='123')) stubs.Set(nova.compute.API, 'snapshot', snapshot) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 961c271ca..ae7025146 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -902,6 +902,39 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) + def test_create_image_v1_1_xml_serialization(self): + + body = dict(image=dict(serverRef='123', name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + req.headers["accept"] = "application/xml" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + resp_xml = minidom.parseString(response.body.replace(" ", "")) + expected_href = "http://localhost/v1.1/images/123" + expected_image = minidom.parseString(""" + + + + + + + + """.replace(" ", "") % (locals())) + + self.assertEqual(expected_image.toxml(), resp_xml.toxml()) + def test_create_image_v1_1_no_server_ref(self): body = dict(image=dict(name='Backup 1')) -- cgit From 25c8e9318c1ffbf2f2c88d3ed644df9e81b92b04 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 3 Jun 2011 11:52:20 -0400 Subject: Fixed pip-requires double requirement. --- tools/pip-requires | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/pip-requires b/tools/pip-requires index 035e4347d..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,4 +34,3 @@ coverage nosexcover GitPython paramiko -nova_adminclient -- cgit From f521426039e8a9cc5dccc2c7e7e1797cfe778d7e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 14:14:28 -0400 Subject: Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes issue where snapshots failed to get uploaded. --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0c00d168b..46031ebe8 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -244,7 +244,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): conn = httplib.HTTPConnection(glance_host, glance_port) # NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request - conn.putrequest('PUT', '/images/%s' % image_id) + conn.putrequest('PUT', '/v1/images/%s' % image_id) # NOTE(sirp): There is some confusion around OVF. 
Here's a summary of # where we currently stand: -- cgit From f6aa513024e14975709ef8facf1db6535eefbc44 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Fri, 3 Jun 2011 13:20:34 -0500 Subject: added 'nova-manage config list' which will list out all of the flags and their values. I also alphabetized the list of available categories --- bin/nova-manage | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..fb3810779 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1081,24 +1081,35 @@ class ImageCommands(object): self._convert_images(machine_images) +class ConfigCommands(object): + """Class for exposing the flags defined by flag_file(s).""" + + def __init__(self): + pass + + def list(self): + print FLAGS.FlagsIntoString() + + CATEGORIES = [ - ('user', UserCommands), ('account', AccountCommands), - ('project', ProjectCommands), - ('role', RoleCommands), - ('shell', ShellCommands), - ('vpn', VpnCommands), + ('config', ConfigCommands), + ('db', DbCommands), ('fixed', FixedIpCommands), + ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), + ('instance_type', InstanceTypeCommands), + ('image', ImageCommands), ('network', NetworkCommands), - ('vm', VmCommands), + ('project', ProjectCommands), + ('role', RoleCommands), ('service', ServiceCommands), - ('db', DbCommands), + ('shell', ShellCommands), + ('user', UserCommands), + ('version', VersionCommands), + ('vm', VmCommands), ('volume', VolumeCommands), - ('instance_type', InstanceTypeCommands), - ('image', ImageCommands), - ('flavor', InstanceTypeCommands), - ('version', VersionCommands)] + ('vpn', VpnCommands)] def lazy_match(name, key_value_tuples): -- cgit From 9c38da46d121e65707346473e6d51da3a2cf021f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 09:18:13 -0400 Subject: Fixed incorrect exception --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index cf1a84cd5..6970a2168 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -738,7 +738,7 @@ def fixed_ip_get_all_by_instance(context, instance_id): filter_by(instance_id=instance_id).\ filter_by(deleted=False) if not rv: - raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id) + raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) return rv -- cgit From ec5e5bcd3592dca44d1d71455ccd99e2c7f24d26 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 10:49:29 -0400 Subject: Small pylint fixes --- nova/api/openstack/extensions.py | 6 ++++-- nova/api/openstack/views/limits.py | 9 --------- nova/tests/xenapi/stubs.py | 4 ++-- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 881b61733..9dad2f48d 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -137,7 +137,8 @@ class ActionExtensionResource(wsgi.Resource): def __init__(self, application): controller = ActionExtensionController(application) - super(ActionExtensionResource, self).__init__(controller) + #super(ActionExtensionResource, self).__init__(controller) + wsgi.Resource.__init__(self, controller) def add_action(self, action_name, handler): self.controller.add_action(action_name, handler) @@ -164,7 +165,8 @@ class RequestExtensionResource(wsgi.Resource): def __init__(self, application): controller = 
RequestExtensionController(application) - super(RequestExtensionResource, self).__init__(controller) + #super(RequestExtensionResource, self).__init__(controller) + wsgi.Resource.__init__(self, controller) def add_handler(self, handler): self.controller.add_handler(handler) diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index e21c9f2fd..934b4921a 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -29,9 +29,6 @@ class ViewBuilder(object): def _build_rate_limit(self, rate_limit): raise NotImplementedError() - def _build_absolute_limits(self, absolute_limit): - raise NotImplementedError() - def build(self, rate_limits, absolute_limits): rate_limits = self._build_rate_limits(rate_limits) absolute_limits = self._build_absolute_limits(absolute_limits) @@ -67,12 +64,6 @@ class ViewBuilder(object): limits[name] = value return limits - def _build_rate_limits(self, rate_limits): - raise NotImplementedError() - - def _build_rate_limit(self, rate_limit): - raise NotImplementedError() - class ViewBuilderV10(ViewBuilder): """Openstack API v1.0 limits view builder.""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 35308d95f..5d2d1641a 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -251,10 +251,10 @@ class FakeSessionForMigrationTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForMigrationTests, self).__init__(uri) - def VDI_get_by_uuid(*args): + def VDI_get_by_uuid(self, *args): return 'hurr' - def VDI_resize_online(*args): + def VDI_resize_online(self, *args): pass def VM_start(self, _1, ref, _2, _3): -- cgit From 3fb0b8fd8e4ad5911c85fddcb6ef5127fa4cd384 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 11:00:51 -0400 Subject: Removed extraneous code --- nova/tests/xenapi/stubs.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 5d2d1641a..151a3e909 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -42,20 +42,6 @@ def stubout_instance_snapshot(stubs): stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) - def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, - original_parent_uuid): - from nova.virt.xenapi.fake import create_vdi - name_label = "instance-%s" % instance_id - #TODO: create fake SR record - sr_ref = "fakesr" - vdi_ref = create_vdi(name_label=name_label, read_only=False, - sr_ref=sr_ref, sharable=False) - vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) - vdi_uuid = vdi_rec['uuid'] - return vdi_uuid - - stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) - def fake_parse_xmlrpc_value(val): return val -- cgit From a2f74c2f706bdf45ec36348468b1ba5797fcde87 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 11:20:25 -0400 Subject: Use super on an old style class --- nova/twistd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index c07ed991f..15cf67825 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped): self._absorbParameters() self._absorbHandlers() - super(TwistedOptionsToFlags, self).__init__() + wrapped.__init__(self) def _absorbFlags(self): twistd_flags = [] @@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped): def parseArgs(self, *args): # TODO(termie): figure out a decent way of dealing with args #return - super(TwistedOptionsToFlags, self).parseArgs(*args) + 
wrapped.parseArgs(self, *args) def postOptions(self): self._doHandlers() - super(TwistedOptionsToFlags, self).postOptions() + wrapped.postOptions(self) def __getitem__(self, key): key = key.replace('-', '_') -- cgit From 0eb6db6f994963d519f9fe07e3dbc41e0c8079c6 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 11:29:05 -0400 Subject: Removed Duplicate method --- nova/virt/xenapi/fake.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 76988b172..5d3b67417 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -340,10 +340,6 @@ class SessionBase(object): return db_ref['xenstore_data'][key] = None - def network_get_all_records_where(self, _1, _2): - # TODO (salvatore-orlando): filter table on _2 - return _db_content['network'] - def VM_add_to_xenstore_data(self, _1, vm_ref, key, value): db_ref = _db_content['VM'][vm_ref] if not 'xenstore_data' in db_ref: @@ -354,7 +350,7 @@ class SessionBase(object): #Always return 12GB available return 12 * 1024 * 1024 * 1024 - def host_call_plugin(*args): + def host_call_plugin(self, *args): return 'herp' def network_get_all_records_where(self, _1, filter): -- cgit From 3d481e551ac81a35cafcd79c2b17d2bd9c8a050f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 11:39:34 -0400 Subject: Ignore complaining about dynamic definition --- nova/api/direct.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index ea20042a7..ea7425e19 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -324,7 +324,7 @@ class Limited(object): def __init__(self, proxy): self._proxy = proxy - if not self.__doc__: + if not self.__doc__: #pylint: disable=E0203 self.__doc__ = proxy.__doc__ if not self._allowed: self._allowed = [] -- cgit From 267178748e712098af4e55872029c5883af9a51c Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 6 Jun 2011 12:42:27 -0400 Subject: Change to a more generic error and update documentation. --- nova/db/sqlalchemy/api.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 56739e9db..6dbf53a6c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -60,9 +60,7 @@ def is_user_context(context): def authorize_project_context(context, project_id): - """Ensures that the request context has permission to access the - given project. - """ + """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project: raise exception.NotAuthorized() @@ -71,9 +69,7 @@ def authorize_project_context(context, project_id): def authorize_user_context(context, user_id): - """Ensures that the request context has permission to access the - given user. - """ + """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user: raise exception.NotAuthorized() @@ -89,9 +85,12 @@ def can_read_deleted(context): def require_admin_context(f): - """Decorator used to indicate that the method requires an - administrator context. + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. 
+ """ + def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() @@ -100,12 +99,19 @@ def require_admin_context(f): def require_context(f): - """Decorator used to indicate that the method requires either - an administrator or normal user context. + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + """ + def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): - raise exception.AdminRequired() + raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper -- cgit From 9fca0b2156f1e7f3d007916ef18b2ed9fbc761df Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 6 Jun 2011 15:59:20 -0400 Subject: Added test case for snapshoting base image without architecture. --- nova/tests/test_libvirt.py | 92 +++++++++++++++++++++++++++++++++------------- 1 file changed, 66 insertions(+), 26 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index b6b36745a..d0bdaa738 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import eventlet import mox import os @@ -125,6 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase): class LibvirtConnTestCase(test.TestCase): + def setUp(self): super(LibvirtConnTestCase, self).setUp() connection._late_load_cheetah() @@ -207,6 +209,31 @@ class LibvirtConnTestCase(test.TestCase): self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') connection.LibvirtConnection._conn = fake + def fake_lookup(self, instance_name): + class FakeVirtDomain(object): + + def __init__(self): + pass + + def snapshotCreateXML(self, *args): + return None + + def XMLDesc(self, *args): + return """ + + + + + + + + """ + + return FakeVirtDomain() + + def fake_execute(self, *args): + open(args[-1], "a").close() + def create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), 'binary': 'nova-compute', @@ -283,43 +310,56 @@ class LibvirtConnTestCase(test.TestCase): self._check_xml_and_container(instance_data) def test_snapshot(self): + if not self.lazy_load_library_exists(): + return + FLAGS.image_service = 'nova.image.fake.FakeImageService' - # Only file-based instance storages are supported at the moment - test_xml = """ - - - - - - - - """ + # Start test + image_service = utils.import_object(FLAGS.image_service) - class FakeVirtDomain(object): + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} + # Create new image. 
It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) - def __init__(self): - pass + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = self.fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = self.fake_execute - def snapshotCreateXML(self, *args): - return None + self.mox.ReplayAll() - def XMLDesc(self, *args): - return test_xml + conn = connection.LibvirtConnection(False) + conn.snapshot(instance_ref, recv_meta['id']) - def fake_lookup(instance_name): - if instance_name == instance_ref.name: - return FakeVirtDomain() + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['name'], snapshot_name) - def fake_execute(*args): - # Touch filename to pass 'with open(out_path)' - open(args[-1], "a").close() + def test_snapshot_no_image_architecture(self): + if not self.lazy_load_library_exists(): + return + + FLAGS.image_service = 'nova.image.fake.FakeImageService' # Start test image_service = utils.import_object(FLAGS.image_service) + # Assign image_ref = 2 from nova/images/fakes for testing different + # base image + test_instance = copy.deepcopy(self.test_instance) + test_instance["image_ref"] = "2" + # Assuming that base image already exists in image_service - instance_ref = db.instance_create(self.context, self.test_instance) + instance_ref = db.instance_create(self.context, test_instance) properties = {'instance_id': instance_ref['id'], 'user_id': str(self.context.user_id)} snapshot_name = 'test-snap' @@ -330,9 +370,9 @@ class LibvirtConnTestCase(test.TestCase): recv_meta = image_service.create(context, sent_meta) self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') - connection.LibvirtConnection._conn.lookupByName = fake_lookup + connection.LibvirtConnection._conn.lookupByName = self.fake_lookup self.mox.StubOutWithMock(connection.utils, 'execute') - connection.utils.execute = fake_execute + connection.utils.execute = self.fake_execute self.mox.ReplayAll() -- cgit From 57df676a3302f8d754ef54e415d2fd82a4291f49 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 6 Jun 2011 15:59:39 -0400 Subject: Removed commented code --- nova/api/openstack/extensions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 9dad2f48d..54e17e23d 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -137,7 +137,6 @@ class ActionExtensionResource(wsgi.Resource): def __init__(self, application): controller = ActionExtensionController(application) - #super(ActionExtensionResource, self).__init__(controller) wsgi.Resource.__init__(self, controller) def add_action(self, action_name, handler): @@ -165,7 +164,6 @@ class RequestExtensionResource(wsgi.Resource): def __init__(self, application): controller = RequestExtensionController(application) - #super(RequestExtensionResource, self).__init__(controller) wsgi.Resource.__init__(self, controller) def add_handler(self, handler): -- cgit From e745c21724e5990874a12c4abff53127755185ea Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 6 Jun 2011 16:08:58 -0400 Subject: Use True/False instead of 1/0 when setting updating 'deleted' column attributes.Fixes casting issues when running nova 
with Postgres. --- nova/db/sqlalchemy/api.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6dbf53a6c..103668b94 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1127,7 +1127,7 @@ def key_pair_destroy_all_by_user(context, user_id): with session.begin(): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1659,7 +1659,7 @@ def volume_destroy(context, volume_id): with session.begin(): session.query(models.Volume).\ filter_by(id=volume_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ @@ -1817,7 +1817,7 @@ def snapshot_destroy(context, snapshot_id): with session.begin(): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1972,17 +1972,17 @@ def security_group_destroy(context, security_group_id): with session.begin(): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1993,11 +1993,11 @@ def security_group_destroy_all(context, session=None): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2678,7 +2678,7 @@ def instance_metadata_update_or_create(context, instance_id, metadata): meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": value, "instance_id": instance_id, - "deleted": 0}) + "deleted": False}) meta_ref.save(session=session) return metadata -- cgit From 727317333978ac5cf0fb1cd3f86e49e9868f1e19 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 6 Jun 2011 17:58:40 -0700 Subject: fixed up tests after trunk merge --- nova/api/openstack/zones.py | 13 ++++--------- nova/compute/api.py | 10 +++++----- nova/tests/api/openstack/test_zones.py | 3 +++ 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 330aee85f..0f83afb34 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -58,12 +58,7 @@ def check_encryption_key(func): return wrapped -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "zone": ["id", "api_url", "name", "capabilities"]}}} +class Controller(object): def index(self, req): """Return all zones in brief""" @@ -114,12 +109,12 @@ class 
Controller(common.OpenstackController): return dict(zone=_scrub_zone(zone)) @check_encryption_key - def select(self, req): + def select(self, req, body): """Returns a weighted list of costs to create instances of desired capabilities.""" ctx = req.environ['nova.context'] - json_specs = json.loads(req.body) - specs = json.loads(json_specs) + print "**** ZONES ", body + specs = json.loads(body) build_plan = api.select(ctx, specs=specs) cooked = self._scrub_build_plan(build_plan) return {"weights": cooked} diff --git a/nova/compute/api.py b/nova/compute/api.py index 24f04f226..e09127d5c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -134,7 +134,7 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def _check_create_parameters(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -300,7 +300,7 @@ class API(base.Base): "injected_files": injected_files}}) def create_all_at_once(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -312,7 +312,7 @@ class API(base.Base): num_instances, base_options, security_groups = \ self._check_create_parameters( context, instance_type, - image_id, kernel_id, ramdisk_id, + image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, @@ -328,7 +328,7 @@ class API(base.Base): return base_options['reservation_id'] def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -346,7 +346,7 @@ class API(base.Base): num_instances, base_options, security_groups = \ self._check_create_parameters( context, instance_type, - image_id, kernel_id, ramdisk_id, + image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index e21b5ce86..fc70a1679 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -210,11 +210,14 @@ class ZonesTest(test.TestCase): req = webob.Request.blank('/v1.0/zones/select') req.method = 'POST' + req.headers["Content-Type"] = "application/json" # Select queries end up being JSON encoded twice. 
# Once to a string and again as an HTTP POST Body req.body = json.dumps(json.dumps({})) + print "********** BODY", req.body res = req.get_response(fakes.wsgi_app()) + print "********** RES", res res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) -- cgit From 225c8cb8843de17abe192b5efc7c0bd9db0b4d75 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 6 Jun 2011 19:05:31 -0700 Subject: sanity check --- nova/api/openstack/zones.py | 1 - nova/scheduler/zone_aware_scheduler.py | 1 + nova/tests/api/openstack/test_zones.py | 2 -- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 0f83afb34..b2f7898cb 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -113,7 +113,6 @@ class Controller(object): """Returns a weighted list of costs to create instances of desired capabilities.""" ctx = req.environ['nova.context'] - print "**** ZONES ", body specs = json.loads(body) build_plan = api.select(ctx, specs=specs) cooked = self._scrub_build_plan(build_plan) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index c125c7436..faa969124 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -22,6 +22,7 @@ across zones. There are two expansion points to this class for: import operator import json + import M2Crypto import novaclient diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index fc70a1679..098577e4c 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -215,9 +215,7 @@ class ZonesTest(test.TestCase): # Once to a string and again as an HTTP POST Body req.body = json.dumps(json.dumps({})) - print "********** BODY", req.body res = req.get_response(fakes.wsgi_app()) - print "********** RES", res res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) -- cgit From 8747611e4bd69b6da204b2c021fd5400c961db1d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 7 Jun 2011 10:47:29 -0400 Subject: Removed empty init --- nova/tests/test_libvirt.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d0bdaa738..8b4183164 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -210,10 +210,8 @@ class LibvirtConnTestCase(test.TestCase): connection.LibvirtConnection._conn = fake def fake_lookup(self, instance_name): - class FakeVirtDomain(object): - def __init__(self): - pass + class FakeVirtDomain(object): def snapshotCreateXML(self, *args): return None -- cgit From 7bae412d230171baf1ba7bec7262705404d1ed7f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 7 Jun 2011 10:47:14 -0500 Subject: Add the option to specify a default IPv6 gateway. 
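A sketch of the fallback behaviour this adds, written against the same IPy library the network manager already uses; the CIDR and gateway values are only examples:

    import IPy

    def pick_gateway_v6(cidr_v6, gateway_v6=None):
        # Prefer the operator-supplied gateway; otherwise keep the old
        # behaviour of defaulting to the first address in the subnet.
        if gateway_v6:
            return str(gateway_v6)
        return str(IPy.IP(cidr_v6)[1])

    # pick_gateway_v6('fd00::/64')              -> first host address (fd00::1)
    # pick_gateway_v6('fd00::/64', 'fd00::1:1') -> 'fd00::1:1'

The new --gateway_v6 flag (and the matching nova-manage argument) is optional, so existing deployments keep the per-subnet default.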
--- bin/nova-manage | 13 +++++++++---- nova/network/manager.py | 11 +++++++++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b0cd343f5..7f024f9ca 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -96,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') +flags.DECLARE('gateway_v6', 'nova.network.manager') flags.DECLARE('images_path', 'nova.image.local') flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) @@ -545,13 +546,14 @@ class FloatingIpCommands(object): class NetworkCommands(object): """Class for managing networks.""" - def create(self, fixed_range=None, num_networks=None, - network_size=None, vlan_start=None, - vpn_start=None, fixed_range_v6=None, label='public'): + def create(self, fixed_range=None, num_networks=None, network_size=None, + vlan_start=None, vpn_start=None, fixed_range_v6=None, + gateway_v6=None, label='public'): """Creates fixed ips for host by range arguments: fixed_range=FLAG, [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], - [vpn_start=FLAG], [fixed_range_v6=FLAG]""" + [vpn_start=FLAG], [fixed_range_v6=FLAG], + [gateway_v6=FLAG]""" if not fixed_range: msg = _('Fixed range in the form of 10.0.0.0/8 is ' 'required to create networks.') @@ -567,6 +569,8 @@ class NetworkCommands(object): vpn_start = FLAGS.vpn_start if not fixed_range_v6: fixed_range_v6 = FLAGS.fixed_range_v6 + if not gateway_v6: + gateway_v6 = FLAGS.gateway_v6 net_manager = utils.import_object(FLAGS.network_manager) try: net_manager.create_networks(context.get_admin_context(), @@ -576,6 +580,7 @@ class NetworkCommands(object): vlan_start=int(vlan_start), vpn_start=int(vpn_start), cidr_v6=fixed_range_v6, + gateway_v6=gateway_v6, label=label) except ValueError, e: print e diff --git a/nova/network/manager.py b/nova/network/manager.py index f726c4b26..b5352ca0f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -86,6 +86,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block') +flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway') flags.DEFINE_integer('cnt_vpn_clients', 0, 'Number of addresses reserved for vpn clients') flags.DEFINE_string('network_driver', 'nova.network.linux_net', @@ -292,7 +293,7 @@ class NetworkManager(manager.SchedulerDependentManager): return host def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, label, *args, **kwargs): + cidr_v6, gateway_v6, label, *args, **kwargs): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) @@ -324,7 +325,13 @@ class NetworkManager(manager.SchedulerDependentManager): significant_bits_v6) net['cidr_v6'] = cidr_v6 project_net_v6 = IPy.IP(cidr_v6) - net['gateway_v6'] = str(project_net_v6[1]) + + if gateway_v6: + # use a pre-defined gateway if one is provided + net['gateway_v6'] = str(gateway_v6) + else: + net['gateway_v6'] = str(project_net_v6[1]) + net['netmask_v6'] = str(project_net_v6.prefixlen()) network_ref = self.db.network_create_safe(context, net) -- cgit From aa343c994c4738374bd91531ae2e260175690a56 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 7 Jun 2011 11:45:25 -0500 
Subject: Remove unnecessary docstrings. --- bin/nova-manage | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 7f024f9ca..0147ae21b 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -549,11 +549,7 @@ class NetworkCommands(object): def create(self, fixed_range=None, num_networks=None, network_size=None, vlan_start=None, vpn_start=None, fixed_range_v6=None, gateway_v6=None, label='public'): - """Creates fixed ips for host by range - arguments: fixed_range=FLAG, [num_networks=FLAG], - [network_size=FLAG], [vlan_start=FLAG], - [vpn_start=FLAG], [fixed_range_v6=FLAG], - [gateway_v6=FLAG]""" + """Creates fixed ips for host by range""" if not fixed_range: msg = _('Fixed range in the form of 10.0.0.0/8 is ' 'required to create networks.') -- cgit From e8d6740fefcac3734021edaf53a40ecb145ccaa3 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 7 Jun 2011 13:47:40 -0400 Subject: DRY up the image_state logic. Fix an issue where glance style images (which aren't required to have an 'image_state' property) couldn't be used to run instances on the EC2 controller. --- nova/api/ec2/cloud.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ac73cd595..316298c39 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -136,6 +136,13 @@ class CloudController(object): return services[0]['availability_zone'] return 'unknown zone' + def _get_image_state(self, image): + # NOTE(vish): fallback status if image_state isn't set + state = image.get('status') + if state == 'active': + state = 'available' + return image['properties'].get('image_state', state) + def get_metadata(self, address): ctxt = context.get_admin_context() instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) @@ -896,14 +903,13 @@ class CloudController(object): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] image = self._get_image(context, kwargs['image_id']) - if not image: + + if image: + image_state = self._get_image_state(image) + else: raise exception.ImageNotFound(image_id=kwargs['image_id']) - try: - available = (image['properties']['image_state'] == 'available') - except KeyError: - available = False - if not available: + if image_state != 'available': raise exception.ApiError(_('Image must be available')) instances = self.compute_api.create(context, @@ -1021,11 +1027,8 @@ class CloudController(object): get('image_location'), name) else: i['imageLocation'] = image['properties'].get('image_location') - # NOTE(vish): fallback status if image_state isn't set - state = image.get('status') - if state == 'active': - state = 'available' - i['imageState'] = image['properties'].get('image_state', state) + + i['imageState'] = self._get_image_state(image) i['displayName'] = name i['description'] = image.get('description') display_mapping = {'aki': 'kernel', -- cgit From 641f16a5343ca5d95ea10ec5031a27a7f131c337 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Tue, 7 Jun 2011 15:17:34 -0400 Subject: pep8 --- nova/api/direct.py | 2 +- nova/utils.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index ea7425e19..ec79151b1 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -324,7 +324,7 @@ class Limited(object): def __init__(self, proxy): self._proxy = proxy - if not self.__doc__: #pylint: disable=E0203 + if not self.__doc__: # pylint: disable=E0203 
self.__doc__ = proxy.__doc__ if not self._allowed: self._allowed = [] diff --git a/nova/utils.py b/nova/utils.py index e77c80262..691134ada 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -142,7 +142,7 @@ def execute(*cmd, **kwargs): env = os.environ.copy() if addl_env: env.update(addl_env) - _PIPE = subprocess.PIPE #pylint: disable=E1101 + _PIPE = subprocess.PIPE # pylint: disable=E1101 obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, @@ -153,8 +153,8 @@ def execute(*cmd, **kwargs): result = obj.communicate(process_input) else: result = obj.communicate() - obj.stdin.close() #pylint: disable=E1101 - _returncode = obj.returncode #pylint: disable=E1101 + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 if _returncode: LOG.debug(_('Result was %s') % _returncode) if type(check_exit_code) == types.IntType \ -- cgit From 8f93aa59aca5440a4d9668942703bf235379ed59 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 7 Jun 2011 16:05:03 -0400 Subject: Added test_run_instances_image_status_active to test_cloud. --- nova/tests/test_cloud.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a58e8bc39..ba133c860 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -487,6 +487,21 @@ class CloudTestCase(test.TestCase): self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) + def test_run_instances_image_status_active(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + + def fake_show_stat_active(self, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'status': 'active'} + + self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active) + + result = run_instances(self.context, **kwargs) + self.assertEqual(len(result['instancesSet']), 1) + def test_terminate_instances(self): inst1 = db.instance_create(self.context, {'reservation_id': 'a', 'image_ref': 1, -- cgit From dcb0d38aa829e1e2492defffaf6ad393b809289b Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 8 Jun 2011 08:13:23 -0700 Subject: removed straggler code --- nova/compute/api.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e09127d5c..b0949a729 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -55,11 +55,6 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - # Should we create instances all-at-once or as single-shot requests. - # Different schedulers use different approaches. - # This is cached across all API instances. - should_create_all_at_once = None # None implies uninitialized. - def __init__(self, image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): -- cgit
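The image_state fallback from the 'DRY up the image_state logic' patch above, which the new test_run_instances_image_status_active test exercises, boils down to the following sketch; the image dicts are hypothetical, not real glance records:

    def get_image_state(image):
        # EC2-registered images carry an explicit image_state property;
        # plain glance images may only have a status, where 'active'
        # is treated as usable.
        state = image.get('status')
        if state == 'active':
            state = 'available'
        return image['properties'].get('image_state', state)

    # An explicit image_state property wins when present.
    assert get_image_state(
        {'status': 'active',
         'properties': {'image_state': 'decrypting'}}) == 'decrypting'
    # A glance image with status 'active' now counts as 'available'.
    assert get_image_state({'status': 'active', 'properties': {}}) == 'available'

run_instances only proceeds when the resulting state is 'available'.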