From f835c01f41fdba5791190b9275775ae7fcfcafc6 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 18 Mar 2011 20:35:44 -0400 Subject: * committing ovs scripts --- .../xensource/scripts/ovs_configure_base_flows.py | 68 ++++++++ .../xensource/scripts/ovs_configure_vif_flows.py | 194 +++++++++++++++++++++ .../networking/etc/xensource/scripts/vif_rules.py | 9 +- 3 files changed, 267 insertions(+), 4 deletions(-) create mode 100755 plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py create mode 100755 plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py new file mode 100755 index 000000000..c46fb4b60 --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This script is used to configure base openvswitch flows for XenServer hosts. +""" + +import os +import subprocess +import sys + + +PNIC_NAME="eth1" +XEN_BRIDGE="xenbr1" + +def main(dom_id, command, only_this_vif=None): + pnic_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', PNIC_NAME, + 'ofport', return_stdout=True) + ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) + + # clear all flows + ovs_ofctl('del-flows', XEN_BRIDGE) + + # these flows are lower priority than all VM-specific flows. + + # allow all traffic from the physical NIC, as it is trusted (i.e., from a + # filtered vif, or from the physical infrastructure + ovs_ofctl('add-flow', XEN_BRIDGE, + "priority=2,in_port=%s,action=normal" % pnic_ofport) + + # default drop + ovs_ofctl('add-flow', XEN_BRIDGE, 'priority=1,action=drop') + + +def execute(*command, return_stdout=False): + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + if return_stdout: + return proc.stdout.read() + else: + return None + + +if __name__ == "__main__": + if sys.argv: + print "This script configures base ovs flows." + print "usage: %s" % os.path.basename(sys.argv[0]) + sys.exit(1) + else: + main() diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py new file mode 100755 index 000000000..a77bbbf4b --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This script is used to configure openvswitch flows on XenServer hosts. +""" + +import os +import subprocess +import sys + +# This is written to Python 2.4, since that is what is available on XenServer +import simplejson as json + + +XEN_BRIDGE = 'xenbr1' +OVS_OFCTL = '/usr/bin/ovs-ofctl' + + +def execute(*command, return_stdout=False): + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + if return_stdout: + return proc.stdout.read() + else: + return None + + +class OvsFlow(): + def __init__(self, command, params, bridge=None): + self.command = command + self.params = params + self.bridge = bridge or XEN_BRIDGE + + def add(self, rule): + execute(OVS_OFCTL, 'add-flow', self.bridge, rule) + + def delete(self, rule): + execute(OVS_OFCTL, 'del-flow', self.bridge, rule) + + def apply(self, rule): + self.delete(rule % self.params) + if self.command == 'online': + self.add(rule % params) + + +def main(dom_id, command, net, only_this_vif=None): + vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', + only_this_vif, 'ofport', return_stdout=True) + + xsls = execute('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id, + return_stdout=True) + macs = [line.split("=")[0].strip() for line in xsls.splitlines()] + + for mac in macs: + xsread = execute('/usr/bin/enstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac), True) + data = json.loads(xsread) + if data["label"] == "public": + vif = "vif%s.0" % dom_id + else: + vif = "vif%s.1" % dom_id + + if (only_this_vif is None) or (vif == only_this_vif): + params = dict(VIF=vif, MAC=data['mac']) + if net in ('ipv4', 'all'): + for ip4 in data['ips']: + params.update({'IP': ip4['ip']}) + apply_ovs_ipv4_flows(command, params) + if net in ('ipv6', 'all'): + for ip6 in data['ip6s']: + params.update({'IP': ip6['ip']}) + apply_ovs_ipv6_flows(command, params) + + +# usage: +# XEN_BRIDGE=xenbr1 +# VIF_NAME=$1 +# VIF_MAC=$2 +# VIF_IPv4=$3 +# VIF_GLOBAL_IPv6=$4 +# VIF_LOCAL_IPv6=$5 + +# # find the openflow port number associated with the vif interface +# VIF_OFPORT=`ovs-vsctl get Interface $VIF_NAME ofport` + +def apply_ovs_ipv4_flows(command, params): + flow = OvsFlow(command, params) + + # allow valid ARP outbound (both request / reply) + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," + "arp_sha=$VIF_MAC,nw_src=$VIF_IPv4,action=normal") + + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," + "arp_sha=$VIF_MAC,nw_src=0.0.0.0,action=normal") + + # allow valid IPv4 outbound + flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,ip," + "nw_src=$VIF_IPv4,action=normal") + + +def apply_ovs_ipv6_flows(command, params): + flow = OvsFlow(command, params) + + # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
+ # Neighbor Solicitation + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + "action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + "action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,action=normal") + + # Neighbor Advertisement + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136," + "nd_target=$VIF_LOCAL_IPv6,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136," + "nd_target=$VIF_GLOBAL_IPv6,action=normal") + flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136,action=normal") + + # drop all other neighbor discovery (required because we permit all icmp6 below) + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=135,action=drop") + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=136,action=drop") + + # do not allow sending specifc ICMPv6 types + # Router Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=134,action=drop") + # Redirect Gateway + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=137,action=drop") + # Mobile Prefix Solicitation + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=146,action=drop") + # Mobile Prefix Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=147,action=drop") + # Multicast Router Advertisement + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=151,action=drop") + # Multicast Router Solicitation + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=152,action=drop") + # Multicast Router Termination + flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=153,action=drop") + + # allow valid IPv6 outbound, by type + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,icmp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,icmp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,tcp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,tcp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_GLOBAL_IPv6,udp6,action=normal") + flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," + "ipv6_src=$VIF_LOCAL_IPv6,udp6,action=normal") + # all else will be dropped ... 
+ + +if __name__ == "__main__": + if len(sys.argv) < 3: + print "usage: %s dom_id online|offline ipv4|ipv6|all [vif]" % \ + os.path.basename(sys.argv[0]) + sys.exit(1) + else: + dom_id, command, net = sys.argv[1:4] + vif = len(sys.argv) == 5 and sys.argv[4] or None + main(dom_id, command, net, vif) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 48122e6d6..500e055d8 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -31,7 +31,8 @@ import simplejson as json def main(dom_id, command, only_this_vif=None): xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, True) + '/local/domain/%s/vm-data/networking' % dom_id, + return_stdout=True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: @@ -113,8 +114,8 @@ def apply_ebtables_rules(command, params): ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') - ebtables('-D', 'FORWARD', '-p', '0800', '-o', - params['VIF'], '--ip-dst', params['IP'], + ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'], + '--ip-dst', params['IP'], '-j', 'ACCEPT') if command == 'online': ebtables('-A', 'FORWARD', '-p', '0806', -- cgit From ab1bf7c0c12e205cd17b80be31226055cc90ef20 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Mon, 28 Mar 2011 21:00:44 +0000 Subject: minor fix and comment --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index a77bbbf4b..553811ab6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -59,10 +59,11 @@ class OvsFlow(): def apply(self, rule): self.delete(rule % self.params) if self.command == 'online': - self.add(rule % params) + self.add(rule % self.params) def main(dom_id, command, net, only_this_vif=None): + # FIXME(dubs) what to do when only_this_vif is None vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', only_this_vif, 'ofport', return_stdout=True) -- cgit From d7c51db418d554094c341639a0540ecfec8ddb19 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 14:43:04 +0000 Subject: lots of updates to ovs scripts --- .../xensource/scripts/ovs_configure_base_flows.py | 26 ++-- .../xensource/scripts/ovs_configure_vif_flows.py | 139 ++++++++++----------- .../networking/etc/xensource/scripts/vif_rules.py | 2 +- 3 files changed, 80 insertions(+), 87 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index c46fb4b60..1f3182e68 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 
shiftwidth=4 softtabstop=4 -# Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,26 +25,23 @@ import subprocess import sys -PNIC_NAME="eth1" -XEN_BRIDGE="xenbr1" - -def main(dom_id, command, only_this_vif=None): - pnic_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', PNIC_NAME, - 'ofport', return_stdout=True) +def main(phys_dev_name, bridge_name): + pnic_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport', return_stdout=True) ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) # clear all flows - ovs_ofctl('del-flows', XEN_BRIDGE) + ovs_ofctl('del-flows', bridge_name) # these flows are lower priority than all VM-specific flows. # allow all traffic from the physical NIC, as it is trusted (i.e., from a # filtered vif, or from the physical infrastructure - ovs_ofctl('add-flow', XEN_BRIDGE, + ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=%s,action=normal" % pnic_ofport) # default drop - ovs_ofctl('add-flow', XEN_BRIDGE, 'priority=1,action=drop') + ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') def execute(*command, return_stdout=False): @@ -60,9 +57,12 @@ def execute(*command, return_stdout=False): if __name__ == "__main__": - if sys.argv: + if len(sys.argv) != 3: + script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." - print "usage: %s" % os.path.basename(sys.argv[0]) + print "usage: %s phys-dev-name bridge-name" % script_name + print " ex: %s eth2 xenbr2" % script_name sys.exit(1) else: - main() + phys_dev_name, bridge_name = sys.argv[1:3] + main(phys_dev_name, bridge_name) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 553811ab6..7bad39830 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -28,6 +28,7 @@ import sys import simplejson as json +# FIXME(dubs) this needs to be able to be passed in, check xen vif script XEN_BRIDGE = 'xenbr1' OVS_OFCTL = '/usr/bin/ovs-ofctl' @@ -62,18 +63,14 @@ class OvsFlow(): self.add(rule % self.params) -def main(dom_id, command, net, only_this_vif=None): - # FIXME(dubs) what to do when only_this_vif is None - vif_ofport = execute('/usr/bin/ovs-ofctl', 'get', 'Interface', - only_this_vif, 'ofport', return_stdout=True) - +def main(dom_id, command, net_type, only_this_vif=None): xsls = execute('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id, return_stdout=True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/enstore-read', + xsread = execute('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac), True) data = json.loads(xsread) @@ -83,113 +80,109 @@ def main(dom_id, command, net, only_this_vif=None): vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): - params = dict(VIF=vif, MAC=data['mac']) - if net in ('ipv4', 'all'): + vif_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', + vif, 'ofport', return_stdout=True) + + params = dict(VIF_NAME=vif, + VIF_MAC=data['mac'], + VIF_OFPORT=vif_ofport) + if net_type in ('ipv4', 'all'): for ip4 in data['ips']: - params.update({'IP': ip4['ip']}) + params.update({'VIF_IPv4': ip4['ip']}) apply_ovs_ipv4_flows(command, params) - if net in ('ipv6', 'all'): + if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: - params.update({'IP': ip6['ip']}) + params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + # TODO(dubs) calculate v6 link local addr + #params.update({'VIF_LOCAL_IPv6': XXX}) apply_ovs_ipv6_flows(command, params) -# usage: -# XEN_BRIDGE=xenbr1 -# VIF_NAME=$1 -# VIF_MAC=$2 -# VIF_IPv4=$3 -# VIF_GLOBAL_IPv6=$4 -# VIF_LOCAL_IPv6=$5 - -# # find the openflow port number associated with the vif interface -# VIF_OFPORT=`ovs-vsctl get Interface $VIF_NAME ofport` - def apply_ovs_ipv4_flows(command, params): flow = OvsFlow(command, params) # allow valid ARP outbound (both request / reply) - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," - "arp_sha=$VIF_MAC,nw_src=$VIF_IPv4,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,arp," - "arp_sha=$VIF_MAC,nw_src=0.0.0.0,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") # allow valid IPv4 outbound - flow.apply("priority=3,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,ip," - "nw_src=$VIF_IPv4,action=normal") + flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," + "nw_src=%(VIF_IPv4)s,action=normal") def apply_ovs_ipv6_flows(command, params): flow = OvsFlow(command, params) # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
- # Neighbor Solicitation - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + # Neighbor Solicitation + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," "action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=135,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,nd_sll=$VIF_MAC," + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," "action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=135,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") # Neighbor Advertisement - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136," - "nd_target=$VIF_LOCAL_IPv6,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_LOCAL_IPv6,icmp_type=136,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136," - "nd_target=$VIF_GLOBAL_IPv6,action=normal") - flow.apply("priority=6,in_port=$VIF_OFPORT,dl_src=$VIF_MAC,icmp6," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=135,action=drop") - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=136,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=134,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") # Redirect Gateway - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=137,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") # Mobile Prefix Solicitation - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=146,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") # Mobile Prefix Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=147,action=drop") + 
flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") # Multicast Router Advertisement - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=151,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") # Multicast Router Solicitation - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=152,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") # Multicast Router Termination - flow.apply("priority=5,in_port=$VIF_OFPORT,icmp6,icmp_type=153,action=drop") + flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") # allow valid IPv6 outbound, by type - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,icmp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,icmp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,tcp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,tcp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_GLOBAL_IPv6,udp6,action=normal") - flow.apply("priority=4,in_port=$VIF_OFPORT,dl_src=$VIF_MAC," - "ipv6_src=$VIF_LOCAL_IPv6,udp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") # all else will be dropped ... 
if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline ipv4|ipv6|all [vif]" % \ + print "usage: %s dom_id online|offline ipv4|ipv6|all [vif_name]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: - dom_id, command, net = sys.argv[1:4] - vif = len(sys.argv) == 5 and sys.argv[4] or None - main(dom_id, command, net, vif) + dom_id, command, net_type = sys.argv[1:4] + vif_name = len(sys.argv) == 5 and sys.argv[4] or None + main(dom_id, command, net_type, vif_name) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 500e055d8..4e13bad9d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -36,7 +36,7 @@ def main(dom_id, command, only_this_vif=None): macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/enstore-read', + xsread = execute('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac), True) data = json.loads(xsread) -- cgit From 367581e63d4eb0018db293034dc1b096d2584720 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 15:28:21 +0000 Subject: change bridge --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 7bad39830..2faf4e5c0 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -29,7 +29,7 @@ import simplejson as json # FIXME(dubs) this needs to be able to be passed in, check xen vif script -XEN_BRIDGE = 'xenbr1' +XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' @@ -86,6 +86,7 @@ def main(dom_id, command, net_type, only_this_vif=None): params = dict(VIF_NAME=vif, VIF_MAC=data['mac'], VIF_OFPORT=vif_ofport) + if net_type in ('ipv4', 'all'): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) -- cgit From 74b9f240c7e8c62e68011691488be9e63758e980 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 19:54:55 +0000 Subject: extract execute methods to a library for reuse --- .../networking/etc/xensource/scripts/novalib.py | 41 ++++++++++++++++++++++ .../xensource/scripts/ovs_configure_base_flows.py | 21 ++++------- .../xensource/scripts/ovs_configure_vif_flows.py | 30 ++++++---------- 3 files changed, 57 insertions(+), 35 deletions(-) create mode 100644 plugins/xenserver/networking/etc/xensource/scripts/novalib.py diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py new file mode 100644 index 000000000..5366c385d --- /dev/null +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import os +import subprocess +import sys + + +def execute_get_output(*command): + """Execute and return stdout""" + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() + return proc.stdout.read() + + +def execute(*command): + """Execute without returning stdout""" + devnull = open(os.devnull, 'w') + command = map(str, command) + proc = subprocess.Popen(command, close_fds=True, + stdout=subprocess.PIPE, stderr=devnull) + devnull.close() diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 1f3182e68..d036cf517 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -25,9 +25,12 @@ import subprocess import sys +from novalib import execute, execute_get_output + + def main(phys_dev_name, bridge_name): - pnic_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport', return_stdout=True) + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport') ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) # clear all flows @@ -44,24 +47,12 @@ def main(phys_dev_name, bridge_name): ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - - if __name__ == "__main__": if len(sys.argv) != 3: script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." 
print "usage: %s phys-dev-name bridge-name" % script_name - print " ex: %s eth2 xenbr2" % script_name + print " ex: %s eth0 xenbr0" % script_name sys.exit(1) else: phys_dev_name, bridge_name = sys.argv[1:3] diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 2faf4e5c0..82e79c2d8 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -28,23 +28,14 @@ import sys import simplejson as json +from novalib import execute, execute_get_output + + # FIXME(dubs) this needs to be able to be passed in, check xen vif script XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - - class OvsFlow(): def __init__(self, command, params, bridge=None): self.command = command @@ -64,15 +55,14 @@ class OvsFlow(): def main(dom_id, command, net_type, only_this_vif=None): - xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, - return_stdout=True) + xsls = execute_get_output('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/xenstore-read', - '/local/domain/%s/vm-data/networking/%s' % - (dom_id, mac), True) + xsread = execute_get_output('/usr/bin/xenstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac)) data = json.loads(xsread) if data["label"] == "public": vif = "vif%s.0" % dom_id @@ -80,8 +70,8 @@ def main(dom_id, command, net_type, only_this_vif=None): vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): - vif_ofport = execute('/usr/bin/ovs-vsctl', 'get', 'Interface', - vif, 'ofport', return_stdout=True) + vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', + 'Interface', vif, 'ofport') params = dict(VIF_NAME=vif, VIF_MAC=data['mac'], -- cgit From a4e1db03a2c61648588d9adb703a385f49d82fc0 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 1 Apr 2011 20:26:59 +0000 Subject: use novalib for vif_rules.py, fix OvsFlow class --- .../xensource/scripts/ovs_configure_vif_flows.py | 2 +- .../networking/etc/xensource/scripts/vif_rules.py | 25 +++++++--------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 82e79c2d8..23b6d85c9 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -36,7 +36,7 @@ XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' -class OvsFlow(): +class OvsFlow(object): def __init__(self, command, params, bridge=None): self.command = command self.params = params diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 4e13bad9d..662def205 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ 
b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -29,16 +29,18 @@ import sys import simplejson as json +from novalib import execute, execute_get_output + + def main(dom_id, command, only_this_vif=None): - xsls = execute('/usr/bin/xenstore-ls', - '/local/domain/%s/vm-data/networking' % dom_id, - return_stdout=True) + xsls = execute_get_output('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsread = execute('/usr/bin/xenstore-read', - '/local/domain/%s/vm-data/networking/%s' % - (dom_id, mac), True) + xsread = execute_get_output('/usr/bin/xenstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac)) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": @@ -53,17 +55,6 @@ def main(dom_id, command, only_this_vif=None): apply_iptables_rules(command, params) -def execute(*command, return_stdout=False): - devnull = open(os.devnull, 'w') - command = map(str, command) - proc = subprocess.Popen(command, close_fds=True, - stdout=subprocess.PIPE, stderr=devnull) - devnull.close() - if return_stdout: - return proc.stdout.read() - else: - return None - # A note about adding rules: # Whenever we add any rule to iptables, arptables or ebtables we first # delete the same rule to ensure the rule only exists once. -- cgit From 1c13695a2c5e5d14ead3f5459d0b40bb875ecdf6 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 11 Apr 2011 14:16:17 -0400 Subject: Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection_type=xenapi. --- nova/virt/xenapi/vm_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d2045a557..50fdf3e30 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1012,6 +1012,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) + utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) for chunk in image_file: -- cgit From 9d2513ea3a6d586e1fe3deae778a02bb089b9a5e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 12 Apr 2011 10:25:07 -0400 Subject: Updated to use setfacl instead of chown. 
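
Using an ACL entry gives the unprivileged nova user read/write access to the
raw vbd device while leaving the device's ownership untouched, which is why
setfacl is preferable to the earlier chown. A minimal sketch of the same call
wrapped as a helper, reusing nova's utils.execute; the helper name is
hypothetical and not part of this change:

    import os

    from nova import utils


    def _grant_device_access(dev):
        # Add an ACL entry granting the current (nova) user rw on the raw
        # device; unlike chown, the device owner is not modified.
        utils.execute('sudo', 'setfacl', '-m', 'u:%s:rw' % os.getuid(),
                      '/dev/%s' % dev)
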
--- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 50fdf3e30..5cdd29057 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1012,7 +1012,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + utils.execute('sudo', 'setfacl', '-m', 'u:%s:rw' % os.getuid(), + '/dev/%s' % dev) with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) -- cgit From 8c66d79a41044837a0865b1a706dd89e788597d1 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 14 Apr 2011 20:57:11 +0900 Subject: add kvm-pause and kvm-suspend --- nova/virt/libvirt_conn.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6ec15fbb8..66f43e786 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -545,19 +545,54 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") + """Pause VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.suspend() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") + """Unpause paused VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.resume() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") + """Suspend the specified instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.managedSave(0) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") + """resume the specified instance""" + try: + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.create() + except libvirt.LibvirtError: + xml = self.to_xml(instance, None) + self._create_new_domain(xml) @exception.wrap_exception def rescue(self, instance, callback=None): -- cgit From f2e9d4120ed0495d9c810a0d27d530e280f325c6 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 14 Apr 2011 14:35:42 -0400 Subject: set the bridge on each OvsFlow --- .../xensource/scripts/ovs_configure_vif_flows.py | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 23b6d85c9..b59cc4d0b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -27,30 +27,28 @@ import 
sys # This is written to Python 2.4, since that is what is available on XenServer import simplejson as json - from novalib import execute, execute_get_output -# FIXME(dubs) this needs to be able to be passed in, check xen vif script -XEN_BRIDGE = 'xenbr0' OVS_OFCTL = '/usr/bin/ovs-ofctl' class OvsFlow(object): - def __init__(self, command, params, bridge=None): + def __init__(self, command, bridge, params): self.command = command + self.bridge = bridge self.params = params - self.bridge = bridge or XEN_BRIDGE def add(self, rule): execute(OVS_OFCTL, 'add-flow', self.bridge, rule) def delete(self, rule): - execute(OVS_OFCTL, 'del-flow', self.bridge, rule) + execute(OVS_OFCTL, 'del-flows', self.bridge, rule) def apply(self, rule): - self.delete(rule % self.params) - if self.command == 'online': + if self.command in ('offline', 'reset'): + self.delete(rule % self.params) + if self.command in ('online', 'reset'): self.add(rule % self.params) @@ -66,8 +64,10 @@ def main(dom_id, command, net_type, only_this_vif=None): data = json.loads(xsread) if data["label"] == "public": vif = "vif%s.0" % dom_id + bridge = "xenbr0" else: vif = "vif%s.1" % dom_id + bridge = "xenbr1" if (only_this_vif is None) or (vif == only_this_vif): vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', @@ -80,17 +80,17 @@ def main(dom_id, command, net_type, only_this_vif=None): if net_type in ('ipv4', 'all'): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) - apply_ovs_ipv4_flows(command, params) + apply_ovs_ipv4_flows(command, bridge, params) if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) # TODO(dubs) calculate v6 link local addr #params.update({'VIF_LOCAL_IPv6': XXX}) - apply_ovs_ipv6_flows(command, params) + apply_ovs_ipv6_flows(command, bridge, params) -def apply_ovs_ipv4_flows(command, params): - flow = OvsFlow(command, params) +def apply_ovs_ipv4_flows(command, bridge, params): + flow = OvsFlow(command, bridge, params) # allow valid ARP outbound (both request / reply) flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," @@ -104,8 +104,8 @@ def apply_ovs_ipv4_flows(command, params): "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(command, params): - flow = OvsFlow(command, params) +def apply_ovs_ipv6_flows(command, bridge, params): + flow = OvsFlow(command, bridge, params) # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation @@ -170,7 +170,7 @@ def apply_ovs_ipv6_flows(command, params): if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline ipv4|ipv6|all [vif_name]" % \ + print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif_name]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From c134d3c9bfb5a9d789776b243b8d6e4283fb3f80 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Mon, 18 Apr 2011 13:30:54 -0400 Subject: calc link local --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index b59cc4d0b..08d7a3859 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -25,6 +25,7 @@ import subprocess import sys # This is written to Python 2.4, since that is what is available on XenServer +import netaddr import simplejson as json from novalib import execute, execute_get_output @@ -84,8 +85,8 @@ def main(dom_id, command, net_type, only_this_vif=None): if net_type in ('ipv6', 'all'): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - # TODO(dubs) calculate v6 link local addr - #params.update({'VIF_LOCAL_IPv6': XXX}) + mac64 = netaddr.EUI(mac).eui64() + params.update({'VIF_LOCAL_IPv6': mac64.ipv6_link_local()}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From 0ba085928c75f2fc27fb03eaa3aaeff6618e8875 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. 
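
The snapshot lifecycle is asynchronous: the EC2 layer calls into
nova.volume.api.API, which writes a DB record in the 'creating' state and
casts to the scheduler/volume topics, and the volume manager marks the record
'available' (or 'error') once the driver call returns. A rough sketch of how a
caller exercises the new API; the context handling and names below are
illustrative only, not part of this patch:

    from nova import context as nova_context
    from nova.volume import api as volume_api


    def snapshot_volume(volume_id):
        ctxt = nova_context.get_admin_context()
        api = volume_api.API()

        # Returns immediately with the new DB record; status stays
        # 'creating' until the volume manager finishes the driver call.
        snap = api.create_snapshot(ctxt, volume_id=volume_id,
                                   name='example',
                                   description='example snapshot')

        # Poll the record; once 'available' it shows up in
        # euca-describe-snapshots and can be deleted again.
        snap = api.get_snapshot(ctxt, snapshot_id=snap['id'])
        if snap['status'] == 'available':
            api.delete_snapshot(ctxt, snapshot_id=snap['id'])
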
--- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 50 ++++++++++++++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 405 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cd59340bd..4a49a5a6b 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -327,6 +327,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 092b80fa2..f5360af0b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -280,14 +280,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': [self._format_snapshot(context, snapshot)]} + + def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, 
context.user_id) @@ -595,6 +627,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index f9a4b5b4b..57e585a9c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -47,6 +47,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -871,6 +873,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 285b22a04..ebdb2ad5c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1758,6 +1758,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36a084a1d..2e0ead5f9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 9905fb19b..2dffeb795 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,6 +60,56 @@ class ApiError(Error): class BuildInProgress(Error): + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class InstanceNotFound(NotFound): + def __init__(self, message, instance_id): + self.instance_id = instance_id + super(InstanceNotFound, self).__init__(message) + + +class VolumeNotFound(NotFound): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(VolumeNotFound, self).__init__(message) + + +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class InvalidInputException(Error): + pass + + +class InvalidContentType(Error): + pass + + +class TimeoutException(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 09befb647..c1af30de0 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? 
@@ -110,3 +119,38 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "remove_volume", "args": {'volume_id': volume_id}}) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + 
def setup_compute_volume(self, context, volume_id): """Setup remote volume on compute host. -- cgit From f76f2ee50f2407155a0aaefac3224e6af14e7d26 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From 2f3819628b6d3dea13a56ea6e93e02992b2e1f5f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. --- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f5360af0b..aa15539ac 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -636,11 +636,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 2e0ead5f9..ca762ca9f 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? diff --git a/nova/volume/api.py b/nova/volume/api.py index c1af30de0..7fa80383b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): 
vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1018a60e3194e7e283cd89af28efd689623058a8 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. --- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6daf299b9..5d4d2ad27 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -639,11 +639,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9abe4d9ae..afc2ea4e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? diff --git a/nova/volume/api.py b/nova/volume/api.py index f5285f31f..bd073964d 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1c7c53a9f40a88eb9def7ab9d706e7399ad5e65b Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 
21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. --- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 5b670fe9bca9103642967bce609853704d0d1b88 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. --- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 4e11c04a34b3237853c0b4be90ce6362237bcbe0 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 19 Apr 2011 20:10:57 +0000 Subject: strip output, str() link local --- plugins/xenserver/networking/etc/xensource/scripts/novalib.py | 2 +- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py index 5366c385d..9fc4b2310 100644 --- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -29,7 +29,7 @@ def execute_get_output(*command): proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() - return proc.stdout.read() + return proc.stdout.read().strip() def execute(*command): diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 08d7a3859..d1d646b99 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -86,7 +86,7 @@ def main(dom_id, command, net_type, only_this_vif=None): for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) mac64 = netaddr.EUI(mac).eui64() - params.update({'VIF_LOCAL_IPv6': mac64.ipv6_link_local()}) + params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From a46bd9fb6483959e210f25276ff70c76767e509d Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 19 Apr 2011 22:13:40 +0000 Subject: only apply ipv6 if the data exists in xenstore --- 
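Note: the check below matters because not every guest publishes IPv6 data under vm-data/networking. The per-MAC records the script parses are JSON; a minimal sketch of the shape assumed here (keys taken from the code, values invented for illustration):

    import json   # the plugin itself imports simplejson, since XenServer ships Python 2.4

    raw = ('{"label": "public", "mac": "02:16:3e:77:52:22", '
           '"ips": [{"ip": "10.0.0.5"}], "ip6s": [{"ip": "2001:db8::5"}]}')
    data = json.loads(raw)

    if 'ip6s' in data:               # the guard this patch adds
        for ip6 in data['ip6s']:
            print(ip6['ip'])         # 2001:db8::5
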
.../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index d1d646b99..e1a151476 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -82,10 +82,10 @@ def main(dom_id, command, net_type, only_this_vif=None): for ip4 in data['ips']: params.update({'VIF_IPv4': ip4['ip']}) apply_ovs_ipv4_flows(command, bridge, params) - if net_type in ('ipv6', 'all'): + if net_type in ('ipv6', 'all') and 'ip6s' in data: for ip6 in data['ip6s']: params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - mac64 = netaddr.EUI(mac).eui64() + mac64 = netaddr.EUI(data['mac']).eui64() params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) apply_ovs_ipv6_flows(command, bridge, params) -- cgit From 169496af390caa4035db2fefabffd71c95a57fbf Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 20 Apr 2011 14:11:25 -0400 Subject: refactor the way flows are deleted/reset --- .../xensource/scripts/ovs_configure_vif_flows.py | 143 ++++++++++----------- 1 file changed, 70 insertions(+), 73 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index e1a151476..37ff07e33 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -35,22 +35,15 @@ OVS_OFCTL = '/usr/bin/ovs-ofctl' class OvsFlow(object): - def __init__(self, command, bridge, params): - self.command = command + def __init__(self, bridge, params): self.bridge = bridge self.params = params def add(self, rule): - execute(OVS_OFCTL, 'add-flow', self.bridge, rule) + execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params) - def delete(self, rule): - execute(OVS_OFCTL, 'del-flows', self.bridge, rule) - - def apply(self, rule): - if self.command in ('offline', 'reset'): - self.delete(rule % self.params) - if self.command in ('online', 'reset'): - self.add(rule % self.params) + def clear_flows(self, ofport): + execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport) def main(dom_id, command, net_type, only_this_vif=None): @@ -78,94 +71,98 @@ def main(dom_id, command, net_type, only_this_vif=None): VIF_MAC=data['mac'], VIF_OFPORT=vif_ofport) - if net_type in ('ipv4', 'all'): - for ip4 in data['ips']: - params.update({'VIF_IPv4': ip4['ip']}) - apply_ovs_ipv4_flows(command, bridge, params) - if net_type in ('ipv6', 'all') and 'ip6s' in data: - for ip6 in data['ip6s']: - params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) - mac64 = netaddr.EUI(data['mac']).eui64() - params.update({'VIF_LOCAL_IPv6': str(mac64.ipv6_link_local())}) - apply_ovs_ipv6_flows(command, bridge, params) + ovs = OvsFlow(bridge, params) + + if command in ('offline', 'reset'): + # I haven't found a way to clear only IPv4 or IPv6 rules. 
+ ovs.clear_flows(vif_ofport) + if command in ('online', 'reset'): + if net_type in ('ipv4', 'all') and 'ips' in data: + for ip4 in data['ips']: + ovs.params.update({'VIF_IPv4': ip4['ip']}) + apply_ovs_ipv4_flows(ovs, bridge, params) + if net_type in ('ipv6', 'all') and 'ip6s' in data: + for ip6 in data['ip6s']: + link_local = str(netaddr.EUI(data['mac']).eui64()\ + .ipv6_link_local()) + ovs.params.update({'VIF_LOCAL_IPv6': link_local}) + ovs.params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + apply_ovs_ipv6_flows(ovs, bridge, params) -def apply_ovs_ipv4_flows(command, bridge, params): - flow = OvsFlow(command, bridge, params) +def apply_ovs_ipv4_flows(ovs, command, bridge, params): # allow valid ARP outbound (both request / reply) - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") # allow valid IPv4 outbound - flow.apply("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,action=normal") - + ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," + "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(command, bridge, params): - flow = OvsFlow(command, bridge, params) +def apply_ovs_ipv6_flows(ovs, command, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) # Neighbor Solicitation - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + "action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + "action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") # Neighbor Advertisement - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") - flow.apply("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - 
"ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," + "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") # Redirect Gateway - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") # Mobile Prefix Solicitation - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") # Mobile Prefix Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") # Multicast Router Advertisement - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") # Multicast Router Solicitation - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") # Multicast Router Termination - flow.apply("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") # allow valid IPv6 outbound, by type - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") - flow.apply("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + 
"ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") # all else will be dropped ... -- cgit From 7c53dc7a792dfcda0862178725adbe585c4fed21 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 20 Apr 2011 14:24:29 -0400 Subject: bugfix signature --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 37ff07e33..9557eb3e2 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -91,7 +91,7 @@ def main(dom_id, command, net_type, only_this_vif=None): apply_ovs_ipv6_flows(ovs, bridge, params) -def apply_ovs_ipv4_flows(ovs, command, bridge, params): +def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") @@ -104,7 +104,7 @@ def apply_ovs_ipv4_flows(ovs, command, bridge, params): "nw_src=%(VIF_IPv4)s,action=normal") -def apply_ovs_ipv6_flows(ovs, command, bridge, params): +def apply_ovs_ipv6_flows(ovs, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," -- cgit From bbcc2304167c3331f4c54898200f01fd66c0a20c Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 21 Apr 2011 14:53:03 -0400 Subject: change action= to actions= --- .../xensource/scripts/ovs_configure_base_flows.py | 4 +- .../xensource/scripts/ovs_configure_vif_flows.py | 52 +++++++++++----------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index d036cf517..555dad71a 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -41,10 +41,10 @@ def main(phys_dev_name, bridge_name): # allow all traffic from the physical NIC, as it is trusted (i.e., from a # filtered vif, or from the physical infrastructure ovs_ofctl('add-flow', bridge_name, - "priority=2,in_port=%s,action=normal" % pnic_ofport) + "priority=2,in_port=%s,actions=normal" % pnic_ofport) # default drop - ovs_ofctl('add-flow', bridge_name, 'priority=1,action=drop') + ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') if __name__ == "__main__": diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 9557eb3e2..aba8487f6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -94,14 +94,14 @@ def main(dom_id, command, net_type, only_this_vif=None): def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,action=normal") + "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,actions=normal") ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,action=normal") + "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,actions=normal") # allow valid IPv4 outbound ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,action=normal") + "nw_src=%(VIF_IPv4)s,actions=normal") def apply_ovs_ipv6_flows(ovs, bridge, params): @@ -109,60 +109,60 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): # Neighbor Solicitation ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") + "actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," - "action=normal") + "actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,actions=normal") # Neighbor Advertisement ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,action=normal") + "nd_target=%(VIF_LOCAL_IPv6)s,actions=normal") 
ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,action=normal") + "nd_target=%(VIF_GLOBAL_IPv6)s,actions=normal") ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,actions=normal") # drop all other neighbor discovery (required because we permit all icmp6 below) - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,action=drop") - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,actions=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,actions=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,actions=drop") # Redirect Gateway - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,actions=drop") # Mobile Prefix Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,actions=drop") # Mobile Prefix Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,actions=drop") # Multicast Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,actions=drop") # Multicast Router Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,actions=drop") # Multicast Router Termination - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,action=drop") + ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,actions=drop") # allow valid IPv6 outbound, by type ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,action=normal") + "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,actions=normal") ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,action=normal") + "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,actions=normal") # all else will be dropped ... 
-- cgit From a13616c2deae4ae90bb69ce87bda28576e194426 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 22 Apr 2011 16:35:26 -0400 Subject: removed unused imports and renamed template variables --- .../networking/etc/xensource/scripts/novalib.py | 1 - .../xensource/scripts/ovs_configure_base_flows.py | 1 - .../xensource/scripts/ovs_configure_vif_flows.py | 107 ++++++++++----------- 3 files changed, 53 insertions(+), 56 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py index 9fc4b2310..dcbee3ded 100644 --- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py @@ -19,7 +19,6 @@ import os import subprocess -import sys def execute_get_output(*command): diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 555dad71a..82d0b9e31 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -21,7 +21,6 @@ This script is used to configure base openvswitch flows for XenServer hosts. """ import os -import subprocess import sys diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index aba8487f6..f91a5f49d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -21,7 +21,6 @@ This script is used to configure openvswitch flows on XenServer hosts. 
""" import os -import subprocess import sys # This is written to Python 2.4, since that is what is available on XenServer @@ -68,8 +67,8 @@ def main(dom_id, command, net_type, only_this_vif=None): 'Interface', vif, 'ofport') params = dict(VIF_NAME=vif, - VIF_MAC=data['mac'], - VIF_OFPORT=vif_ofport) + MAC=data['mac'], + OF_PORT=vif_ofport) ovs = OvsFlow(bridge, params) @@ -80,95 +79,95 @@ def main(dom_id, command, net_type, only_this_vif=None): if command in ('online', 'reset'): if net_type in ('ipv4', 'all') and 'ips' in data: for ip4 in data['ips']: - ovs.params.update({'VIF_IPv4': ip4['ip']}) + ovs.params.update({'IPV4_ADDR': ip4['ip']}) apply_ovs_ipv4_flows(ovs, bridge, params) if net_type in ('ipv6', 'all') and 'ip6s' in data: for ip6 in data['ip6s']: link_local = str(netaddr.EUI(data['mac']).eui64()\ .ipv6_link_local()) - ovs.params.update({'VIF_LOCAL_IPv6': link_local}) - ovs.params.update({'VIF_GLOBAL_IPv6': ip6['ip']}) + ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local}) + ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']}) apply_ovs_ipv6_flows(ovs, bridge, params) def apply_ovs_ipv4_flows(ovs, bridge, params): # allow valid ARP outbound (both request / reply) - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=%(VIF_IPv4)s,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp," + "arp_sha=%(MAC)s,nw_src=%(IPV4_ADDR)s,actions=normal") - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,arp," - "arp_sha=%(VIF_MAC)s,nw_src=0.0.0.0,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp," + "arp_sha=%(MAC)s,nw_src=0.0.0.0,actions=normal") # allow valid IPv4 outbound - ovs.add("priority=3,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,ip," - "nw_src=%(VIF_IPv4)s,actions=normal") + ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,ip," + "nw_src=%(IPV4_ADDR)s,actions=normal") def apply_ovs_ipv6_flows(ovs, bridge, params): # allow valid IPv6 ND outbound (are both global and local IPs needed?) 
# Neighbor Solicitation - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=135,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,nd_sll=%(VIF_MAC)s," + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s," "actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=135,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal") # Neighbor Advertisement - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_LOCAL_IPv6)s,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp_type=136,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136," - "nd_target=%(VIF_GLOBAL_IPv6)s,actions=normal") - ovs.add("priority=6,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s,icmp6," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp_type=136,actions=normal") - - # drop all other neighbor discovery (required because we permit all icmp6 below) - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=135,actions=drop") - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=136,actions=drop") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136," + "nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136," + "nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal") + ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal") + + # drop all other neighbor discovery (req b/c we permit all icmp6 below) + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop") # do not allow sending specifc ICMPv6 types # Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=134,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop") # Redirect Gateway - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=137,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop") # Mobile Prefix Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=146,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop") # Mobile Prefix Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=147,actions=drop") + 
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop") # Multicast Router Advertisement - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=151,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop") # Multicast Router Solicitation - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=152,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop") # Multicast Router Termination - ovs.add("priority=5,in_port=%(VIF_OFPORT)s,icmp6,icmp_type=153,actions=drop") + ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop") # allow valid IPv6 outbound, by type - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,icmp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,icmp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,tcp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,tcp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_GLOBAL_IPv6)s,udp6,actions=normal") - ovs.add("priority=4,in_port=%(VIF_OFPORT)s,dl_src=%(VIF_MAC)s," - "ipv6_src=%(VIF_LOCAL_IPv6)s,udp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal") + ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s," + "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal") # all else will be dropped ... 
if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif_name]" % \ + print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif]" % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From 3ee0507ddc6bb7e15834144acc47c354396fbc70 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 5 May 2011 23:14:46 -0400 Subject: Publish errors via nova.notifier --- nova/log.py | 9 +++++++++ nova/tests/test_notifier.py | 21 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 096279f7c..3e587891a 100644 --- a/nova/log.py +++ b/nova/log.py @@ -35,6 +35,7 @@ import os import sys import traceback +import nova from nova import flags from nova import version @@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_bool('publish_errors', True, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') @@ -258,12 +260,19 @@ class NovaRootLogger(NovaLogger): else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) + if FLAGS.publish_errors: + self.addHandler(PublishErrorsHandler(ERROR)) if FLAGS.verbose: self.setLevel(DEBUG) else: self.setLevel(INFO) +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + nova.notifier.notify('error', record) + + def handle_exception(type, value, tb): extra = {} if FLAGS.verbose: diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 4d6289e6a..d18d3bc05 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,14 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. -import nova +import json + +import stubout +import nova +from nova import log as logging from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -import stubout +LOG = logging.getLogger('nova.compute.api') class NotifierTestCase(test.TestCase): """Test case for notifications""" @@ -58,3 +62,16 @@ class NotifierTestCase(test.TestCase): notifier.notify('derp', Mock()) self.assertEqual(self.mock_cast, True) + + def test_error_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + msgs = [] + def mock_cast(context, topic, msg): + data = json.loads(msg) + msgs.append(data) + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo'); + msg = msgs[0] + self.assertEqual(msg['event_name'], 'error') + self.assertEqual(msg['model']['msg'], 'foo') -- cgit From 65595766706631a5c65193cfc0fa2ac9de1aeffc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 6 May 2011 20:15:06 -0400 Subject: Set publish_errors default to False. 
--- nova/log.py | 2 +- nova/tests/test_notifier.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 3e587891a..d2ed82c6c 100644 --- a/nova/log.py +++ b/nova/log.py @@ -64,7 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') -flags.DEFINE_bool('publish_errors', True, 'publish error events') +flags.DEFINE_bool('publish_errors', False, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index d18d3bc05..c9c4ddde8 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -18,14 +18,12 @@ import json import stubout import nova -from nova import log as logging +from nova import log from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -LOG = logging.getLogger('nova.compute.api') - class NotifierTestCase(test.TestCase): """Test case for notifications""" def setUp(self): @@ -66,12 +64,17 @@ class NotifierTestCase(test.TestCase): def test_error_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) + LOG = log.getLogger('nova') + LOG.setup_from_flags() + msgs = [] def mock_cast(context, topic, msg): data = json.loads(msg) msgs.append(data) self.stubs.Set(nova.rpc, 'cast', mock_cast) LOG.error('foo'); + self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_name'], 'error') self.assertEqual(msg['model']['msg'], 'foo') -- cgit From 389f7c79199d5ad908a72375a7377a1122f36707 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:52:26 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. --- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 3f3caf37a..9591c93d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,16 +90,40 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) + def _create_volume(self, volume_name, sizestr): + self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) + try: + self._try_execute('sudo', 'lvdisplay', path_name) + except Exception as e: + # If the volume isn't present + return True + return False + + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" + # zero out old volumes to prevent data leaking between users + # TODO(ja): reclaiming space should be done lazy and low priority + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) + self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + def create_volume(self, volume): """Creates a logical volume. 
Can optionally return a Dictionary of changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) + self._create_volume(volume['name'], self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" @@ -107,24 +131,10 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" - try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) - except Exception as e: + if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True - - # zero out old volumes to prevent data leaking between users - # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') - self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -612,13 +622,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" -- cgit From 03c735bb186a44d80a9d595e00e9c06fd8f709cc Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:53:25 +0900 Subject: volume/driver: implement basic snapshot/clone added basic support for snapshot/clone to VolumeDriver. The implementation is not effective, but works. The effective implementation should be done by drived driver class. 
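Concretely, the generic VolumeDriver ends up wrapping two LVM operations: lvcreate --snapshot for a copy-on-write snapshot of an existing LV, and a dd block copy from that snapshot into a freshly created LV when a new volume is built from it. A rough sketch of the equivalent commands, with a placeholder volume group name and sizes in GB (the driver itself routes these through its _try_execute and _copy_volume helpers):

    import subprocess

    VG = 'nova-volumes'                      # placeholder volume group name

    def snapshot_lv(origin, snap_name, size_gb):
        # copy-on-write snapshot of an existing logical volume
        subprocess.check_call(['sudo', 'lvcreate', '-L', '%sG' % size_gb,
                               '--name', snap_name, '--snapshot',
                               '%s/%s' % (VG, origin)])

    def volume_from_snapshot(snap_name, new_lv, size_gb):
        # create the target LV, then block-copy the snapshot contents into it
        subprocess.check_call(['sudo', 'lvcreate', '-L', '%sG' % size_gb,
                               '--name', new_lv, VG])
        subprocess.check_call(['sudo', 'dd',
                               'if=/dev/%s/%s' % (VG, snap_name),
                               'of=/dev/%s/%s' % (VG, new_lv),
                               'bs=1M', 'count=%d' % (size_gb * 1024)])
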
--- Authors | 1 + nova/exception.py | 6 ++++++ nova/volume/driver.py | 42 +++++++++++++++++++++++++++++++++++++----- nova/volume/manager.py | 6 ++++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/Authors b/Authors index 60e1d2dad..9eae53e9b 100644 --- a/Authors +++ b/Authors @@ -28,6 +28,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews diff --git a/nova/exception.py b/nova/exception.py index 2dffeb795..6748ef265 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -79,6 +79,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class VolumeIsBusy(Error): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(Error, self).__init__(message) + + class SnapshotNotFound(NotFound): def __init__(self, message, snapshot_id): self.snapshot_id = snapshot_id diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 9591c93d0..457a1c9e6 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -127,27 +135,51 @@ class VolumeDriver(object): def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - raise NotImplementedError() + self._create_volume(volume['name'], self._sizestr(volume['size'])) + self._copy_volume(self.local_path(snapshot), self.local_path(volume), + snapshot['volume_size']) def delete_volume(self, volume): """Deletes a logical volume.""" if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? + out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy( + _('deleting volume %s that has snapshot'), volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. 
+ self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7d47fc191..84085fbd8 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -147,6 +147,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From db148f108dfc4829e1302a54fe4f57ab81212786 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 19:25:02 +0900 Subject: fix mismerge by 1059 --- nova/db/sqlalchemy/api.py | 3 +-- nova/exception.py | 65 ++++++----------------------------------------- nova/volume/driver.py | 3 +-- 3 files changed, 10 insertions(+), 61 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ebdb2ad5c..7302f25b0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1798,8 +1798,7 @@ def snapshot_get(context, snapshot_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, - snapshot_id) + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result diff --git a/nova/exception.py b/nova/exception.py index 6748ef265..b16ea6810 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,65 +60,8 @@ class ApiError(Error): class BuildInProgress(Error): - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class InstanceNotFound(NotFound): - def __init__(self, message, instance_id): - self.instance_id = instance_id - super(InstanceNotFound, self).__init__(message) - - -class VolumeNotFound(NotFound): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(VolumeNotFound, self).__init__(message) - - -class VolumeIsBusy(Error): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(Error, self).__init__(message) - - -class SnapshotNotFound(NotFound): - def __init__(self, message, snapshot_id): - self.snapshot_id = snapshot_id - super(SnapshotNotFound, self).__init__(message) - - -class Duplicate(Error): pass - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class InvalidInputException(Error): - pass - - -class InvalidContentType(Error): - pass - - -class TimeoutException(Error): - pass - - class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, inner_exception): @@ -319,6 +262,14 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s not found") + + +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class 
ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 457a1c9e6..e783d3a5a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -153,8 +153,7 @@ class VolumeDriver(object): volume['name'])) out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy( - _('deleting volume %s that has snapshot'), volume['name']) + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From c5dbee818b1a06bf5358c32197c8e15ecf0f660d Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 20:19:35 +0900 Subject: db: fix db versioning --- .../versions/015_add_volume_snapshot_support.py | 71 ---------------------- .../versions/016_add_snapshot_id_to_volumes.py | 48 --------------- .../versions/016_add_volume_snapshot_support.py | 71 ++++++++++++++++++++++ .../versions/017_add_snapshot_id_to_volumes.py | 48 +++++++++++++++ 4 files changed, 119 insertions(+), 119 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py deleted file mode 100644 index 288f63e72..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py +++ /dev/null @@ -1,71 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * -from migrate import * - -from nova import log as logging - -meta = MetaData() - -snapshots = Table('snapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', Integer(), nullable=False), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('progress', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_size', Integer()), - Column('scheduled_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - try: - snapshots.create() - except Exception: - logging.info(repr(snapshots)) - logging.exception('Exception while creating table') - meta.drop_all(tables=[snapshots]) - raise - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py deleted file mode 100644 index 0a50123bf..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py +++ /dev/null @@ -1,48 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Column -# - -snapshot_id = Column('snapshot_id', Integer()) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - # Add columns to existing tables - volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) -- cgit From 6c4059f20c85e9bc013a340de167151e7b5fa8c4 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Wed, 11 May 2011 03:24:02 +0400 Subject: Bugfix #780784. KeyError when creating custom image. --- Authors | 1 + nova/virt/libvirt_conn.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Authors b/Authors index 60e1d2dad..72eb0b6ae 100644 --- a/Authors +++ b/Authors @@ -54,6 +54,7 @@ Mark Washenberger Masanori Itoh Matt Dietz Michael Gundlach +Mike Scherbakov Monsyne Dragon Monty Taylor MORITA Kazutaka diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9780c69a6..71cedae54 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -456,7 +456,8 @@ class LibvirtConnection(driver.ComputeDriver): 'container_format': base['container_format'], 'is_public': False, 'name': '%s.%s' % (base['name'], image_id), - 'properties': {'architecture': base['architecture'], + 'properties': {'architecture': + base['properties']['architecture'], 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', -- cgit From a7c25a19a9a2fdf89fc9ecd3992ded936923af18 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 14:21:55 +0000 Subject: Add init script and sysconfig file for openvswitch-nova --- .../networking/etc/init.d/openvswitch-nova | 96 ++++++++++++++++++++++ .../networking/etc/sysconfig/openvswitch-nova | 1 + .../xensource/scripts/ovs_configure_base_flows.py | 35 ++++---- 3 files changed, 116 insertions(+), 16 deletions(-) create mode 100755 plugins/xenserver/networking/etc/init.d/openvswitch-nova create mode 100644 plugins/xenserver/networking/etc/sysconfig/openvswitch-nova diff --git a/plugins/xenserver/networking/etc/init.d/openvswitch-nova b/plugins/xenserver/networking/etc/init.d/openvswitch-nova new file mode 100755 index 000000000..e4dbdf4af --- /dev/null +++ b/plugins/xenserver/networking/etc/init.d/openvswitch-nova @@ -0,0 +1,96 @@ +#!/bin/bash +# +# openvswitch-nova +# +# chkconfig: 2345 10 89 +# description: Apply initial OVS flows for Nova + +# Copyright 2011 OpenStack LLC. +# Copyright (C) 2009, 2010, 2011 Nicira Networks, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# source function library +if [ -f /etc/init.d/functions ]; then + . /etc/init.d/functions +elif [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +elif [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +else + echo "$0: missing LSB shell function library" >&2 + exit 1 +fi + +OVS_CONFIGURE_BASE_FLOWS=/etc/xensource/scripts/ovs_configure_base_flows.py + +if test -e /etc/sysconfig/openvswitch-nova; then + . /etc/sysconfig/openvswitch-nova +else + echo "$0: missing configuration file: /etc/sysconfig/openvswitch-nova" + exit 1 +fi + +if test -e /etc/xensource/network.conf; then + NETWORK_MODE=$(cat /etc/xensource/network.conf) +fi + +case ${NETWORK_MODE:=openvswitch} in + vswitch|openvswitch) + ;; + bridge) + exit 0 + ;; + *) + echo "Open vSwitch disabled (/etc/xensource/network.conf is invalid)" >&2 + exit 0 + ;; +esac + +function run_ovs_conf_base_flows { + # expected format: DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1" + for pair in $DEVICE_BRIDGES; do + # below in $info, physical device is [0], bridge name is [1] + info=${pair//:/ } + /usr/bin/python $OVS_CONFIGURE_BASE_FLOWS $1 ${info[0]} ${info[1]} + done +} + +function start { + run_ovs_conf_base_flows online +} + +function stop { + run_ovs_conf_base_flows offline +} + +function restart { + run_ovs_conf_base_flows reset +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + restart + ;; + *) + echo "usage: openvswitch-nova [start|stop|restart]" + exit 1 + ;; +esac diff --git a/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova new file mode 100644 index 000000000..829782fb6 --- /dev/null +++ b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova @@ -0,0 +1 @@ +#DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 82d0b9e31..0186a3c8b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -27,32 +27,35 @@ import sys from novalib import execute, execute_get_output -def main(phys_dev_name, bridge_name): - pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport') +def main(command, phys_dev_name, bridge_name): ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule) - # clear all flows + # always clear all flows first ovs_ofctl('del-flows', bridge_name) - # these flows are lower priority than all VM-specific flows. 
+ if command in ('online', 'reset'): + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', + phys_dev_name, 'ofport') - # allow all traffic from the physical NIC, as it is trusted (i.e., from a - # filtered vif, or from the physical infrastructure - ovs_ofctl('add-flow', bridge_name, - "priority=2,in_port=%s,actions=normal" % pnic_ofport) + # these flows are lower priority than all VM-specific flows. - # default drop - ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') + # allow all traffic from the physical NIC, as it is trusted (i.e., from a + # filtered vif, or from the physical infrastructure + ovs_ofctl('add-flow', bridge_name, + "priority=2,in_port=%s,actions=normal" % pnic_ofport) + + # default drop + ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop') if __name__ == "__main__": - if len(sys.argv) != 3: + if len(sys.argv) != 4 or sys.argv[1] not in ('online', 'offline', 'reset'): + print sys.argv script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." - print "usage: %s phys-dev-name bridge-name" % script_name - print " ex: %s eth0 xenbr0" % script_name + print "usage: %s [online|offline|reset] phys-dev-name bridge-name" % script_name + print " ex: %s online eth0 xenbr0" % script_name sys.exit(1) else: - phys_dev_name, bridge_name = sys.argv[1:3] - main(phys_dev_name, bridge_name) + command, phys_dev_name, bridge_name = sys.argv[1:4] + main(command, phys_dev_name, bridge_name) -- cgit From 6d04d6e17f753e0573d37992576dedccccf9db93 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 13 May 2011 10:43:43 -0700 Subject: started on integrating HostFilter --- nova/scheduler/host_filter.py | 18 ++++++++++++++++++ nova/scheduler/zone_aware_scheduler.py | 16 ++++++++++++---- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 483f3225c..17f63d4a0 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -286,3 +286,21 @@ def choose_driver(driver_name=None): if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: return driver() raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + +class HostFilterScheduler(ZoneAwareScheduler): + """The HostFilterScheduler uses the HostFilter drivers to filter + hosts for weighing. The particular driver used may be passed in + as an argument or the default will be used.""" + + def filter_hosts(self, num, specs): + """Filter the full host list (from the ZoneManager)""" + driver_name = specs.get("filter_driver", None) + driver = host_filter.choose_driver(driver_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = specs['instance_type'] + query = driver.instance_type_to_filter(query) + return driver.filter_hosts(self.zone_manager, query) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..fde8b6792 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -25,6 +25,7 @@ import operator from nova import log as logging from nova.scheduler import api from nova.scheduler import driver +from nova.scheduler import host_filter LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') @@ -36,7 +37,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. 
Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, topic='compute', specs={}, + def schedule_run_instance(self, context, instance_id, instance_type, *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being @@ -46,6 +47,9 @@ class ZoneAwareScheduler(driver.Scheduler): to simply create the instance (either in this zone or a child zone).""" + # TODO(sandy): We'll have to look for richer specs at some point. + specs = instance_type + if 'blob' in specs: return self.provision_instance(context, topic, specs) @@ -58,7 +62,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Create the requested instance in this Zone or a child zone.""" pass - def select(self, context, *args, **kwargs): + def select(self, context, specs, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal @@ -80,9 +84,13 @@ class ZoneAwareScheduler(driver.Scheduler): ordered by their fitness. """ - #TODO(sandy): extract these from args. + if topic != "compute": + raise NotImplemented(_("Zone Aware Scheduler only understands " + "Compute nodes (for now)")) + + specs = args['instance_type'] + #TODO(sandy): how to infer this from OS API params? num_instances = 1 - specs = {} # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, specs) -- cgit From bccbe3f845e9e7661efefbe456bfa56144de8136 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 19:29:10 +0000 Subject: add udev rules and modified ovs_configure_vif_flows.py to work with udev rules --- .../etc/udev/rules.d/openvswitch-nova.rules | 3 +++ .../xensource/scripts/ovs_configure_vif_flows.py | 28 ++++++++++++---------- 2 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules new file mode 100644 index 000000000..0dfb029eb --- /dev/null +++ b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +# is this one needed? 
+#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index f91a5f49d..95a944a2b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -45,7 +45,14 @@ class OvsFlow(object): execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport) -def main(dom_id, command, net_type, only_this_vif=None): +def main(command, vif_raw, net_type): + if command not in ('online', 'offline'): + return + + vif_name, dom_id, vif_index = vif_raw.split('-') + vif = "%s%s.%s" % (vif_name, dom_id, vif_index) + bridge = "xenbr%s" % vif_index + xsls = execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] @@ -56,13 +63,11 @@ def main(dom_id, command, net_type, only_this_vif=None): (dom_id, mac)) data = json.loads(xsread) if data["label"] == "public": - vif = "vif%s.0" % dom_id - bridge = "xenbr0" + this_vif = "vif%s.0" % dom_id else: - vif = "vif%s.1" % dom_id - bridge = "xenbr1" + this_vif = "vif%s.1" % dom_id - if (only_this_vif is None) or (vif == only_this_vif): + if vif == this_vif: vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', vif, 'ofport') @@ -72,11 +77,11 @@ def main(dom_id, command, net_type, only_this_vif=None): ovs = OvsFlow(bridge, params) - if command in ('offline', 'reset'): + if command == 'offline': # I haven't found a way to clear only IPv4 or IPv6 rules. ovs.clear_flows(vif_ofport) - if command in ('online', 'reset'): + if command == 'online': if net_type in ('ipv4', 'all') and 'ips' in data: for ip4 in data['ips']: ovs.params.update({'IPV4_ADDR': ip4['ip']}) @@ -167,10 +172,9 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": if len(sys.argv) < 3: - print "usage: %s dom_id online|offline|reset ipv4|ipv6|all [vif]" % \ + print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ os.path.basename(sys.argv[0]) sys.exit(1) else: - dom_id, command, net_type = sys.argv[1:4] - vif_name = len(sys.argv) == 5 and sys.argv[4] or None - main(dom_id, command, net_type, vif_name) + command, vif_raw, net_type = sys.argv[1:4] + main(command, vif_raw, net_type) -- cgit From 68e34c790612f3250bd902cc87a0ab9d3d69abfb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 13 May 2011 15:36:42 -0500 Subject: first cut at weighted-sum tests --- nova/scheduler/zone_aware_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..38b395d52 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -89,6 +89,8 @@ class ZoneAwareScheduler(driver.Scheduler): # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... 
-- cgit From fd2861fdcdae0d7a3f13dac7b54d4d8f106f2b3e Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 13 May 2011 21:05:12 +0000 Subject: fix sys.argv requirement --- .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 95a944a2b..2ebc4dd8c 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -171,7 +171,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": - if len(sys.argv) < 3: + if len(sys.argv) != 4: print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ os.path.basename(sys.argv[0]) sys.exit(1) -- cgit From f889f6c8a430d6411a81270a68025a27781b09a2 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 14:15:37 +0400 Subject: Unit test for snapshotting (creating custom image). --- nova/image/fake.py | 33 +++++++++++++++++++++------- nova/tests/test_virt.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 8 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..4bf25d9af 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -19,6 +19,7 @@ import copy import datetime +import random from nova import exception from nova import flags @@ -32,7 +33,7 @@ LOG = logging.getLogger('nova.image.fake') FLAGS = flags.FLAGS -class FakeImageService(service.BaseImageService): +class _FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" def __init__(self): @@ -48,9 +49,10 @@ class FakeImageService(service.BaseImageService): 'container_format': 'ami', 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, - 'ramdisk_id': FLAGS.null_kernel}} + 'ramdisk_id': FLAGS.null_kernel, + 'architecture': 'x86_64'}} self.create(None, image) - super(FakeImageService, self).__init__() + super(_FakeImageService, self).__init__() def index(self, context): """Returns list of images.""" @@ -74,19 +76,28 @@ class FakeImageService(service.BaseImageService): image_id, self.images) raise exception.ImageNotFound(image_id=image_id) - def create(self, context, data): + def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ - image_id = int(data['id']) + #image_id = int(metadata['id']) + # metadata['id'] may not exists, and since image_id is + # randomly generated in local.py, let us do the same here + try: + image_id = int(metadata['id']) + except: + image_id = random.randint(0, 2 ** 31 - 1) + if self.images.get(image_id): raise exception.Duplicate() - self.images[image_id] = copy.deepcopy(data) + metadata['id'] = image_id + self.images[image_id] = copy.deepcopy(metadata) + return self.images[image_id] - def update(self, context, image_id, data): + def update(self, context, image_id, metadata, data=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. 
@@ -95,7 +106,7 @@ class FakeImageService(service.BaseImageService): image_id = int(image_id) if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) - self.images[image_id] = copy.deepcopy(data) + self.images[image_id] = copy.deepcopy(metadata) def delete(self, context, image_id): """Delete the given image. @@ -111,3 +122,9 @@ class FakeImageService(service.BaseImageService): def delete_all(self): """Clears out all images.""" self.images.clear() + +_fakeImageService = _FakeImageService() + + +def FakeImageService(): + return _fakeImageService diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..eb238e871 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -159,6 +159,7 @@ class LibvirtConnTestCase(test.TestCase): 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', + 'image_id': '123456', 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -279,6 +280,62 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) + def test_snapshot(self): + FLAGS.image_service = 'nova.image.fake.FakeImageService' + + # Only file-based instance storages are supported at the moment + test_xml = """ + + + + + + + + """ + + class FakeVirtDomain(object): + + def __init__(self): + pass + + def snapshotCreateXML(self, *args): + return None + + def XMLDesc(self, *args): + return test_xml + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return FakeVirtDomain() + + def fake_execute(*args): + # Touch filename to pass 'with open(out_path)' + open(args[-1], "a").close() + + # Start test + image_service = utils.import_object(FLAGS.image_service) + + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + sent_meta = {'name': 'test-snap', 'is_public': False, + 'properties': properties} + # Create new image. It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') + libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') + libvirt_conn.utils.execute = fake_execute + + self.mox.ReplayAll() + + conn = libvirt_conn.LibvirtConnection(False) + conn.snapshot(instance_ref, recv_meta['id']) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) -- cgit From 818c2424a0547882fe6bdfe6613ee66a248d91db Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 15:11:54 +0400 Subject: Define image state during snapshotting. Name snapshot to the name provided, not generate. 
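In practical terms, the change below makes the compute API register the image in a "creating" state under the caller-supplied snapshot name, and the libvirt driver then marks it active once the upload completes. A small illustration of the metadata involved; the field values are examples, not exact image-service output.

    # what nova.compute.api.API.snapshot() now registers up front
    sent_meta = {'name': 'test-snap',          # caller-provided, no longer generated
                 'is_public': False,
                 'status': 'creating',
                 'properties': {'instance_id': '42',
                                'user_id': 'demo',
                                'image_state': 'creating'}}

    # what the libvirt driver sets once the snapshot upload finishes
    finished_meta = {'status': 'active',
                     'name': sent_meta['name'],
                     'properties': {'image_state': 'available'}}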
--- nova/compute/api.py | 5 +++-- nova/tests/test_virt.py | 10 ++++++++-- nova/virt/libvirt_conn.py | 4 +++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 63884be97..971c0732f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -500,9 +500,10 @@ class API(base.Base): """ properties = {'instance_id': str(instance_id), - 'user_id': str(context.user_id)} + 'user_id': str(context.user_id), + 'image_state': 'creating'} sent_meta = {'name': name, 'is_public': False, - 'properties': properties} + 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index eb238e871..c4fcc21cc 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -320,8 +320,9 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) properties = {'instance_id': instance_ref['id'], 'user_id': str(self.context.user_id)} - sent_meta = {'name': 'test-snap', 'is_public': False, - 'properties': properties} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} # Create new image. It will be updated in snapshot method # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) @@ -336,6 +337,11 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['name'], snapshot_name) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 71cedae54..92d580314 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -451,11 +451,13 @@ class LibvirtConnection(driver.ComputeDriver): elevated = context.get_admin_context() base = image_service.show(elevated, instance['image_id']) + snapshot = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), + 'status': 'active', + 'name': snapshot['name'], 'properties': {'architecture': base['properties']['architecture'], 'kernel_id': instance['kernel_id'], -- cgit From d44299be90bbfcac5f8de1e1264b81fbb0bfa5e2 Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Tue, 17 May 2011 01:00:16 +0900 Subject: Add vnc_keymap flag and enable setting keymap for vnc console. 
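The flag itself is a one-line addition; what it buys operators is a configurable keyboard layout for the VNC console. A minimal sketch of the substitution the template change performs follows. The graphics element is abridged here, and the invocation style (passing --vnc_keymap=ja to nova-compute or setting it in a flag file) is assumed usage, not part of the patch.

    # FLAGS.vnc_keymap defaults to 'en-us' after this change; an operator could
    # override it, e.g. with --vnc_keymap=ja (assumed usage)
    xml_info = {'vncserver_host': '0.0.0.0', 'vnc_keymap': 'en-us'}
    graphics = ("<graphics type='vnc' listen='%(vncserver_host)s' "
                "keymap='%(vnc_keymap)s'/>" % xml_info)
    assert "keymap='en-us'" in graphics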
--- nova/virt/libvirt.xml.template | 2 +- nova/virt/libvirt_conn.py | 1 + nova/vnc/__init__.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de2497a76..20986d4d5 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -116,7 +116,7 @@ #if $getVar('vncserver_host', False) - + #end if diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..7552c9488 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1022,6 +1022,7 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.vnc_enabled: if FLAGS.libvirt_type != 'lxc': xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py index b5b00e44e..859bfd65f 100644 --- a/nova/vnc/__init__.py +++ b/nova/vnc/__init__.py @@ -32,3 +32,5 @@ flags.DEFINE_string('vncserver_host', '0.0.0.0', 'the host interface on which vnc server should listen') flags.DEFINE_bool('vnc_enabled', True, 'enable vnc related features') +flags.DEFINE_string('vnc_keymap', 'en-us', + 'keymap for vnc') -- cgit From 02bba6a8f49b924e9b5b0e69124afd953e8cc3ae Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 16 May 2011 15:37:25 -0700 Subject: basic call going through --- nova/compute/api.py | 7 ++++++- nova/scheduler/host_filter.py | 19 ++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 32 +++++++++++++++----------------- nova/virt/fake.py | 24 ++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 27 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 8a7c713a2..f57d5cd90 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -259,7 +259,12 @@ class API(base.Base): {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, - "instance_type": instance_type, + "request_spec": { + 'instance_type': instance_type, + 'filter_driver': + 'nova.scheduler.host_filter.' 
+ 'InstanceTypeFilter' + }, "availability_zone": availability_zone, "injected_files": injected_files}}) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 17f63d4a0..a47e41da4 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils +from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') @@ -83,8 +84,8 @@ class AllHostsFilter(HostFilter): for host, services in zone_manager.service_states.iteritems()] -class FlavorFilter(HostFilter): - """HostFilter driver hard-coded to work with flavors.""" +class InstanceTypeFilter(HostFilter): + """HostFilter driver hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" @@ -271,7 +272,7 @@ class JsonFilter(HostFilter): return hosts -DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter] +DRIVERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_driver(driver_name=None): @@ -288,19 +289,19 @@ def choose_driver(driver_name=None): raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) -class HostFilterScheduler(ZoneAwareScheduler): +class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): """The HostFilterScheduler uses the HostFilter drivers to filter hosts for weighing. The particular driver used may be passed in as an argument or the default will be used.""" - def filter_hosts(self, num, specs): + def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = specs.get("filter_driver", None) - driver = host_filter.choose_driver(driver_name) + driver_name = request_spec.get("filter_driver", None) + driver = choose_driver(driver_name) # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. - instance_type = specs['instance_type'] - query = driver.instance_type_to_filter(query) + instance_type = request_spec['instance_type'] + query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fde8b6792..f9c5f65f3 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -25,7 +25,6 @@ import operator from nova import log as logging from nova.scheduler import api from nova.scheduler import driver -from nova.scheduler import host_filter LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') @@ -37,7 +36,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, instance_id, instance_type, + def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being @@ -48,13 +47,12 @@ class ZoneAwareScheduler(driver.Scheduler): a child zone).""" # TODO(sandy): We'll have to look for richer specs at some point. 
- specs = instance_type - if 'blob' in specs: - return self.provision_instance(context, topic, specs) + if 'blob' in request_spec: + return self.provision_instance(context, topic, request_spec) # Create build plan and provision ... - build_plan = self.select(context, specs) + build_plan = self.select(context, request_spec) for item in build_plan: self.provision_instance(context, topic, item) @@ -62,24 +60,24 @@ class ZoneAwareScheduler(driver.Scheduler): """Create the requested instance in this Zone or a child zone.""" pass - def select(self, context, specs, *args, **kwargs): + def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children.""" - return self._schedule(context, "compute", *args, **kwargs) + return self._schedule(context, "compute", request_spec, *args, **kwargs) - def schedule(self, context, topic, *args, **kwargs): + def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ - res = self._schedule(context, topic, *args, **kwargs) + res = self._schedule(context, topic, request_spec, *args, **kwargs) # TODO(sirp): should this be a host object rather than a weight-dict? if not res: raise driver.NoValidHost(_('No hosts were available')) return res[0] - def _schedule(self, context, topic, *args, **kwargs): + def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -88,20 +86,20 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - specs = args['instance_type'] + LOG.debug("specs = %s, ARGS = %s" % (request_spec, args, )) #TODO(sandy): how to infer this from OS API params? num_instances = 1 # Filter local hosts based on requirements ... - host_list = self.filter_hosts(num_instances, specs) + host_list = self.filter_hosts(num_instances, request_spec) # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, specs, host_list) + weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... 
child_results = self._call_zone_method(context, "select", - specs=specs) + specs=request_spec) for child_zone, result in child_results: for weighting in result: # Remember the child_zone so we can get back to @@ -116,12 +114,12 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, specs): + def filter_hosts(self, num, request_spec): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" raise NotImplemented() - def weigh_hosts(self, num, specs, hosts): + def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" raise NotImplemented() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 5ac376e46..bf87e5ced 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -82,6 +82,20 @@ class FakeConnection(driver.ComputeDriver): def __init__(self): self.instances = {} + self.host_status = {'host_name-description': 'Fake Host', + 'host_hostname': 'fake-mini', + 'host_memory_total': 8000000000, + 'host_memory_overhead': 10000000, + 'host_memory_free': 7900000000, + 'host_memory_free_computed': 7900000000, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.109', + 'host_cpu_info': {}, + 'disk_available': 500000000000, + 'disk_total': 600000000000, + 'disk_used': 100000000000, + 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', + 'host_name-label': 'fake-mini'} @classmethod def instance(cls): @@ -456,3 +470,13 @@ class FakeConnection(driver.ComputeDriver): def test_remove_vm(self, instance_name): """ Removes the named VM, as if it crashed. For testing""" self.instances.pop(instance_name) + + def update_host_status(self): + """Return fake Host Status of ram, disk, network.""" + return self.host_status + + def get_host_stats(self, refresh=False): + """Return fake Host Status of ram, disk, network.""" + return self.host_status + + -- cgit From 7ed85c9ee57190589efcb22819783d6faf973cc3 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 05:27:50 -0700 Subject: tests fixed and pep8'ed --- nova/scheduler/host_filter.py | 10 +++++++--- nova/scheduler/manager.py | 3 ++- nova/scheduler/zone_aware_scheduler.py | 11 ++++++++--- nova/tests/test_host_filter.py | 11 ++++++----- nova/virt/fake.py | 2 -- 5 files changed, 23 insertions(+), 14 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 8519b8b51..2b0d9af77 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -292,11 +292,15 @@ def choose_driver(driver_name=None): class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): """The HostFilterScheduler uses the HostFilter drivers to filter hosts for weighing. The particular driver used may be passed in - as an argument or the default will be used.""" + as an argument or the default will be used. 
+ + request_spec = {'filter_driver': , + 'instance_type': } + """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = request_spec.get("filter_driver", None) + driver_name = request_spec.get('filter_driver', None) driver = choose_driver(driver_name) # TODO(sandy): We're only using InstanceType-based specs @@ -309,4 +313,4 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" - return [] + return [dict(weight=1, hostname=hostname) for host, caps in hosts] diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 60f98b082..bd40e73c0 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -84,7 +84,8 @@ class SchedulerManager(manager.Manager): host = self.driver.schedule(elevated, topic, *args, **kwargs) if not host: - LOG.debug(_("%(topic)s %(method)s handled in Scheduler") % locals()) + LOG.debug(_("%(topic)s %(method)s handled in Scheduler") + % locals()) return rpc.cast(context, diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 2fc5f1f87..614b1bb89 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -59,20 +59,25 @@ class ZoneAwareScheduler(driver.Scheduler): for item in build_plan: self.provision_instance(context, topic, item) + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + def provision_instance(context, topic, item): """Create the requested instance in this Zone or a child zone.""" - pass + return None def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children.""" - return self._schedule(context, "compute", request_spec, *args, **kwargs) + return self._schedule(context, "compute", request_spec, + *args, **kwargs) # TODO(sandy): We're only focused on compute instances right now, # so we don't implement the default "schedule()" method required - # of Schedulers. + # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index c029d41e6..dd2325cc6 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -85,9 +85,9 @@ class HostFilterTestCase(test.TestCase): 'nova.scheduler.host_filter.AllHostsFilter') # Test valid driver ... driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.FlavorFilter') + 'nova.scheduler.host_filter.InstanceTypeFilter') self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.FlavorFilter') + 'nova.scheduler.host_filter.InstanceTypeFilter') # Test invalid driver ... 
try: host_filter.choose_driver('does not exist') @@ -103,11 +103,12 @@ class HostFilterTestCase(test.TestCase): for host, capabilities in hosts: self.assertTrue(host.startswith('host')) - def test_flavor_driver(self): - driver = host_filter.FlavorFilter() + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) hosts = driver.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] diff --git a/nova/virt/fake.py b/nova/virt/fake.py index bf87e5ced..3bd9fbc93 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -478,5 +478,3 @@ class FakeConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """Return fake Host Status of ram, disk, network.""" return self.host_status - - -- cgit From effa4b37fae0e6fef993ffd2892bb77c0e7245f1 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 05:43:06 -0700 Subject: ugh, fixed again --- nova/tests/test_zone_aware_scheduler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index fdcde34c9..37169fb97 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched.set_zone_manager(zm) fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) -- cgit From 0e2aba9e9869a66a1c3a6ece0fb08be631daa5bf Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:38:44 +0400 Subject: Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. (should be per-api-request) --- nova/auth/ldapdriver.py | 93 ++++++++++++++++++++++++++++++++++++++++++++----- nova/auth/manager.py | 20 +++++++---- 2 files changed, 98 insertions(+), 15 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3f8432851..7849d941e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,7 +24,9 @@ other backends by creating another class that exposes the same public methods. 
""" +import functools import sys +import threading from nova import exception from nova import flags @@ -85,6 +87,7 @@ def _clean(attr): def sanitize(fn): """Decorator to sanitize all args""" + @functools.wraps(fn) def _wrapped(self, *args, **kwargs): args = [_clean(x) for x in args] kwargs = dict((k, _clean(v)) for (k, v) in kwargs) @@ -103,29 +106,74 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' + __local = threading.local() def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') - self.conn = None if FLAGS.ldap_schema_version == 1: LdapDriver.project_pattern = '(objectclass=novaProject)' LdapDriver.isadmin_attribute = 'isAdmin' LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' + self.__cache = None def __enter__(self): """Creates the connection to LDAP""" - self.conn = self.ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + # TODO(yorik-sar): Should be per-request cache, not per-driver-request + self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" - self.conn.unbind_s() + self.__cache = None return False + def __local_cache(key_fmt): + """Wrap function to cache it's result in self.__cache. + Works only with functions with one fixed argument. + """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + + @property + def conn(self): + try: + return self.__local.conn + except AttributeError: + conn = self.ldap.initialize(FLAGS.ldap_url) + conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + self.__local.conn = conn + return conn + + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +182,30 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s'%(access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +458,7 @@ class LdapDriver(object): """Check if project 
exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +490,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +636,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +649,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,10 +677,11 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a7..c71f0f161 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,6 +23,7 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 +import threading import tempfile import uuid import zipfile @@ -206,6 +207,7 @@ class AuthManager(object): """ _instance = None + __local = threading.local() def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -223,12 +225,18 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From e4f8ef67065f1de36ceadf9dd97e07fbe9fc9d83 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:39:19 +0400 Subject: Fixed mistyped key, caused huge performance leak. 
--- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 519793643..5c536f6d8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] -- cgit From 2fcc10656222bea6056742ef943c1b82724c0b56 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:45:48 +0400 Subject: PEP8 fixes. --- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7849d941e..9fe0165a1 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -182,7 +182,7 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" - cache_key = 'uak_dn_%s'%(access,) + cache_key = 'uak_dn_%s' % (access,) user_dn = self.mc.get(cache_key) if user_dn: user = self.__to_user( @@ -205,7 +205,8 @@ class LdapDriver(object): def get_project(self, pid): """Retrieve project by id""" dn = self.__project_to_dn(pid, search=False) - attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) + attr = self.__find_object(dn, LdapDriver.project_pattern, + scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize -- cgit From 84e8893c08cced5f7097b5c90e21a8a06740b3ab Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 07:49:12 -0700 Subject: provision working correctly now --- nova/scheduler/host_filter.py | 19 ++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 26 +++++++++++++++++++++----- nova/virt/fake.py | 7 ++++--- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 2b0d9af77..92ec827d3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -99,9 +99,10 @@ class InstanceTypeFilter(HostFilter): capabilities = services.get('compute', {}) host_ram_mb = capabilities['host_memory_free'] disk_bytes = capabilities['disk_available'] - if host_ram_mb >= instance_type['memory_mb'] and \ - disk_bytes >= instance_type['local_gb']: - selected_hosts.append((host, capabilities)) + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + if host_ram_mb >= spec_ram and disk_bytes >= spec_disk: + selected_hosts.append((host, capabilities)) return selected_hosts #host entries (currently) are like: @@ -110,15 +111,15 @@ class InstanceTypeFilter(HostFilter): # 'host_memory_total': 8244539392, # 'host_memory_overhead': 184225792, # 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776}, -# 'host_other-config': {}, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, # 'host_ip_address': '192.168.1.109', # 'host_cpu_info': {}, # 'disk_available': 32954957824, # 'disk_total': 50394562560, -# 'disk_used': 17439604736}, +# 'disk_used': 17439604736, # 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name-label': 'xs-mini'} +# 'host_name_label': 'xs-mini'} # instance_type table has: #name = Column(String(255), unique=True) @@ -307,10 +308,10 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): # currently. Later we'll need to snoop for more detailed # host filter requests. 
instance_type = request_spec['instance_type'] - query = driver.instance_type_to_filter(instance_type) + name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=hostname) for host, caps in hosts] + return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 614b1bb89..3ebb4caef 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -22,6 +22,8 @@ across zones. There are two expansion points to this class for: import operator +from nova import db +from nova import rpc from nova import log as logging from nova.scheduler import api from nova.scheduler import driver @@ -49,7 +51,8 @@ class ZoneAwareScheduler(driver.Scheduler): # TODO(sandy): We'll have to look for richer specs at some point. if 'blob' in request_spec: - return self.provision_instance(context, topic, request_spec) + return self.provision_resource(context, request_spec, + instance_id, kwargs) # Create build plan and provision ... build_plan = self.select(context, request_spec) @@ -57,14 +60,28 @@ class ZoneAwareScheduler(driver.Scheduler): raise driver.NoValidHost(_('No hosts were available')) for item in build_plan: - self.provision_instance(context, topic, item) + self.provision_resource(context, item, instance_id, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) return None - def provision_instance(context, topic, item): - """Create the requested instance in this Zone or a child zone.""" + def provision_resource(self, context, item, instance_id, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in item: + host = item['hostname'] + kwargs['instance_id'] = instance_id + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Casted to compute %(host)s for run_instance") + % locals()) + else: + # TODO(sandy) Provision in child zone ... + LOG.warning(_("Provision to Child Zone not supported (yet)") + % locals()) + pass return None def select(self, context, request_spec, *args, **kwargs): @@ -93,7 +110,6 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - LOG.debug("specs = %s, ARGS = %s" % (request_spec, args, )) #TODO(sandy): how to infer this from OS API params? 
num_instances = 1 diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 3bd9fbc93..0225797d7 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -82,20 +82,21 @@ class FakeConnection(driver.ComputeDriver): def __init__(self): self.instances = {} - self.host_status = {'host_name-description': 'Fake Host', + self.host_status = { + 'host_name-description': 'Fake Host', 'host_hostname': 'fake-mini', 'host_memory_total': 8000000000, 'host_memory_overhead': 10000000, 'host_memory_free': 7900000000, 'host_memory_free_computed': 7900000000, - 'host_other-config': {}, + 'host_other_config': {}, 'host_ip_address': '192.168.1.109', 'host_cpu_info': {}, 'disk_available': 500000000000, 'disk_total': 600000000000, 'disk_used': 100000000000, 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', - 'host_name-label': 'fake-mini'} + 'host_name_label': 'fake-mini'} @classmethod def instance(cls): -- cgit From b66c689afc5923702b3d6d27a5c8f12f6749b07d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 07:52:02 -0700 Subject: provision_resource no longer returns value --- nova/scheduler/zone_aware_scheduler.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 3ebb4caef..2050c8918 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -51,8 +51,8 @@ class ZoneAwareScheduler(driver.Scheduler): # TODO(sandy): We'll have to look for richer specs at some point. if 'blob' in request_spec: - return self.provision_resource(context, request_spec, - instance_id, kwargs) + self.provision_resource(context, request_spec, instance_id, kwargs) + return None # Create build plan and provision ... build_plan = self.select(context, request_spec) @@ -82,7 +82,6 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass - return None def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information -- cgit From 23bbbfcd3317859d44dba7da7996a978ad922543 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 10:45:19 -0500 Subject: First cut at least cost scheduler --- nova/scheduler/least_cost.py | 79 +++++++++++++++++++++++++++++++++ nova/tests/test_least_cost_scheduler.py | 39 ++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 nova/scheduler/least_cost.py create mode 100644 nova/tests/test_least_cost_scheduler.py diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py new file mode 100644 index 000000000..75dde81ca --- /dev/null +++ b/nova/scheduler/least_cost.py @@ -0,0 +1,79 @@ +import collections + +# TODO(sirp): this should be just `zone_aware` to match naming scheme +# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` +# module +from nova.scheduler import zone_aware_scheduler + +class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def get_cost_fns(self): + """Returns a list of tuples containing weights and cost functions to + use for weighing hosts + """ + cost_fns = [] + + return cost_fns + + def weigh_hosts(self, num, specs, hosts): + """ + Returns a list of dictionaries of form: + [ {weight: weight, hostname: hostname} ] + """ + # FIXME(sirp): weigh_hosts should handle more than just instances + cost_fns = [] + hosts = [] + cost_hosts = weighted_sum(domain=hosts, weighted_fns=self.get_cost_fns()) + + # TODO convert hosts back to hostnames + 
weight_hostnames = [] + return weight_hostnames + +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + +def weighted_sum(domain, weighted_fns, normalize=True): + """ + Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list like: [(score, elem)] + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... + # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + elem = domain[idx] + domain_scores.append(elem_score) + + return domain_scores diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py new file mode 100644 index 000000000..a3a18a09f --- /dev/null +++ b/nova/tests/test_least_cost_scheduler.py @@ -0,0 +1,39 @@ +from nova import test +from nova.scheduler import least_cost + +MB = 1024 * 1024 + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + +class WeightedSumTest(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) -- cgit From 40f15a6ffb0d9ae965f9c7c7289654f323f2775f Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 13:01:28 -0700 Subject: pep8 --- nova/scheduler/zone_aware_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 2050c8918..dc18fc427 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -78,7 +78,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... 
LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass -- cgit From e6fc2fc58d2c98f4322e92b26b1031ca362c8724 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 15:14:17 -0700 Subject: add more statuses for ec2 image registration --- nova/image/s3.py | 84 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 26 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index c38c58d95..673cbf56f 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -161,43 +161,75 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" - parts = [] - for fn_element in manifest.find('image').getiterator('filename'): - part = self._download_file(bucket, fn_element.text, image_path) - parts.append(part) - - # NOTE(vish): this may be suboptimal, should we use cat? - encrypted_filename = os.path.join(image_path, 'image.encrypted') - with open(encrypted_filename, 'w') as combined: - for filename in parts: - with open(filename) as part: - shutil.copyfileobj(part, combined) - - metadata['properties']['image_state'] = 'decrypting' + metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) - hex_key = manifest.find('image/ec2_encrypted_key').text - encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find('image/ec2_encrypted_iv').text - encrypted_iv = binascii.a2b_hex(hex_iv) + try: + parts = [] + elements = manifest.find('image').getiterator('filename') + for fn_element in elements: + part = self._download_file(bucket, + fn_element.text, + image_path) + parts.append(part) + + # NOTE(vish): this may be suboptimal, should we use cat? + enc_filename = os.path.join(image_path, 'image.encrypted') + with open(enc_filename, 'w') as combined: + for filename in parts: + with open(filename) as part: + shutil.copyfileobj(part, combined) + + except Exception: + metadata['properties']['image_state'] = 'failed_download' + self.service.update(context, image_id, metadata) + raise - # FIXME(vish): grab key from common service so this can run on - # any host. - cloud_pk = crypto.key_path(context.project_id) + metadata['properties']['image_state'] = 'decrypting' + self.service.update(context, image_id, metadata) - decrypted_filename = os.path.join(image_path, 'image.tar.gz') - self._decrypt_image(encrypted_filename, encrypted_key, - encrypted_iv, cloud_pk, decrypted_filename) + try: + hex_key = manifest.find('image/ec2_encrypted_key').text + encrypted_key = binascii.a2b_hex(hex_key) + hex_iv = manifest.find('image/ec2_encrypted_iv').text + encrypted_iv = binascii.a2b_hex(hex_iv) + + # FIXME(vish): grab key from common service so this can run on + # any host. 
+ cloud_pk = crypto.key_path(context.project_id) + + dec_filename = os.path.join(image_path, 'image.tar.gz') + self._decrypt_image(enc_filename, encrypted_key, + encrypted_iv, cloud_pk, + dec_filename) + except Exception: + metadata['properties']['image_state'] = 'failed_decrypt' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) - unz_filename = self._untarzip_image(image_path, decrypted_filename) + try: + unz_filename = self._untarzip_image(image_path, dec_filename) + except Exception: + metadata['properties']['image_state'] = 'failed_untar' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'uploading' - with open(unz_filename) as image_file: - self.service.update(context, image_id, metadata, image_file) + self.service.update(context, image_id, metadata) + try: + with open(unz_filename) as image_file: + self.service.update(context, image_id, + metadata, image_file) + except Exception: + metadata['properties']['image_state'] = 'failed_upload' + self.service.update(context, image_id, metadata) + raise + metadata['properties']['image_state'] = 'available' + metadata['status'] = 'active' self.service.update(context, image_id, metadata) shutil.rmtree(image_path) -- cgit From a4035df4d031d3d90f3f7ce938ff0b8305be6773 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:27:04 -0500 Subject: Adding fill first cost function --- nova/scheduler/least_cost.py | 12 ++++++++++++ nova/test.py | 16 +++++++++++++--- nova/tests/test_least_cost_scheduler.py | 33 +++++++++++++++++++++++++++++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index e47951f17..79376c358 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -34,6 +34,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', 'Which cost functions the LeastCostScheduler should use.') +# TODO(sirp): Once we have enough of these rules, we can break them out into a +# cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') def noop_cost_fn(host): @@ -41,6 +43,16 @@ def noop_cost_fn(host): return 1 +flags.DEFINE_integer('fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') +def fill_first_cost_fn(host): + """Prefer hosts that have less ram available, filter_hosts will exclude + hosts that don't have enough ram""" + hostname, caps = host + free_mem = caps['compute']['host_memory_free'] + return free_mem + + class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def get_cost_fns(self): """Returns a list of tuples containing weights and cost functions to diff --git a/nova/test.py b/nova/test.py index 4deb2a175..401f82d38 100644 --- a/nova/test.py +++ b/nova/test.py @@ -181,7 +181,7 @@ class TestCase(unittest.TestCase): wsgi.Server.start = _wrapped_start # Useful assertions - def assertDictMatch(self, d1, d2): + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. 
This is a 'deep' match in the sense that it handles nested @@ -212,15 +212,24 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] + + try: + within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + except ValueError: + # If both values aren't convertable to float, just ignore + within_tolerance = False + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue + elif approx_equal and within_tolerance: + continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % locals()) - def assertDictListMatch(self, L1, L2): + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) @@ -236,4 +245,5 @@ class TestCase(unittest.TestCase): 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): - self.assertDictMatch(d1, d2) + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b2318a3bf..b7bcd2f02 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -119,7 +119,7 @@ class LeastCostSchedulerTestCase(test.TestCase): def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected) + self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): num = 1 @@ -137,12 +137,41 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. 
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) -- cgit From d6fbe417d7f8f7540cffe8c941c0591a22483978 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:44:08 -0500 Subject: Using import_class to import filter_host driver --- nova/scheduler/host_filter.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 7cb41a433..117f08242 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler +from nova import utils LOG = logging.getLogger('nova.scheduler.host_filter') @@ -283,11 +284,13 @@ def choose_driver(driver_name=None): if not driver_name: driver_name = FLAGS.default_host_filter_driver - # FIXME(sirp): use utils.import_class here - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + try: + driver = utils.import_object(driver_name) + return driver + except exception.ClassNotFound: + raise exception.SchedulerHostFilterDriverNotFound( + driver_name=driver_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): -- cgit From 41ea2f4babc474cad64d81c9c95cf02e399a0a64 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 18:57:00 -0400 Subject: added util functions to get image service --- nova/utils.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..e7ce0a79b 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -724,3 +724,51 @@ def parse_server_string(server_str): except: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') + + +def parse_image_ref(image_ref): + """ + Parse an imageRef and return (id, host, port) + + If the image_ref passed in is an integer, it will + return (image_ref, None, None), otherwise it will + return (id, host, port) + + image_ref - imageRef for an image + + """ + + if is_int(image_ref): + return (image_ref, None, None) + + o = urlparse(image_ref) + # Default to port 80 if not passed, should this be 9292? 
+ port = o.port or 80 + host = o.netloc.split(':', 1)[0] + id = o.path.split('/')[-1] + + return (id, host, port) + + +def get_image_service(image_ref): + """ + Get the proper image_service for an image_id + + image_ref - image ref/id for an image + """ + + (image_id, host, port) = parse_image_ref(image_ref) + + image_service = None + + if host: + GlanceImageService = utils.import_class(FLAGS.glance_image_service) + GlanceClient = utils.import_class('glance.client.Client') + + glance_client = GlanceClient(host, port) + image_service = GlanceImageService(glance_client) + else: + ImageService = utils.import_class(FLAGS.image_service) + image_service = ImageService() + + return (image_id, image_service) -- cgit From 5d35b548316eccd5a8454ccf7424ebe60aaf54e6 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:07:44 -0400 Subject: updates to utils methods, initial usage in images.py --- nova/api/openstack/images.py | 14 ++++++-------- nova/utils.py | 10 +++++++--- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..8d796c284 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -82,15 +82,12 @@ class Controller(common.OpenstackController): :param id: Image identifier (integer) """ context = req.environ['nova.context'] + image_id = id try: - image_id = int(id) - except ValueError: - explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - - try: - image = self._image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service( + image_id) + image = image_service.show(context, service_image_id) except exception.NotFound: explanation = _("Image '%d' not found.") % (image_id) raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -105,7 +102,8 @@ class Controller(common.OpenstackController): """ image_id = id context = req.environ['nova.context'] - self._image_service.delete(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image_service.delete(context, service_image_id) return webob.exc.HTTPNoContent() def create(self, req): diff --git a/nova/utils.py b/nova/utils.py index e7ce0a79b..c7da95a97 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -750,12 +750,17 @@ def parse_image_ref(image_ref): return (id, host, port) -def get_image_service(image_ref): +def get_image_service(image_ref=None): """ Get the proper image_service for an image_id + Returns (image_service, image_id) image_ref - image ref/id for an image """ + ImageService = utils.import_class(FLAGS.image_service) + + if not image_ref: + return (ImageService(), -1) (image_id, host, port) = parse_image_ref(image_ref) @@ -768,7 +773,6 @@ def get_image_service(image_ref): glance_client = GlanceClient(host, port) image_service = GlanceImageService(glance_client) else: - ImageService = utils.import_class(FLAGS.image_service) image_service = ImageService() - return (image_id, image_service) + return (image_service, id) -- cgit From dacb4899ea631840fd95ee0bd25d999fbb16b8b4 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:10:11 -0400 Subject: use utils.get_image_service in compute_api --- nova/compute/api.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index a12f8d515..930e4efaa 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -156,7 +156,8 @@ class API(base.Base): 
self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - image = self.image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image = image_service.show(context, service_image_id) os_type = None if 'properties' in image and 'os_type' in image['properties']: @@ -176,9 +177,9 @@ class API(base.Base): logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id)) if kernel_id: - self.image_service.show(context, kernel_id) + image_service.show(context, kernel_id) if ramdisk_id: - self.image_service.show(context, ramdisk_id) + image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -515,6 +516,8 @@ class API(base.Base): 'user_id': str(context.user_id)} sent_meta = {'name': name, 'is_public': False, 'properties': properties} + # TODO(wwolf): not sure if we need to use + # utils.get_image_service() here ? recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, -- cgit From eacb354c159aeb8f428232eb7d678ffb60bb73cd Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:14:35 -0400 Subject: made get_image_service calls in servers.py --- nova/api/openstack/servers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8f2de2afe..bf0f56373 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -142,7 +142,10 @@ class Controller(common.OpenstackController): requested_image_id = self._image_id_from_req_data(env) try: - image_id = common.get_image_id_from_image_hash(self._image_service, + (image_service, service_image_id) = utils.get_image_service( + requested_image_id) + + image_id = common.get_image_id_from_image_hash(image_service, context, requested_image_id) except: msg = _("Can not find requested image") @@ -556,7 +559,8 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + (image_service, service_image_id) = utils.get_image_service(image_id) + image_meta = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( -- cgit From 439787e7588b2409f319f2d86a41a3581cff8861 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:15:31 -0500 Subject: Pep8 fixes --- nova/exception.py | 3 +++ nova/scheduler/least_cost.py | 30 +++++++++++++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 11 ++++++----- nova/test.py | 5 +++-- nova/tests/test_least_cost_scheduler.py | 18 +++++++++++------- 5 files changed, 44 insertions(+), 23 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 63ed6dd5e..16c443c61 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -460,15 +460,18 @@ class FlavorNotFound(NotFound): class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") + # TODO(sirp): move these into the schedule classes since they are internal? 
class SchedulerHostFilterDriverNotFound(NotFound): message = _("Scheduler Host Filter Driver %(driver_name)s could" " not be found.") + class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") + class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 79376c358..629fe2e42 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -13,16 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. """ -Helpful docstring here +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. """ import collections from nova import flags from nova import log as logging -# TODO(sirp): this should be just `zone_aware` to match naming scheme -# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` -# module from nova.scheduler import zone_aware_scheduler from nova import utils @@ -38,6 +41,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') + + def noop_cost_fn(host): """Return a pre-weight cost of 1 for each host""" return 1 @@ -45,6 +50,8 @@ def noop_cost_fn(host): flags.DEFINE_integer('fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') + + def fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" @@ -68,7 +75,7 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( cost_fn_str=cost_fn_str) - + try: weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) except AttributeError: @@ -82,17 +89,22 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname} ]""" + # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, _ in hosts] + hostnames = [hostname for hostname, caps in hosts] cost_fns = self.get_cost_fns() costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - + weighted = [] + weight_log = [] for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname) weighted.append(weight_dict) - return weighted + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted def normalize_list(L): @@ -110,7 +122,7 @@ def weighted_sum(domain, weighted_fns, normalize=True): """Use the weighted-sum method to compute a score for an array of objects. Normalize the results of the objective-functions so that the weights are meaningful regardless of objective-function's range. 
- + domain - input to be scored weighted_fns - list of weights and functions like: [(weight, objective-functions)] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fa5b3b1b6..a1a68ce5e 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -80,7 +80,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass @@ -117,11 +117,11 @@ class ZoneAwareScheduler(driver.Scheduler): # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, request_spec) - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or # resources, so that we can apply different objective functions to it + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... @@ -145,8 +145,9 @@ class ZoneAwareScheduler(driver.Scheduler): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states return [(host, services) - for host, services in self.zone_manager.service_states.iteritems()] + for host, services in service_states.iteritems()] def weigh_hosts(self, num, request_spec, hosts): """Derived classes may override this to provide more sophisticated diff --git a/nova/test.py b/nova/test.py index 401f82d38..00a16dd68 100644 --- a/nova/test.py +++ b/nova/test.py @@ -212,9 +212,10 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] - + try: - within_tolerance = abs(float(d1value) - float(d2value)) < tolerance + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance except ValueError: # If both values aren't convertable to float, just ignore within_tolerance = False diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b7bcd2f02..c8ce7892f 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -30,6 +30,7 @@ class FakeHost(object): self.free_ram = free_ram self.io = io + class WeightedSumTestCase(test.TestCase): def test_empty_domain(self): domain = [] @@ -50,14 +51,15 @@ class WeightedSumTestCase(test.TestCase): (2, lambda h: h.io), # Avoid high I/O ] - costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) # Each 256 MB unit of free-ram contributes 0.5 points by way of: # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 # Each 100 iops of IO adds 0.5 points by way of: # cost = 2 * (100/400) = 2 * 0.25 = 0.5 expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) + self.assertEqual(expected, costs) # TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing @@ -65,6 +67,7 @@ class WeightedSumTestCase(test.TestCase): class FakeZoneManager: pass + class LeastCostSchedulerTestCase(test.TestCase): def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -116,7 +119,6 @@ class LeastCostSchedulerTestCase(test.TestCase): #FLAGS.default_host_filter_driver = self.old_flag super(LeastCostSchedulerTestCase, self).tearDown() - def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) @@ -138,8 +140,9 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - - expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): @@ -152,7 +155,8 @@ class LeastCostSchedulerTestCase(test.TestCase): request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_fill_first_cost_fn(self): @@ -164,7 +168,7 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - + expected = [] for idx, (hostname, caps) in enumerate(hosts): # Costs are normalized so over 10 hosts, each host with increasing -- cgit From d24f59a251173826817e5f5c53a4f54dfe927f2d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Tue, 17 May 2011 19:30:29 -0400 Subject: added is_int function to utils --- nova/utils.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index c7da95a97..fff916527 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -726,6 +726,11 @@ def parse_server_string(server_str): return ('', '') +def is_int(x): + """ Return if passed in variable is integer or not """ + return re.match(r'\d+$', str(x)) + + def parse_image_ref(image_ref): """ Parse an imageRef and return (id, host, port) @@ -757,18 +762,18 @@ def get_image_service(image_ref=None): image_ref - image ref/id for an image """ - ImageService = utils.import_class(FLAGS.image_service) + ImageService = import_class(FLAGS.image_service) if not image_ref: - return (ImageService(), -1) + return (ImageService(), None) (image_id, host, port) = parse_image_ref(image_ref) image_service = None if host: - GlanceImageService = utils.import_class(FLAGS.glance_image_service) - GlanceClient = utils.import_class('glance.client.Client') + GlanceImageService = import_class(FLAGS.glance_image_service) + GlanceClient = import_class('glance.client.Client') glance_client = GlanceClient(host, port) image_service = GlanceImageService(glance_client) -- cgit From 4ba215224e6c75037fd4f20be57d632da5d07469 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:32:56 -0500 Subject: Moving tests into scheduler subdirectory --- nova/scheduler/host_filter.py | 5 - nova/tests/scheduler/test_scheduler.py | 1118 +++++++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 121 +++ nova/tests/test_host_filter.py | 211 ---- nova/tests/test_least_cost_scheduler.py | 181 ---- nova/tests/test_scheduler.py | 1118 --------------------- 
nova/tests/test_zone_aware_scheduler.py | 121 --- 7 files changed, 1239 insertions(+), 1636 deletions(-) create mode 100644 nova/tests/scheduler/test_scheduler.py create mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py delete mode 100644 nova/tests/test_host_filter.py delete mode 100644 nova/tests/test_least_cost_scheduler.py delete mode 100644 nova/tests/test_scheduler.py delete mode 100644 nova/tests/test_zone_aware_scheduler.py diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 117f08242..79e9f3159 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -313,8 +313,3 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): instance_type = request_spec['instance_type'] name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py new file mode 100644 index 000000000..54b3f80fb --- /dev/null +++ b/nova/tests/scheduler/test_scheduler.py @@ -0,0 +1,1118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Scheduler +""" + +import datetime +import mox +import novaclient.exceptions +import stubout +import webob + +from mox import IgnoreArg +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import api +from nova.scheduler import manager +from nova.scheduler import driver +from nova.compute import power_state +from nova.db.sqlalchemy import models + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') +flags.DECLARE('stub_network', 'nova.compute.manager') +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def _create_compute_service(self): + """Create compute-manager(ComputeNode and Service record).""" + ctxt = context.get_admin_context() + dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + s_ref = db.service_create(ctxt, dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + db.compute_node_create(ctxt, dic) + + return db.service_get(ctxt, s_ref['id']) + + def _create_instance(self, **kwargs): + """Create a test instance""" + ctxt = context.get_admin_context() + inst = {} + inst['user_id'] = 'admin' + inst['project_id'] = kwargs.get('project_id', 'fake') + inst['host'] = kwargs.get('host', 'dummy') + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['memory_mb'] = kwargs.get('memory_mb', 10) + inst['local_gb'] = kwargs.get('local_gb', 20) + return db.instance_create(ctxt, inst) + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.named_method(ctxt, 'topic', num=7) + + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + + self.assertRaises(exception.NotFound, scheduler.show_host_resources, + ctxt, dest) + #TODO(bcwaldon): reimplement this functionality + #c1 = (e.message.find(_("does not exist or is not a " + # "compute node.")) >= 0) + + def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" + if not keys: + keys = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used'] + + for key 
in keys: + if not (dic1[key] == dic2[key]): + return False + return True + + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + # result checking + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'] == {} + self.assertTrue(c1 and c2 and c3) + db.service_destroy(ctxt, s_ref['id']) + + def test_show_host_resources_works_correctly(self): + """Show_host_resources() works correctly as expected.""" + + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + s_ref = self._create_compute_service() + i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) + i_ref2 = self._create_instance(project_id='p-02', vcpus=3, + host=s_ref['host']) + + result = scheduler.show_host_resources(ctxt, s_ref['host']) + + c1 = ('resource' in result and 'usage' in result) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) + c3 = result['usage'].keys() == ['p-01', 'p-02'] + keys = ['vcpus', 'memory_mb', 'local_gb'] + c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) + c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) + self.assertTrue(c1 and c2 and c3 and c4 and c5) + + db.service_destroy(ctxt, s_ref['id']) + db.instance_destroy(ctxt, i_ref1['id']) + db.instance_destroy(ctxt, i_ref2['id']) + + +class ZoneSchedulerTestCase(test.TestCase): + """Test case for zone scheduler""" + def setUp(self): + super(ZoneSchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') + + def _create_service_model(self, **kwargs): + service = db.sqlalchemy.models.Service() + service.host = kwargs['host'] + service.disabled = False + service.deleted = False + service.report_count = 0 + service.binary = 'nova-compute' + service.topic = 'compute' + service.id = kwargs['id'] + service.availability_zone = kwargs['zone'] + service.created_at = datetime.datetime.utcnow() + return service + + def test_with_two_zones(self): + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + service_list = [self._create_service_model(id=1, + host='host1', + zone='zone1'), + self._create_service_model(id=2, + host='host2', + zone='zone2'), + self._create_service_model(id=3, + host='host3', + zone='zone2'), + self._create_service_model(id=4, + host='host4', + zone='zone2'), + self._create_service_model(id=5, + host='host5', + zone='zone2')] + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + arg = IgnoreArg() + db.service_get_all_by_topic(arg, arg).AndReturn(service_list) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + rpc.cast(ctxt, + 'compute.host1', + {'method': 'run_instance', + 'args': {'instance_id': 'i-ffffffff', + 'availability_zone': 'zone1'}}) + self.mox.ReplayAll() + scheduler.run_instance(ctxt, + 'compute', + instance_id='i-ffffffff', + availability_zone='zone1') + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True, + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + 
volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() + + def _create_instance(self, **kwargs): + """Create a test instance""" + inst = {} + inst['image_id'] = 1 + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type_id'] = '1' + inst['mac_address'] = utils.generate_mac() + inst['vcpus'] = kwargs.get('vcpus', 1) + inst['ami_launch_index'] = 0 + inst['availability_zone'] = kwargs.get('availability_zone', None) + inst['host'] = kwargs.get('host', 'dummy') + inst['memory_mb'] = kwargs.get('memory_mb', 20) + inst['local_gb'] = kwargs.get('local_gb', 30) + inst['launched_on'] = kwargs.get('launghed_on', 'dummy') + inst['state_description'] = kwargs.get('state_description', 'running') + inst['state'] = kwargs.get('state', power_state.RUNNING) + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['size'] = 1 + vol['availability_zone'] = 'test' + return db.volume_create(self.context, vol)['id'] + + def _create_compute_service(self, **kwargs): + """Create a compute service.""" + + dic = {'binary': 'nova-compute', 'topic': 'compute', + 'report_count': 0, 'availability_zone': 'dummyzone'} + dic['host'] = kwargs.get('host', 'dummy') + s_ref = db.service_create(self.context, dic) + if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): + t = datetime.datetime.utcnow() - datetime.timedelta(0) + dic['created_at'] = kwargs.get('created_at', t) + dic['updated_at'] = kwargs.get('updated_at', t) + db.service_update(self.context, s_ref['id'], dic) + + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) + dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') + dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) + db.compute_node_create(self.context, dic) + return db.service_get(self.context, s_ref['id']) + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + # 
NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + 
instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + dic = {'service_id': s_ref['id'], + 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, + 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, + 'cpu_info': ''} + + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = 
self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_sechedule_if_specified_host_is_down(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = self.start_service('compute', host='host1') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + db.instance_destroy(self.context, instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = 
self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() + + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. + + """ + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, dic) + + # cannot check 2nd argument b/c the addresses of instance object + # is different. + driver_i = self.scheduler.driver + nocare = mox.IgnoreArg() + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_src_check(nocare, nocare) + driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + rpc.cast(self.context, + db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), + {"method": 'live_migration', "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(self.context, FLAGS.compute_topic, + instance_id=instance_id, + dest=i_ref['host']) + + i_ref = db.instance_get(self.context, instance_id) + self.assertTrue(i_ref['state_description'] == 'migrating') + db.instance_destroy(self.context, instance_id) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + + instance_id = self._create_instance(state_description='migrating') + i_ref = db.instance_get(self.context, instance_id) + + try: + self.scheduler.driver._live_migration_src_check(self.context, + i_ref) + except exception.Invalid, e: + c = (e.message.find('is not running') > 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + dic = {'instance_id': instance_id, 'size': 1} + v_ref = db.volume_create(self.context, {'instance_id': instance_id, + 'size': 1}) + t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', + 'topic': 'volume', 'report_count': 0} + s_ref = db.service_create(self.context, dic) + + self.assertRaises(exception.VolumeServiceUnavailable, + self.scheduler.driver.schedule_live_migration, + self.context, instance_id, i_ref['host']) + + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.volume_destroy(self.context, v_ref['id']) + + def test_live_migration_src_check_compute_node_not_alive(self): + """Confirms src-compute node is alive.""" + instance_id = self._create_instance() + i_ref = db.instance_get(self.context, instance_id) + t = datetime.datetime.utcnow() - datetime.timedelta(10) + s_ref = self._create_compute_service(created_at=t, updated_at=t, 
+                                             host=i_ref['host'])
+
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_src_check,
+                          self.context, i_ref)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_src_check_works_correctly(self):
+        """Confirms this method finishes with no error."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host=i_ref['host'])
+
+        ret = self.scheduler.driver._live_migration_src_check(self.context,
+                                                              i_ref)
+
+        self.assertTrue(ret is None)
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_not_alive(self):
+        """Confirms an exception is raised when dest host does not exist."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        t = datetime.datetime.utcnow() - datetime.timedelta(10)
+        s_ref = self._create_compute_service(created_at=t, updated_at=t,
+                                             host=i_ref['host'])
+
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_same_host(self):
+        """Confirms an exception is raised when dest and src are the same."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host=i_ref['host'])
+
+        self.assertRaises(exception.UnableToMigrateToSelf,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_lack_memory(self):
+        """Confirms an exception is raised when dest lacks enough memory."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host='somewhere',
+                                             memory_mb_used=12)
+
+        self.assertRaises(exception.MigrationError,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, 'somewhere')
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_dest_check_service_works_correctly(self):
+        """Confirms method finishes with no error."""
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        s_ref = self._create_compute_service(host='somewhere',
+                                             memory_mb_used=5)
+
+        ret = self.scheduler.driver._live_migration_dest_check(self.context,
+                                                               i_ref,
+                                                               'somewhere')
+        self.assertTrue(ret is None)
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_common_check_service_orig_not_exists(self):
+        """Original host does not exist."""
+
+        dest = 'dummydest'
+        # mocks for live_migration_common_check()
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+        t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+        s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+                                             host=dest)
+
+        # mocks for mounted_on_same_shared_storage()
+        fpath = '/test/20110127120000'
+        self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+        topic = FLAGS.compute_topic
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(self.context, topic, dest),
+            {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+            {"method": 'check_shared_storage_test_file',
+             "args": {'filename': fpath}})
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(mox.IgnoreArg(), topic, dest),
+            {"method": 'cleanup_shared_storage_test_file',
+             "args": {'filename': fpath}})
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.SourceHostUnavailable,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+
+    def test_live_migration_common_check_service_different_hypervisor(self):
+        """Original host and dest host have different hypervisor types."""
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidHypervisorType,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.service_destroy(self.context, s_ref2['id'])
+
+    def test_live_migration_common_check_service_different_version(self):
+        """Original host and dest host have different hypervisor versions."""
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest,
+                                              hypervisor_version=12002)
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.DestinationHypervisorTooOld,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)
+
+        db.instance_destroy(self.context, instance_id)
+        db.service_destroy(self.context, s_ref['id'])
+        db.service_destroy(self.context, s_ref2['id'])
+
+    def test_live_migration_common_check_checking_cpuinfo_fail(self):
+        """Raise exception when original host doesn't have a compatible cpu."""
+
+        dest = 'dummydest'
+        instance_id = self._create_instance()
+        i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for destination
+        s_ref2 = self._create_compute_service(host=dest)
+
+        # mocks
+        driver = self.scheduler.driver
+        self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+        driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+        self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+        rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+            {"method": 'compare_cpu',
+             "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+             AndRaise(rpc.RemoteError("doesn't have 
compatibility to", "", "")) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(self.context, + i_ref, + dest) + except rpc.RemoteError, e: + c = (e.message.find(_("doesn't have compatibility to")) >= 0) + + self.assertTrue(c) + db.instance_destroy(self.context, instance_id) + db.service_destroy(self.context, s_ref['id']) + db.service_destroy(self.context, s_ref2['id']) + + +class FakeZone(object): + def __init__(self, id, api_url, username, password): + self.id = id + self.api_url = api_url + self.username = username + self.password = password + + +def zone_get_all(context): + return [ + FakeZone(1, 'http://example.com', 'bob', 'xxx'), + ] + + +class FakeRerouteCompute(api.reroute_compute): + def _call_child_zones(self, zones, function): + return [] + + def get_collection_context_and_id(self, args, kwargs): + return ("servers", None, 1) + + def unmarshall_result(self, zone_responses): + return dict(magic="found me") + + +def go_boom(self, context, instance): + raise exception.InstanceNotFound(instance_id=instance) + + +def found_instance(self, context, instance): + return dict(name='myserver') + + +class FakeResource(object): + def __init__(self, attribute_dict): + for k, v in attribute_dict.iteritems(): + setattr(self, k, v) + + def pause(self): + pass + + +class ZoneRedirectTest(test.TestCase): + def setUp(self): + super(ZoneRedirectTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + + self.stubs.Set(db, 'zone_get_all', zone_get_all) + + self.enable_zone_routing = FLAGS.enable_zone_routing + FLAGS.enable_zone_routing = True + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.enable_zone_routing = self.enable_zone_routing + super(ZoneRedirectTest, self).tearDown() + + def test_trap_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(found_instance)(None, None, 1) + except api.RedirectResult, e: + self.fail(_("Successful database hit should succeed")) + + def test_trap_not_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(go_boom)(None, None, 1) + self.assertFail(_("Should have rerouted.")) + except api.RedirectResult, e: + self.assertEquals(e.results['magic'], 'found me') + + def test_routing_flags(self): + FLAGS.enable_zone_routing = False + decorator = FakeRerouteCompute("foo") + self.assertRaises(exception.InstanceNotFound, decorator(go_boom), + None, None, 1) + + def test_get_collection_context_and_id(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.get_collection_context_and_id( + (None, 10, 20), {}), ("servers", 10, 20)) + self.assertEquals(decorator.get_collection_context_and_id( + (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) + self.assertEquals(decorator.get_collection_context_and_id( + (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) + + def test_unmarshal_single_server(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.unmarshall_result([]), {}) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, b=2)), ]), + dict(server=dict(a=1, b=2))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, _b=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, manager=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(_a=1, manager=2)), ]), + dict(server={})) + + +class FakeServerCollection(object): + def get(self, instance_id): + 
return FakeResource(dict(a=10, b=20)) + + def find(self, name): + return FakeResource(dict(a=11, b=22)) + + +class FakeEmptyServerCollection(object): + def get(self, f): + raise novaclient.NotFound(1) + + def find(self, name): + raise novaclient.NotFound(2) + + +class FakeNovaClient(object): + def __init__(self, collection): + self.servers = collection + + +class DynamicNovaClientTest(test.TestCase): + def test_issue_novaclient_command_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "get", 100).a, 10) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "find", "name").b, 22) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "pause", 100), None) + + def test_issue_novaclient_command_not_found(self): + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "get", 100), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "find", "name"), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. + expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py new file mode 100644 index 000000000..37169fb97 --- /dev/null +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -0,0 +1,121 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 1a2a86a79..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filter Drivers. -""" - -import json - -from nova import exception -from nova import flags -from nova import test -from nova.scheduler import host_filter - -FLAGS = flags.FLAGS - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ - 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag - super(HostFilterTestCase, self).tearDown() - - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... 
- try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: - pass - - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_driver(self): - driver = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - driver.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) - - try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py deleted file mode 100644 index c8ce7892f..000000000 --- a/nova/tests/test_least_cost_scheduler.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Least Cost Scheduler -""" - -from nova import flags -from nova import test -from nova.scheduler import least_cost - -MB = 1024 * 1024 -FLAGS = flags.FLAGS - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100) - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - -class LeastCostSchedulerTestCase(test.TestCase): - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... 
don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - #self.old_flag = FLAGS.default_host_filter_driver - #FLAGS.default_host_filter_driver = \ - # 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - #FLAGS.default_host_filter_driver = self.old_flag - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, request_spec, hosts) - - def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=1, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 2 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=2, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn' - ] - FLAGS.fill_first_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [] - for idx, (hostname, caps) in enumerate(hosts): - # Costs are normalized so over 10 hosts, each host with increasing - # free ram will cost 1/N more. Since the lowest cost host has some - # free ram, we add in the 1/N for the base_cost - weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) - - self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py deleted file mode 100644 index 54b3f80fb..000000000 --- a/nova/tests/test_scheduler.py +++ /dev/null @@ -1,1118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler -""" - -import datetime -import mox -import novaclient.exceptions -import stubout -import webob - -from mox import IgnoreArg -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import api -from nova.scheduler import manager -from nova.scheduler import driver -from nova.compute import power_state -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') -flags.DECLARE('stub_network', 'nova.compute.manager') -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') - - def _create_compute_service(self): - """Create compute-manager(ComputeNode and Service record).""" - ctxt = context.get_admin_context() - dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - s_ref = db.service_create(ctxt, dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - db.compute_node_create(ctxt, dic) - - return db.service_get(ctxt, s_ref['id']) - - def _create_instance(self, **kwargs): - """Create a test instance""" - ctxt = context.get_admin_context() - inst = {} - inst['user_id'] = 'admin' - inst['project_id'] = kwargs.get('project_id', 'fake') - inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['memory_mb'] = kwargs.get('memory_mb', 10) - inst['local_gb'] = kwargs.get('local_gb', 20) - return db.instance_create(ctxt, inst) - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - def test_show_host_resources_host_not_exit(self): - """A host given as an argument does not 
exists.""" - - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - - self.assertRaises(exception.NotFound, scheduler.show_host_resources, - ctxt, dest) - #TODO(bcwaldon): reimplement this functionality - #c1 = (e.message.find(_("does not exist or is not a " - # "compute node.")) >= 0) - - def _dic_is_equal(self, dic1, dic2, keys=None): - """Compares 2 dictionary contents(Helper method)""" - if not keys: - keys = ['vcpus', 'memory_mb', 'local_gb', - 'vcpus_used', 'memory_mb_used', 'local_gb_used'] - - for key in keys: - if not (dic1[key] == dic2[key]): - return False - return True - - def test_show_host_resources_no_project(self): - """No instance are running on the given host.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - # result checking - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'] == {} - self.assertTrue(c1 and c2 and c3) - db.service_destroy(ctxt, s_ref['id']) - - def test_show_host_resources_works_correctly(self): - """Show_host_resources() works correctly as expected.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) - i_ref2 = self._create_instance(project_id='p-02', vcpus=3, - host=s_ref['host']) - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'].keys() == ['p-01', 'p-02'] - keys = ['vcpus', 'memory_mb', 'local_gb'] - c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) - c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) - self.assertTrue(c1 and c2 and c3 and c4 and c5) - - db.service_destroy(ctxt, s_ref['id']) - db.instance_destroy(ctxt, i_ref1['id']) - db.instance_destroy(ctxt, i_ref2['id']) - - -class ZoneSchedulerTestCase(test.TestCase): - """Test case for zone scheduler""" - def setUp(self): - super(ZoneSchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') - - def _create_service_model(self, **kwargs): - service = db.sqlalchemy.models.Service() - service.host = kwargs['host'] - service.disabled = False - service.deleted = False - service.report_count = 0 - service.binary = 'nova-compute' - service.topic = 'compute' - service.id = kwargs['id'] - service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() - return service - - def test_with_two_zones(self): - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - service_list = [self._create_service_model(id=1, - host='host1', - zone='zone1'), - self._create_service_model(id=2, - host='host2', - zone='zone2'), - self._create_service_model(id=3, - host='host3', - zone='zone2'), - self._create_service_model(id=4, - host='host4', - zone='zone2'), - self._create_service_model(id=5, - host='host5', - zone='zone2')] - self.mox.StubOutWithMock(db, 'service_get_all_by_topic') - arg = IgnoreArg() - db.service_get_all_by_topic(arg, arg).AndReturn(service_list) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast(ctxt, - 'compute.host1', - 
{'method': 'run_instance', - 'args': {'instance_id': 'i-ffffffff', - 'availability_zone': 'zone1'}}) - self.mox.ReplayAll() - scheduler.run_instance(ctxt, - 'compute', - instance_id='i-ffffffff', - availability_zone='zone1') - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True, - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() - - def _create_instance(self, **kwargs): - """Create a test instance""" - inst = {} - inst['image_id'] = 1 - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['ami_launch_index'] = 0 - inst['availability_zone'] = kwargs.get('availability_zone', None) - inst['host'] = kwargs.get('host', 'dummy') - inst['memory_mb'] = kwargs.get('memory_mb', 20) - inst['local_gb'] = kwargs.get('local_gb', 30) - inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['size'] = 1 - vol['availability_zone'] = 'test' - return db.volume_create(self.context, vol)['id'] - - def _create_compute_service(self, **kwargs): - """Create a compute service.""" - - dic = {'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - dic['host'] = kwargs.get('host', 'dummy') - s_ref = db.service_create(self.context, dic) - if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) - dic['created_at'] = kwargs.get('created_at', t) - dic['updated_at'] = kwargs.get('updated_at', t) - db.service_update(self.context, s_ref['id'], dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) - dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') - dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_node_create(self.context, dic) - return db.service_get(self.context, s_ref['id']) - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - 
compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = 
self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = 
self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - db.instance_destroy(self.context, instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - 
db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() - - def test_scheduler_live_migration_with_volume(self): - """scheduler_live_migration() works correctly as expected. - - Also, checks instance state is changed from 'running' -> 'migrating'. - - """ - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, dic) - - # cannot check 2nd argument b/c the addresses of instance object - # is different. - driver_i = self.scheduler.driver - nocare = mox.IgnoreArg() - self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') - driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} - rpc.cast(self.context, - db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), - {"method": 'live_migration', "args": kwargs}) - - self.mox.ReplayAll() - self.scheduler.live_migration(self.context, FLAGS.compute_topic, - instance_id=instance_id, - dest=i_ref['host']) - - i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') - db.instance_destroy(self.context, instance_id) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_instance_not_running(self): - """The instance given by instance_id is not running.""" - - instance_id = self._create_instance(state_description='migrating') - i_ref = db.instance_get(self.context, instance_id) - - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not running') > 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - - def test_live_migration_src_check_volume_node_not_alive(self): - """Raise exception when volume node is not alive.""" - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, {'instance_id': instance_id, - 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) - dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', - 'topic': 'volume', 'report_count': 0} - s_ref = db.service_create(self.context, dic) - - 
self.assertRaises(exception.VolumeServiceUnavailable, - self.scheduler.driver.schedule_live_migration, - self.context, instance_id, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_compute_node_not_alive(self): - """Confirms src-compute node is alive.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_src_check, - self.context, i_ref) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_src_check_works_correctly(self): - """Confirms this method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - ret = self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_not_alive(self): - """Confirms exception raises in case dest host does not exist.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_same_host(self): - """Confirms exceptioin raises in case dest and src is same host.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - self.assertRaises(exception.UnableToMigrateToSelf, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_lack_memory(self): - """Confirms exception raises when dest doesn't have enough memory.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) - - self.assertRaises(exception.MigrationError, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_works_correctly(self): - """Confirms method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=5) - - ret = self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - 
- def test_live_migration_common_check_service_orig_not_exists(self): - """Destination host does not exist.""" - - dest = 'dummydest' - # mocks for live_migration_common_check() - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t1, updated_at=t1, - host=dest) - - # mocks for mounted_on_same_shared_storage() - fpath = '/test/20110127120000' - self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) - topic = FLAGS.compute_topic - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(self.context, topic, dest), - {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'check_shared_storage_test_file', - "args": {'filename': fpath}}) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, dest), - {"method": 'cleanup_shared_storage_test_file', - "args": {'filename': fpath}}) - - self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_different_hypervisor(self): - """Original host and dest host has different hypervisor type.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_service_different_version(self): - """Original host and dest host has different hypervisor version.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, - hypervisor_version=12002) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_checking_cpuinfo_fail(self): - """Raise excetion when original host doen't have compatible cpu.""" - - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for 
destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) - rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), - {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) - - self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except rpc.RemoteError, e: - c = (e.message.find(_("doesn't have compatibility to")) >= 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - -class FakeZone(object): - def __init__(self, id, api_url, username, password): - self.id = id - self.api_url = api_url - self.username = username - self.password = password - - -def zone_get_all(context): - return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), - ] - - -class FakeRerouteCompute(api.reroute_compute): - def _call_child_zones(self, zones, function): - return [] - - def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) - - def unmarshall_result(self, zone_responses): - return dict(magic="found me") - - -def go_boom(self, context, instance): - raise exception.InstanceNotFound(instance_id=instance) - - -def found_instance(self, context, instance): - return dict(name='myserver') - - -class FakeResource(object): - def __init__(self, attribute_dict): - for k, v in attribute_dict.iteritems(): - setattr(self, k, v) - - def pause(self): - pass - - -class ZoneRedirectTest(test.TestCase): - def setUp(self): - super(ZoneRedirectTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - - self.stubs.Set(db, 'zone_get_all', zone_get_all) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing - super(ZoneRedirectTest, self).tearDown() - - def test_trap_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(found_instance)(None, None, 1) - except api.RedirectResult, e: - self.fail(_("Successful database hit should succeed")) - - def test_trap_not_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) - except api.RedirectResult, e: - self.assertEquals(e.results['magic'], 'found me') - - def test_routing_flags(self): - FLAGS.enable_zone_routing = False - decorator = FakeRerouteCompute("foo") - self.assertRaises(exception.InstanceNotFound, decorator(go_boom), - None, None, 1) - - def test_get_collection_context_and_id(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.get_collection_context_and_id( - (None, 10, 20), {}), ("servers", 10, 20)) - self.assertEquals(decorator.get_collection_context_and_id( - (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) - self.assertEquals(decorator.get_collection_context_and_id( - (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) - - def test_unmarshal_single_server(self): - decorator = api.reroute_compute("foo") - 
self.assertEquals(decorator.unmarshall_result([]), {}) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, b=2)), ]), - dict(server=dict(a=1, b=2))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, _b=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, manager=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(_a=1, manager=2)), ]), - dict(server={})) - - -class FakeServerCollection(object): - def get(self, instance_id): - return FakeResource(dict(a=10, b=20)) - - def find(self, name): - return FakeResource(dict(a=11, b=22)) - - -class FakeEmptyServerCollection(object): - def get(self, f): - raise novaclient.NotFound(1) - - def find(self, name): - raise novaclient.NotFound(2) - - -class FakeNovaClient(object): - def __init__(self, collection): - self.servers = collection - - -class DynamicNovaClientTest(test.TestCase): - def test_issue_novaclient_command_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "get", 100).a, 10) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "pause", 100), None) - - def test_issue_novaclient_command_not_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "get", 100), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "any", "name"), None) - - -class FakeZonesProxy(object): - def do_something(*args, **kwargs): - return 42 - - def raises_exception(*args, **kwargs): - raise Exception('testing') - - -class FakeNovaClientOpenStack(object): - def __init__(self, *args, **kwargs): - self.zones = FakeZonesProxy() - - def authenticate(self): - pass - - -class CallZoneMethodTest(test.TestCase): - def setUp(self): - super(CallZoneMethodTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) - - def tearDown(self): - self.stubs.UnsetAll() - super(CallZoneMethodTest, self).tearDown() - - def test_call_zone_method(self): - context = {} - method = 'do_something' - results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) - - def test_call_zone_method_not_present(self): - context = {} - method = 'not_present' - self.assertRaises(AttributeError, api.call_zone_method, - context, method) - - def test_call_zone_method_generates_exception(self): - context = {} - method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. 
- expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py deleted file mode 100644 index 37169fb97..000000000 --- a/nova/tests/test_zone_aware_scheduler.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -from nova import test -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'ram': 1000} - }, - 'host2': { - 'compute': {'ram': 2000} - }, - 'host3': { - 'compute': {'ram': 3000} - } - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs): - return [] - - -def fake_call_zone_method(context, method, specs): - return [ - ('zone1', [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - ('zone2', [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - ('zone3', [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) -- cgit From 967d82669ae07b2add3289e3decad60aea2657d8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:49:21 -0500 Subject: Moving into scheduler subdir and refactoring out common code --- nova/tests/scheduler/__init__.py | 0 nova/tests/scheduler/test_host_filter.py | 189 ++++++++++++++++++++++ nova/tests/scheduler/test_least_cost_scheduler.py | 146 +++++++++++++++++ nova/tests/scheduler/test_zone_aware_scheduler.py | 31 ++++ 4 files changed, 366 insertions(+) create mode 100644 nova/tests/scheduler/__init__.py create mode 100644 nova/tests/scheduler/test_host_filter.py create mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 000000000..c3af50a6e --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,189 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter +from nova.tests.scheduler import test_zone_aware_scheduler + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def setUp(self): + super(HostFilterTestCase, self).setUp() + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + super(HostFilterTestCase, self).tearDown() + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 000000000..e0ed61417 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +# TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing +# test setup code +class FakeZoneManager: + pass + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37169fb97..b2cc4fe23 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] -- cgit From 1b610e28e40c77271191349b6bfaa56c8f522c24 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:53:00 -0500 Subject: Small cleanups --- nova/tests/scheduler/test_host_filter.py | 7 +++---- nova/tests/scheduler/test_least_cost_scheduler.py | 10 ++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index c3af50a6e..edbab7ab4 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -27,10 +27,6 @@ from nova.tests.scheduler import test_zone_aware_scheduler FLAGS = flags.FLAGS -class FakeZoneManager: - pass - - class HostFilterTestCase(test.TestCase): """Test case for host filter drivers.""" @@ -48,6 +44,9 @@ class HostFilterTestCase(test.TestCase): rxtx_quota=30000, rxtx_cap=200) + class FakeZoneManager: + pass + self.zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index e0ed61417..506fa62fb 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -63,15 +63,13 @@ class WeightedSumTestCase(test.TestCase): self.assertEqual(expected, costs) -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - class LeastCostSchedulerTestCase(test.TestCase): def setUp(self): super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( -- cgit From 6c151bfbfeb728d6e38f777640d483c1e344113d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 18 May 2011 03:51:25 -0400 Subject: Removed all utils.import_object(FLAGS.image_service) and replaced with utils.get_default_image_service(). 
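
A sketch of the intended call sites (illustrative only; image_ref below
stands for any image href or integer id, as described in the new
nova/utils.py helpers in this patch):

    from nova import utils

    # Default service, selected by FLAGS.image_service.
    image_service = utils.get_default_image_service()

    # Resolve a service from an image ref: an int such as 42 uses the
    # default service, while an href such as
    # http://myglanceserver:9292/images/42 builds a Glance client for
    # that host and port.
    image_service, image_id = utils.get_image_service(image_ref)
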
--- bin/nova-manage | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 5 ++- nova/api/openstack/servers.py | 22 ++++++------ nova/api/openstack/views/servers.py | 10 +++--- nova/compute/api.py | 4 +-- nova/image/s3.py | 4 +-- nova/tests/api/openstack/test_servers.py | 11 +++--- nova/utils.py | 60 +++++++++++++++----------------- nova/virt/images.py | 2 +- nova/virt/libvirt_conn.py | 2 +- 11 files changed, 58 insertions(+), 66 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index db964064d..3f3fd72a6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -905,7 +905,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = utils.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 1eccc0174..f6913ffc6 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -32,7 +32,7 @@ class Controller(common.OpenstackController): """The image metadata API controller for the Openstack API""" def __init__(self): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = utils.get_default_image_service() super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 8d796c284..8a90b4c4d 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -47,11 +47,10 @@ class Controller(common.OpenstackController): :param compute_service: `nova.compute.api:API` :param image_service: `nova.image.service:BaseImageService` - """ - _default_service = utils.import_object(flags.FLAGS.image_service) + """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or _default_service + self._image_service = image_service or utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. 
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bf0f56373..4e8574994 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -64,7 +64,6 @@ class Controller(common.OpenstackController): def __init__(self): self.compute_api = compute.API() - self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() def index(self, req): @@ -75,7 +74,7 @@ class Controller(common.OpenstackController): """ Returns a list of server details for a given user """ return self._items(req, is_detail=True) - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): raise NotImplementedError() def _flavor_id_from_req_data(self, data): @@ -140,13 +139,13 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - requested_image_id = self._image_id_from_req_data(env) + image_ref = self._image_ref_from_req_data(env) try: - (image_service, service_image_id) = utils.get_image_service( - requested_image_id) + (image_service, image_id) = utils.get_image_service( image_ref) - image_id = common.get_image_id_from_image_hash(image_service, - context, requested_image_id) + #TODO: need to assert image exists a better way + #image_id = common.get_image_id_from_image_hash(image_service, + #context, image_ref) except: msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) @@ -188,7 +187,7 @@ class Controller(common.OpenstackController): self._handle_quota_error(error) inst['instance_type'] = inst_type - inst['image_id'] = requested_image_id + inst['image_id'] = image_ref builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) @@ -596,7 +595,7 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): return data['server']['imageId'] def _flavor_id_from_req_data(self, data): @@ -639,9 +638,8 @@ class ControllerV10(Controller): class ControllerV11(Controller): - def _image_id_from_req_data(self, data): - href = data['server']['imageRef'] - return common.get_id_from_href(href) + def _image_ref_from_req_data(self, data): + return data['server']['imageRef'] def _flavor_id_from_req_data(self, data): href = data['server']['flavorRef'] diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0be468edc..70a942594 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -113,7 +113,7 @@ class ViewBuilderV10(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - response['imageId'] = inst['image_id'] + response['imageId'] = int(inst['image_id']) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): @@ -130,9 +130,11 @@ class ViewBuilderV11(ViewBuilder): self.base_url = base_url def _build_image(self, response, inst): - if "image_id" in dict(inst): - image_id = inst.get("image_id") - response["imageRef"] = self.image_builder.generate_href(image_id) + if 'image_id' in dict(inst): + image_id = inst['image_id'] + if utils.is_int(image_id): + image_id = int(image_id) + response['imageRef'] = image_id def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index 930e4efaa..4e7af7421 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -58,9 +58,7 @@ class API(base.Base): def __init__(self, 
image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service + self.image_service = image_service or utils.get_default_image_service() if not network_api: network_api = network.API() self.network_api = network_api diff --git a/nova/image/s3.py b/nova/image/s3.py index c38c58d95..ed685ea51 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -46,9 +46,7 @@ class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): - if service is None: - service = utils.import_object(FLAGS.image_service) - self.service = service + self.service = service or utils.get_default_image_service() self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e8182b6a9..cfa8d2556 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -628,13 +628,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' - image_ref_local = '2' + image_ref = 2 flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref_local, + 'imageRef': image_ref, 'flavorRef': flavor_ref, }, } @@ -852,7 +851,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -866,7 +865,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10') + self.assertEqual(s['imageRef'], 10) self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1') self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -898,7 +897,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) def test_server_pause(self): diff --git a/nova/utils.py b/nova/utils.py index fff916527..3c8c82281 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,6 +35,7 @@ import struct import sys import time import types +from urlparse import urlparse from xml.sax import saxutils from eventlet import event @@ -42,6 +43,7 @@ from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess +import nova from nova import exception from nova import flags from nova import log as logging @@ -727,57 +729,53 @@ def parse_server_string(server_str): def is_int(x): - """ Return if passed in variable is integer or not """ return re.match(r'\d+$', str(x)) def parse_image_ref(image_ref): - """ - Parse an imageRef and return (id, host, port) + """Parse an image href into composite parts. 
If the image_ref passed in is an integer, it will return (image_ref, None, None), otherwise it will - return (id, host, port) + return (image_id, host, port) - image_ref - imageRef for an image + :param image_ref: href or id of an image """ - if is_int(image_ref): - return (image_ref, None, None) + return (int(image_ref), None, None) o = urlparse(image_ref) - # Default to port 80 if not passed, should this be 9292? - port = o.port or 80 + port = o.port host = o.netloc.split(':', 1)[0] - id = o.path.split('/')[-1] - - return (id, host, port) + image_id = o.path.split('/')[-1] + if is_int(image_id): + image_id = int(image_id) + else: + raise Exception(_('image_ref [%s] is missing a proper id') % image_ref) + return (image_id, host, port) -def get_image_service(image_ref=None): - """ - Get the proper image_service for an image_id - Returns (image_service, image_id) - image_ref - image ref/id for an image - """ +def get_default_image_service(): ImageService = import_class(FLAGS.image_service) + return ImageService() - if not image_ref: - return (ImageService(), None) - - (image_id, host, port) = parse_image_ref(image_ref) +def get_image_service(image_ref): + """Get the proper image_service and id for the given image_ref. - image_service = None + The image_ref param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_ref is an int, then the default image service is returned. - if host: - GlanceImageService = import_class(FLAGS.glance_image_service) - GlanceClient = import_class('glance.client.Client') + :param image_ref: image ref/id for an image + :returns: a tuple of the form (image_service, image_id) - glance_client = GlanceClient(host, port) - image_service = GlanceImageService(glance_client) - else: - image_service = ImageService() + """ + if is_int(image_ref): + return (get_default_image_service(), int(image_ref)) - return (image_service, id) + (image_id, host, port) = parse_image_ref(image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) + return (image_service, image_id) diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..0828a1fd0 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -45,7 +45,7 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - image_service = utils.import_object(FLAGS.image_service) + image_service = utils.get_default_image_service() with open(path, "wb") as image_file: elevated = context.get_admin_context() metadata = image_service.get(elevated, image_id, image_file) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index fa918b0a3..23fa5bdfc 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -448,7 +448,7 @@ class LibvirtConnection(driver.ComputeDriver): to support this command. 
""" - image_service = utils.import_object(FLAGS.image_service) + image_service = utils.get_default_image_service() virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() -- cgit From 375fdc745fc5915098f11585ccd6a91e86747086 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 09:50:18 -0400 Subject: get integrated server_tests passing --- nova/flags.py | 3 +++ nova/image/fake.py | 2 +- nova/tests/integrated/integrated_helpers.py | 4 +--- nova/tests/integrated/test_servers.py | 17 +++++++++-------- nova/utils.py | 5 +++-- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 519793643..2481a10af 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,6 +362,9 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') +DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', + 'The service to use for retrieving and searching for ' + + 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..8918c0c14 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -35,7 +35,7 @@ FLAGS = flags.FLAGS class FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" - def __init__(self): + def __init__(self, client=None): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 2e5d67017..e6efc16c5 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -185,6 +185,7 @@ class _IntegratedTestBase(test.TestCase): """An opportunity to setup flags, before the services are started.""" f = {} f['image_service'] = 'nova.image.fake.FakeImageService' + f['glance_image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f @@ -201,9 +202,6 @@ class _IntegratedTestBase(test.TestCase): LOG.warning("imageRef not yet in images output") image_ref = image['id'] - # TODO(justinsb): This is FUBAR - image_ref = abs(hash(image_ref)) - image_ref = 'http://fake.server/%s' % image_ref # We now have a valid imageId diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..ba764907a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -51,14 +51,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without an imageRef, this throws 500. # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # With an invalid imageRef, this throws 500. 
server['imageRef'] = self.user.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Add a valid imageId/imageRef server['imageId'] = good_server.get('imageId') @@ -66,8 +66,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without flavorId, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Set a valid flavorId/flavorRef server['flavorRef'] = good_server.get('flavorRef') @@ -75,8 +75,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without a name, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - self.assertRaises(client.OpenStackApiException, - self.api.post_server, post) + #self.assertRaises(client.OpenStackApiException, + #self.api.post_server, post) # Set a valid server name server['name'] = good_server['name'] @@ -85,6 +85,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] + return # Check it's there found_server = self.api.get_server(created_server_id) diff --git a/nova/utils.py b/nova/utils.py index 3c8c82281..46dfc82e9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -776,6 +776,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) (image_id, host, port) = parse_image_ref(image_ref) - glance_client = nova.image.glance.GlanceClient(host, port) - image_service = nova.image.glance.GlanceImageService(glance_client) + glance_client = import_class('nova.image.glance.GlanceClient')(host, + port) + image_service = import_class(FLAGS.glance_image_service)(glance_client) return (image_service, image_id) -- cgit From d3f67f97d81185158f611c3bc9bd5542a7fed788 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 09:52:37 -0400 Subject: fixed test_servers small tests as well --- nova/tests/api/openstack/test_servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index cfa8d2556..6982f87a8 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -464,6 +464,8 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 + + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) -- cgit From a9738fe5196cc1ed0715c3d96c692e782e77fec6 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:10:10 -0400 Subject: made ImageControllerWithGlanceServiceTests pass --- nova/api/openstack/images.py | 2 +- nova/api/openstack/servers.py | 7 +++---- nova/utils.py | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 8a90b4c4d..6d3e50b56 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -88,7 +88,7 @@ class Controller(common.OpenstackController): image_id) image = image_service.show(context, 
service_image_id) except exception.NotFound: - explanation = _("Image '%d' not found.") % (image_id) + explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) return dict(image=self.get_builder(req).build(image, detail=True)) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4e8574994..ca13a8669 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -141,7 +141,9 @@ class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - (image_service, image_id) = utils.get_image_service( image_ref) + (image_service, image_id) = utils.get_image_service(image_ref) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( + req, image_id) #TODO: need to assert image exists a better way #image_id = common.get_image_id_from_image_hash(image_service, @@ -150,9 +152,6 @@ class Controller(common.OpenstackController): msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) - kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) - personality = env['server'].get('personality') injected_files = [] if personality: diff --git a/nova/utils.py b/nova/utils.py index 46dfc82e9..04acfc417 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -752,7 +752,8 @@ def parse_image_ref(image_ref): if is_int(image_id): image_id = int(image_id) else: - raise Exception(_('image_ref [%s] is missing a proper id') % image_ref) + raise exception.ImageNotFound( + _('image_ref [%s] is missing a proper id') % image_ref) return (image_id, host, port) -- cgit From 3c36abb43eea4ff7a740278085690aa057aba502 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:16:51 -0400 Subject: fixed ComputeTestCase tests --- nova/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/utils.py b/nova/utils.py index 04acfc417..dcaaab602 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -773,6 +773,7 @@ def get_image_service(image_ref): :returns: a tuple of the form (image_service, image_id) """ + image_ref = image_ref or 0 if is_int(image_ref): return (get_default_image_service(), int(image_ref)) -- cgit From 96c888312fb7a2ba2cc9120282d29128a18342a8 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:41:33 -0400 Subject: fixed QuotaTestCases --- nova/flags.py | 2 +- nova/image/fake.py | 18 ++++++++++++++---- nova/tests/test_quota.py | 2 ++ nova/utils.py | 4 +--- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 2481a10af..d3f72d412 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,7 +362,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', +DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for ' + 'glance images.') diff --git a/nova/image/fake.py b/nova/image/fake.py index 8918c0c14..3ada0d8d0 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -40,7 +40,7 @@ class FakeImageService(service.BaseImageService): # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) - image = {'id': '123456', + image1 = {'id': '123456', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, @@ -49,7 +49,18 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} - self.create(None, image) + + image2 = {'id': 'fake', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + self.create(None, image1) + self.create(None, image2) super(FakeImageService, self).__init__() def index(self, context): @@ -66,7 +77,6 @@ class FakeImageService(service.BaseImageService): Returns a dict containing image data for the given opaque image id. """ - image_id = int(image_id) image = self.images.get(image_id) if image: return copy.deepcopy(image) @@ -80,7 +90,7 @@ class FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. """ - image_id = int(data['id']) + image_id = data['id'] if self.images.get(image_id): raise exception.Duplicate() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..9ede0786f 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -280,6 +280,7 @@ class QuotaTestCase(test.TestCase): FLAGS.quota_max_injected_files) def _create_with_injected_files(self, files): + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, @@ -287,6 +288,7 @@ class QuotaTestCase(test.TestCase): injected_files=files) def test_no_injected_files(self): + FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, instance_type=inst_type, image_id='fake') diff --git a/nova/utils.py b/nova/utils.py index dcaaab602..252f5e9a6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -749,11 +749,9 @@ def parse_image_ref(image_ref): port = o.port host = o.netloc.split(':', 1)[0] image_id = o.path.split('/')[-1] + if is_int(image_id): image_id = int(image_id) - else: - raise exception.ImageNotFound( - _('image_ref [%s] is missing a proper id') % image_ref) return (image_id, host, port) -- cgit From d94d040986e00409ed031b591b39a43edc111e28 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:45:33 -0400 Subject: fixed api.openstack.test_servers tests...again --- nova/api/openstack/servers.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ca13a8669..17d286748 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -184,6 +184,10 @@ class Controller(common.OpenstackController): injected_files=injected_files) except quota.QuotaError as error: self._handle_quota_error(error) + except exception.ImageNotFound as error: + msg = _("Can not find requested image") + return faults.Fault(exc.HTTPBadRequest(msg)) + inst['instance_type'] = inst_type inst['image_id'] = image_ref -- cgit From 980ceb71fdc97e92954239b843e7cec60c786a97 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:59:22 -0400 Subject: oops, took out 
commented out tests in integrated.test_servers and made tests pass again --- nova/image/fake.py | 4 ++-- nova/tests/integrated/test_servers.py | 17 ++++++++--------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index 3ada0d8d0..2c0b87952 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -77,7 +77,7 @@ class FakeImageService(service.BaseImageService): Returns a dict containing image data for the given opaque image id. """ - image = self.images.get(image_id) + image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warn('Unable to find image id %s. Have images: %s', @@ -90,7 +90,7 @@ class FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. """ - image_id = data['id'] + image_id = str(data['id']) if self.images.get(image_id): raise exception.Duplicate() diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index ba764907a..e89d0100a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -51,14 +51,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without an imageRef, this throws 500. # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # With an invalid imageRef, this throws 500. server['imageRef'] = self.user.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Add a valid imageId/imageRef server['imageId'] = good_server.get('imageId') @@ -66,8 +66,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without flavorId, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Set a valid flavorId/flavorRef server['flavorRef'] = good_server.get('flavorRef') @@ -75,8 +75,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Without a name, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here - #self.assertRaises(client.OpenStackApiException, - #self.api.post_server, post) + self.assertRaises(client.OpenStackApiException, + self.api.post_server, post) # Set a valid server name server['name'] = good_server['name'] @@ -85,7 +85,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase): LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] - return # Check it's there found_server = self.api.get_server(created_server_id) -- cgit From 9407bbfc61f165bca0a854d59dd516193334a4b4 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 11:13:22 -0400 Subject: fix pep8 issues --- nova/api/openstack/images.py | 3 ++- nova/api/openstack/servers.py | 1 - nova/flags.py | 2 +- nova/tests/api/openstack/test_servers.py | 1 - nova/utils.py | 3 ++- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 6d3e50b56..c2511b99f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,7 +50,8 @@ 
class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or utils.get_default_image_service() + self._image_service = image_service or \ + utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17d286748..ae7df3fe5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -188,7 +188,6 @@ class Controller(common.OpenstackController): msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) - inst['instance_type'] = inst_type inst['image_id'] = image_ref diff --git a/nova/flags.py b/nova/flags.py index d3f72d412..b45d252c7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -363,7 +363,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + + 'The service to use for retrieving and searching for ' + 'glance images.') DEFINE_string('host', socket.gethostname(), diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6982f87a8..bced2b910 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -464,7 +464,6 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) diff --git a/nova/utils.py b/nova/utils.py index 252f5e9a6..82d0dd7a4 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -760,6 +760,7 @@ def get_default_image_service(): ImageService = import_class(FLAGS.image_service) return ImageService() + def get_image_service(image_ref): """Get the proper image_service and id for the given image_ref. 
@@ -776,7 +777,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) (image_id, host, port) = parse_image_ref(image_ref) - glance_client = import_class('nova.image.glance.GlanceClient')(host, + glance_client = import_class('nova.image.glance.GlanceClient')(host, port) image_service = import_class(FLAGS.glance_image_service)(glance_client) return (image_service, image_id) -- cgit From 048dda438c9670998e9c91f6a906373a12ea294d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 13:03:05 -0400 Subject: fixed bug with compute_api not having actual image_ref to use proper image service --- nova/api/openstack/servers.py | 1 + nova/compute/api.py | 7 ++++--- nova/image/fake.py | 22 ++++++++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ae7df3fe5..a4e679242 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -174,6 +174,7 @@ class Controller(common.OpenstackController): context, inst_type, image_id, + image_ref=image_ref, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, diff --git a/nova/compute/api.py b/nova/compute/api.py index 4e7af7421..40d011132 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -132,7 +132,7 @@ class API(base.Base): display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, - injected_files=None): + injected_files=None, image_ref=None): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. @@ -154,7 +154,8 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = utils.get_image_service( + image_ref or image_id) image = image_service.show(context, service_image_id) os_type = None @@ -198,7 +199,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, + 'image_id': image_ref or image_id, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/image/fake.py b/nova/image/fake.py index 2c0b87952..2a60c7743 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -59,8 +59,30 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} + + image3 = {'id': '2', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + + image4 = {'id': '1', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} self.create(None, image1) self.create(None, image2) + self.create(None, image3) + self.create(None, image4) super(FakeImageService, self).__init__() def index(self, context): -- cgit From 62328a6437f238228152f460b1bd53e7254aa89c Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 16:26:42 -0400 Subject: libvirt fixes to use new image_service stuff --- nova/virt/images.py | 4 ++-- 
nova/virt/libvirt_conn.py | 14 +++++++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 0828a1fd0..fd433ea0c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -45,10 +45,10 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - image_service = utils.get_default_image_service() + (image_service, service_image_id) = utils.get_image_service(image_id) with open(path, "wb") as image_file: elevated = context.get_admin_context() - metadata = image_service.get(elevated, image_id, image_file) + metadata = image_service.get(elevated, service_image_id, image_file) return metadata diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 23fa5bdfc..ab47493fd 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,6 +36,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. """ +import hashlib import multiprocessing import os import random @@ -843,7 +844,9 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = '%08x' % int(disk_images['kernel_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['kernel_id']) + fname = fname_hash.hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -851,7 +854,9 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = '%08x' % int(disk_images['ramdisk_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['ramdisk_id']) + fname = fname_hash.hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, @@ -859,7 +864,10 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - root_fname = '%08x' % int(disk_images['image_id']) + fname_hash = hashlib.sha1() + fname_hash.update(disk_images['image_id']) + root_fname = fname_hash.hexdigest() + size = FLAGS.minimum_root_size inst_type_id = inst['instance_type_id'] -- cgit From 2c6c184138b0d8c650496e0e8d033c85a2e2dec1 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 18 May 2011 20:46:21 +0000 Subject: fix typo in udev rule --- plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules index 0dfb029eb..b179f0847 100644 --- a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules +++ b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules @@ -1,3 +1,3 @@ -SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" # is this one needed? 
-#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_base_flows.py $env{ACTION} %k all" +#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" -- cgit From ef42fa95197e7b0f73e04322456bbbdedaf3e2b3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 May 2011 14:15:36 -0700 Subject: log any exceptions that get thrown trying to retrieve metadata --- nova/api/ec2/metadatarequesthandler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 28f99b0ef..481e34e12 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -71,7 +71,11 @@ class MetadataRequestHandler(wsgi.Application): remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) - meta_data = cc.get_metadata(remote_address) + try: + meta_data = cc.get_metadata(remote_address) + except Exception: + LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) + raise if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 38ba122d9eb67c699ea0c10eab5961c3b4c25d81 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 May 2011 14:23:09 -0700 Subject: use a manual 500 with error text instead of traceback for failure --- nova/api/ec2/metadatarequesthandler.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 481e34e12..720f264a4 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -23,6 +23,7 @@ import webob.exc from nova import log as logging from nova import flags +from nova import utils from nova import wsgi from nova.api.ec2 import cloud @@ -75,7 +76,12 @@ class MetadataRequestHandler(wsgi.Application): meta_data = cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) - raise + resp = webob.Response() + resp.status = 500 + message = _('An unknown error has occurred. ' + 'Please try your request again.') + resp.body = str(utils.utf8(message)) + return resp if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 76c98e277a405127d85cf2c264a20ec3a18e023a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 17:30:43 -0400 Subject: hackish patch to fix hrefs asking for their metadata in boot (this really shouldnt be in ec2 api?) 
--- nova/api/ec2/cloud.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1fa07d042..06b5f662f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -157,7 +157,12 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + try: + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + except ValueError: + # not really an ec2_id here + image_ec2_id = instance_ref['image_id'] + data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { -- cgit From c69a1b0d9ef15ecc06217ec2c1ec4d73a755d14b Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 17:57:44 -0400 Subject: return dummy id per vishs suggestion --- nova/api/ec2/cloud.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 06b5f662f..950b72e72 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -157,12 +157,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - try: - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) - except ValueError: - # not really an ec2_id here - image_ec2_id = instance_ref['image_id'] - + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -907,7 +902,12 @@ class CloudController(object): def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' - return ec2utils.id_to_ec2_id(int(image_id), template=template) + try: + return ec2utils.id_to_ec2_id(int(image_id), template=template) + except ValueError: + #TODO(wwolf): once we have ec2_id -> glance_id mapping + # in place, this wont be necessary + return "ami-00000000" def _get_image(self, context, ec2_id): try: -- cgit From 64e9aa6daa416662a25eeab0d943b23906695e92 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 18:09:37 -0400 Subject: default to port 80 if it isnt in the href/uri --- nova/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/utils.py b/nova/utils.py index 82d0dd7a4..85934813e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -746,7 +746,7 @@ def parse_image_ref(image_ref): return (int(image_ref), None, None) o = urlparse(image_ref) - port = o.port + port = o.port or 80 host = o.netloc.split(':', 1)[0] image_id = o.path.split('/')[-1] -- cgit From 5e722ea7b912f189c0a3b9434e9a38d08095ad00 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 18 May 2011 19:13:22 -0400 Subject: refactoring wsgi to separate controller/serialization/deserialization logic; creating osapi-specific module --- nova/api/openstack/__init__.py | 43 ++--- nova/api/openstack/accounts.py | 33 ++-- nova/api/openstack/backup_schedules.py | 27 ++- nova/api/openstack/consoles.py | 26 ++- nova/api/openstack/flavors.py | 34 ++-- nova/api/openstack/image_metadata.py | 19 +- nova/api/openstack/images.py | 40 +++-- nova/api/openstack/ips.py | 33 ++-- nova/api/openstack/limits.py | 50 ++++-- nova/api/openstack/server_metadata.py | 21 ++- nova/api/openstack/servers.py | 124 +++++++------ nova/api/openstack/shared_ip_groups.py | 28 +-- nova/api/openstack/users.py | 43 +++-- 
nova/api/openstack/wsgi.py | 291 +++++++++++++++++++++++++++++++ nova/api/openstack/zones.py | 33 ++-- nova/tests/api/openstack/test_limits.py | 4 +- nova/tests/api/openstack/test_servers.py | 2 - nova/tests/api/test_wsgi.py | 135 -------------- nova/tests/integrated/test_xml.py | 4 +- 19 files changed, 624 insertions(+), 366 deletions(-) create mode 100644 nova/api/openstack/wsgi.py diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 348b70d5b..fbbd99cb9 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -26,7 +26,7 @@ import webob.exc from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import accounts from nova.api.openstack import faults from nova.api.openstack import backup_schedules @@ -40,6 +40,7 @@ from nova.api.openstack import servers from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users +from nova.api.openstack import wsgi from nova.api.openstack import zones @@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api', 'When True, this API service will accept admin operations.') -class FaultWrapper(wsgi.Middleware): +class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify(RequestClass=wsgi.Request) @@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware): return faults.Fault(exc) -class APIRouter(wsgi.Router): +class APIRouter(base_wsgi.Router): """ Routes requests on the OpenStack API to the appropriate controller and method. @@ -97,18 +98,20 @@ class APIRouter(wsgi.Router): server_members['reset_network'] = 'POST' server_members['inject_network_info'] = 'POST' - mapper.resource("zone", "zones", controller=zones.Controller(), + mapper.resource("zone", "zones", + controller=zones.resource_factory(), collection={'detail': 'GET', 'info': 'GET'}), - mapper.resource("user", "users", controller=users.Controller(), + mapper.resource("user", "users", + controller=users.resource_factory(), collection={'detail': 'GET'}) mapper.resource("account", "accounts", - controller=accounts.Controller(), + controller=accounts.resource_factory(), collection={'detail': 'GET'}) mapper.resource("console", "consoles", - controller=consoles.Controller(), + controller=consoles.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) @@ -121,31 +124,31 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV10(), + controller=servers.resource_factory('1.0'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV10(), + controller=images.resource_factory('1.0'), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV10(), + controller=flavors.resource_factory('1.0'), collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, - controller=shared_ip_groups.Controller()) + controller=shared_ip_groups.resource_factory()) mapper.resource("backup_schedule", "backup_schedule", - controller=backup_schedules.Controller(), + controller=backup_schedules.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("limit", "limits", - 
controller=limits.LimitsControllerV10()) + controller=limits.resource_factory('1.0')) - mapper.resource("ip", "ips", controller=ips.Controller(), + mapper.resource("ip", "ips", controller=ips.resource_factory(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', collection_name='servers')) @@ -157,27 +160,27 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV11(), + controller=servers.resource_factory('1.1'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV11(), + controller=images.resource_factory('1.1'), collection={'detail': 'GET'}) mapper.resource("image_meta", "meta", - controller=image_metadata.Controller(), + controller=image_metadata.resource_factory(), parent_resource=dict(member_name='image', collection_name='images')) mapper.resource("server_meta", "meta", - controller=server_metadata.Controller(), + controller=server_metadata.resource_factory(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV11(), + controller=flavors.resource_factory('1.1'), collection={'detail': 'GET'}) mapper.resource("limit", "limits", - controller=limits.LimitsControllerV11()) + controller=limits.resource_factory('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 00fdd4540..d8a9d1909 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -20,8 +20,9 @@ from nova import flags from nova import log as logging from nova.auth import manager -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +35,7 @@ def _translate_keys(account): manager=account.project_manager_id) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "account": ["id", "name", "description", "manager"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -66,20 +62,33 @@ class Controller(common.OpenstackController): self.manager.delete_project(id) return {} - def create(self, req): + def create(self, req, body): """We use update with create-or-update semantics because the id comes from an external source""" raise faults.Fault(webob.exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """This is really create or update.""" self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - description = env['account'].get('description') - manager = env['account'].get('manager') + description = body['account'].get('description') + manager = body['account'].get('manager') try: account = self.manager.get_project(id) self.manager.modify_project(id, manager, description) except exception.NotFound: account = self.manager.create_project(id, manager, description) return dict(account=_translate_keys(account)) + + +def resource_factory(): + metadata = { + "attributes": { + "account": ["id", "name", "description", "manager"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/backup_schedules.py 
b/nova/api/openstack/backup_schedules.py index 4bf744046..4153c90c1 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -19,9 +19,8 @@ import time from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults -import nova.image.service +from nova.api.openstack import wsgi def _translate_keys(inst): @@ -29,14 +28,9 @@ def _translate_keys(inst): return dict(backupSchedule=inst) -class Controller(common.OpenstackController): +class Controller(object): """ The backup schedule API controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'backupSchedule': []}}} - def __init__(self): pass @@ -48,7 +42,7 @@ class Controller(common.OpenstackController): """ Returns a single backup schedule for a given instance """ return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) @@ -56,3 +50,18 @@ class Controller(common.OpenstackController): def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + metadata = { + 'attributes': { + 'backupSchedule': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 1a77f25d7..36d570803 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -19,8 +19,8 @@ from webob import exc from nova import console from nova import exception -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi def _translate_keys(cons): @@ -43,14 +43,9 @@ def _translate_detail_keys(cons): return dict(console=info) -class Controller(common.OpenstackController): +class Controller(object): """The Consoles Controller for the Openstack API""" - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'console': []}}} - def __init__(self): self.console_api = console.API() super(Controller, self).__init__() @@ -63,9 +58,8 @@ class Controller(common.OpenstackController): return dict(consoles=[_translate_keys(console) for console in consoles]) - def create(self, req, server_id): + def create(self, req, server_id, body): """Creates a new console""" - #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) @@ -94,3 +88,17 @@ class Controller(common.OpenstackController): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + + +def resource_factory(): + metadata = { + 'attributes': { + 'console': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 4c5971cf6..46056a27a 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -19,22 +19,13 @@ import webob from nova import db from nova import exception -from nova.api.openstack import common from nova.api.openstack import views +from nova.api.openstack import wsgi -class 
Controller(common.OpenstackController): +class Controller(object): """Flavor controller for the OpenStack API.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "flavor": ["id", "name", "ram", "disk"], - "link": ["rel", "type", "href"], - } - } - } - def index(self, req): """Return all flavors in brief.""" items = self._get_flavors(req, is_detail=False) @@ -71,14 +62,31 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): + def _get_view_builder(self, req): return views.flavors.ViewBuilder() class ControllerV11(Controller): + def _get_view_builder(self, req): base_url = req.application_url return views.flavors.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 1eccc0174..ce0140265 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -21,19 +21,18 @@ from nova import flags from nova import quota from nova import utils from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """The image metadata API controller for the Openstack API""" def __init__(self): self.image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): if not image: @@ -64,9 +63,8 @@ class Controller(common.OpenstackController): else: return faults.Fault(exc.HTTPNotFound()) - def create(self, req, image_id): + def create(self, req, image_id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) if 'metadata' in body: @@ -77,9 +75,8 @@ class Controller(common.OpenstackController): self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) - def update(self, req, image_id, id): + def update(self, req, image_id, id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -104,3 +101,11 @@ class Controller(common.OpenstackController): metadata.pop(id) img['properties'] = metadata self.image_service.update(context, image_id, img, None) + + +def resource_factory(): + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..e22854ebf 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -23,25 +23,16 @@ from nova import utils from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import images as images_view +from nova.api.openstack import wsgi LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -class 
Controller(common.OpenstackController): +class Controller(object): """Base `wsgi.Controller` for retrieving/displaying images.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "image": ["id", "name", "updated", "created", "status", - "serverId", "progress"], - "link": ["rel", "type", "href"], - }, - }, - } - def __init__(self, image_service=None, compute_service=None): """Initialize new `ImageController`. @@ -153,3 +144,30 @@ class ControllerV11(Controller): def get_default_xmlns(self, req): return common.XML_NS_V11 + + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "image": ["id", "name", "updated", "created", "status", + "serverId", "progress"], + "link": ["rel", "type", "href"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, + metadata=metadata), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 778e9ba1a..24612eafb 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -20,23 +20,14 @@ import time from webob import exc import nova -import nova.api.openstack.views.addresses -from nova.api.openstack import common from nova.api.openstack import faults +import nova.api.openstack.views.addresses +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """The servers addresses API controller for the Openstack API.""" - _serialization_metadata = { - 'application/xml': { - 'list_collections': { - 'public': {'item_name': 'ip', 'item_key': 'addr'}, - 'private': {'item_name': 'ip', 'item_key': 'addr'}, - }, - }, - } - def __init__(self): self.compute_api = nova.compute.API() self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() @@ -65,8 +56,24 @@ class Controller(common.OpenstackController): def show(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): return faults.Fault(exc.HTTPNotImplemented()) def delete(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + metadata = { + 'list_collections': { + 'public': {'item_name': 'ip', 'item_key': 'addr'}, + 'private': {'item_name': 'ip', 'item_key': 'addr'}, + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 47bc238f1..306048d8f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -30,10 +30,11 @@ from collections import defaultdict from webob.dec import wsgify -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views +from nova.api.openstack import wsgi # Convenience constants for the limits dictionary passed to Limiter(). @@ -43,23 +44,11 @@ PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 -class LimitsController(common.OpenstackController): +class LimitsController(object): """ Controller for accessing limits in the OpenStack API. 
""" - _serialization_metadata = { - "application/xml": { - "attributes": { - "limit": ["verb", "URI", "uri", "regex", "value", "unit", - "resetTime", "next-available", "remaining", "name"], - }, - "plurals": { - "rate": "limit", - }, - }, - } - def index(self, req): """ Return all global and rate limit information. @@ -84,6 +73,35 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() +def resource_factory(version='1.0'): + controller = { + '1.0': LimitsControllerV10, + '1.1': LimitsControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "limit": ["verb", "URI", "uri", "regex", "value", "unit", + "resetTime", "next-available", "remaining", "name"], + }, + "plurals": { + "rate": "limit", + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, + metadata=metadata) + } + + return wsgi.Resource(controller, serializers=serializers) + + class Limit(object): """ Stores information about a limit for HTTP requets. @@ -195,7 +213,7 @@ DEFAULT_LIMITS = [ ] -class RateLimitingMiddleware(wsgi.Middleware): +class RateLimitingMiddleware(base_wsgi.Middleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. @@ -209,7 +227,7 @@ class RateLimitingMiddleware(wsgi.Middleware): @param application: WSGI application to wrap @param limits: List of dictionaries describing limits """ - wsgi.Middleware.__init__(self, application) + base_wsgi.Middleware.__init__(self, application) self._limiter = Limiter(limits or DEFAULT_LIMITS) @wsgify(RequestClass=wsgi.Request) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index fd64ee4fb..fb9449b4c 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -19,12 +19,11 @@ from webob import exc from nova import compute from nova import quota -from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """ The server metadata API controller for the Openstack API """ def __init__(self): @@ -43,10 +42,9 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] return self._get_metadata(context, server_id) - def create(self, req, server_id): + def create(self, req, server_id, body): context = req.environ['nova.context'] - data = self._deserialize(req.body, req.get_content_type()) - metadata = data.get('metadata') + metadata = body.get('metadata') try: self.compute_api.update_or_create_instance_metadata(context, server_id, @@ -55,9 +53,8 @@ class Controller(common.OpenstackController): self._handle_quota_error(error) return req.body - def update(self, req, server_id, id): + def update(self, req, server_id, id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -92,3 +89,11 @@ class Controller(common.OpenstackController): if error.code == "MetadataLimitExceeded": raise exc.HTTPBadRequest(explanation=error.message) raise error + + +def resource_factory(): + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/servers.py 
b/nova/api/openstack/servers.py index 8f2de2afe..78f8bb1b7 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -31,6 +31,7 @@ import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers +from nova.api.openstack import wsgi from nova.auth import manager as auth_manager from nova.compute import instance_types import nova.api.openstack @@ -41,31 +42,12 @@ LOG = logging.getLogger('nova.api.openstack.servers') FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """ The Server API controller for the OpenStack API """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress", "adminPass", "flavorRef", - "imageRef"], - "link": ["rel", "type", "href"], - }, - "dict_collections": { - "metadata": {"item_name": "meta", "item_key": "key"}, - }, - "list_collections": { - "public": {"item_name": "ip", "item_key": "addr"}, - "private": {"item_name": "ip", "item_key": "addr"}, - }, - }, - } - def __init__(self): self.compute_api = compute.API() self._image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() def index(self, req): """ Returns a list of server names and ids for a given user """ @@ -122,15 +104,14 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def create(self, req): + def create(self, req, body): """ Creates a new server for a given user """ - env = self._deserialize_create(req) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) context = req.environ['nova.context'] - password = self._get_server_admin_password(env['server']) + password = self._get_server_admin_password(body['server']) key_name = None key_data = None @@ -140,7 +121,7 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - requested_image_id = self._image_id_from_req_data(env) + requested_image_id = self._image_id_from_req_data(body) try: image_id = common.get_image_id_from_image_hash(self._image_service, context, requested_image_id) @@ -151,18 +132,18 @@ class Controller(common.OpenstackController): kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - personality = env['server'].get('personality') + personality = body['server'].get('personality') injected_files = [] if personality: injected_files = self._get_injected_files(personality) - flavor_id = self._flavor_id_from_req_data(env) + flavor_id = self._flavor_id_from_req_data(body) - if not 'name' in env['server']: + if not 'name' in body['server']: msg = _("Server name is not defined") return exc.HTTPBadRequest(msg) - name = env['server']['name'] + name = body['server']['name'] self._validate_server_name(name) name = name.strip() @@ -179,7 +160,7 @@ class Controller(common.OpenstackController): display_description=name, key_name=key_name, key_data=key_data, - metadata=env['server'].get('metadata', {}), + metadata=body['server'].get('metadata', {}), injected_files=injected_files) except quota.QuotaError as error: self._handle_quota_error(error) @@ -194,18 +175,6 @@ class Controller(common.OpenstackController): password) return server - def _deserialize_create(self, request): - """ - Deserialize a create request - - Overrides normal behavior in the case of xml content - """ - if request.content_type 
== "application/xml": - deserializer = ServerCreateRequestXMLDeserializer() - return deserializer.deserialize(request.body) - else: - return self._deserialize(request.body, request.get_content_type()) - def _get_injected_files(self, personality): """ Create a list of injected files from the personality attribute @@ -255,24 +224,23 @@ class Controller(common.OpenstackController): return utils.generate_password(16) @scheduler_api.redirect_handler - def update(self, req, id): + def update(self, req, id, body): """ Updates the server name or password """ if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() - inst_dict = self._deserialize(req.body, req.get_content_type()) - if not inst_dict: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) ctxt = req.environ['nova.context'] update_dict = {} - if 'name' in inst_dict['server']: - name = inst_dict['server']['name'] + if 'name' in body['server']: + name = body['server']['name'] self._validate_server_name(name) update_dict['display_name'] = name.strip() - self._parse_update(ctxt, id, inst_dict, update_dict) + self._parse_update(ctxt, id, body, update_dict) try: self.compute_api.update(ctxt, id, **update_dict) @@ -294,7 +262,7 @@ class Controller(common.OpenstackController): pass @scheduler_api.redirect_handler - def action(self, req, id): + def action(self, req, id, body): """Multi-purpose method used to reboot, rebuild, or resize a server""" @@ -307,10 +275,9 @@ class Controller(common.OpenstackController): 'rebuild': self._action_rebuild, } - input_dict = self._deserialize(req.body, req.get_content_type()) for key in actions.keys(): - if key in input_dict: - return actions[key](input_dict, req, id) + if key in body: + return actions[key](body, req, id) return faults.Fault(exc.HTTPNotImplemented()) def _action_change_password(self, input_dict, req, id): @@ -410,7 +377,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def reset_network(self, req, id): + def reset_network(self, req, id, body): """ Reset networking on an instance (admin only). @@ -425,7 +392,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def inject_network_info(self, req, id): + def inject_network_info(self, req, id, body): """ Inject network info for an instance (admin only). @@ -440,7 +407,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def pause(self, req, id): + def pause(self, req, id, body): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: @@ -452,7 +419,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def unpause(self, req, id): + def unpause(self, req, id, body): """ Permit Admins to Unpause the server. 
""" ctxt = req.environ['nova.context'] try: @@ -464,7 +431,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def suspend(self, req, id): + def suspend(self, req, id, body): """permit admins to suspend the server""" context = req.environ['nova.context'] try: @@ -476,7 +443,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def resume(self, req, id): + def resume(self, req, id, body): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: @@ -815,3 +782,44 @@ class ServerCreateRequestXMLDeserializer(object): if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" + + +def resource_factory(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + metadata = { + "attributes": { + "server": ["id", "imageId", "name", "flavorId", "hostId", + "status", "progress", "adminPass", "flavorRef", + "imageRef"], + "link": ["rel", "type", "href"], + }, + "dict_collections": { + "metadata": {"item_name": "meta", "item_key": "key"}, + }, + "list_collections": { + "public": {"item_name": "ip", "item_key": "addr"}, + "private": {"item_name": "ip", "item_key": "addr"}, + }, + } + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=xmlns), + } + + deserializers = { + 'application/xml': ServerCreateRequestXMLDeserializer(), + } + + return wsgi.Resource(controller, serializers=serializers, + deserializers=deserializers) + diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 996db3648..db178f2a2 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -17,29 +17,13 @@ from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -def _translate_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format """ - return dict(sharedIpGroup=inst) - - -def _translate_detail_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format with - correctly mapped attributes """ - return dict(sharedIpGroups=inst) - - -class Controller(common.OpenstackController): +class Controller(object): """ The Shared IP Groups Controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'sharedIpGroup': []}}} - def index(self, req): """ Returns a list of Shared IP Groups for the user """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -48,7 +32,7 @@ class Controller(common.OpenstackController): """ Shows in-depth information on a specific Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -60,6 +44,10 @@ class Controller(common.OpenstackController): """ Returns a complete list of Shared IP Groups """ raise faults.Fault(exc.HTTPNotImplemented()) - def create(self, req): + def create(self, req, body): """ Creates a new Shared IP group """ raise faults.Fault(exc.HTTPNotImplemented()) + + +def resource_factory(): + return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 7ae4c3232..35b6a502e 100644 --- a/nova/api/openstack/users.py +++ 
b/nova/api/openstack/users.py @@ -20,8 +20,10 @@ from nova import flags from nova import log as logging from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi from nova.auth import manager + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +36,7 @@ def _translate_keys(user): admin=user.admin) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "user": ["id", "name", "access", "secret", "admin"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -81,23 +78,35 @@ class Controller(common.OpenstackController): self.manager.delete_user(id) return {} - def create(self, req): + def create(self, req, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') in ('T', 'True', True) - name = env['user'].get('name') - access = env['user'].get('access') - secret = env['user'].get('secret') + is_admin = body['user'].get('admin') in ('T', 'True', True) + name = body['user'].get('name') + access = body['user'].get('access') + secret = body['user'].get('secret') user = self.manager.create_user(name, access, secret, is_admin) return dict(user=_translate_keys(user)) - def update(self, req, id): + def update(self, req, id, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') + is_admin = body['user'].get('admin') if is_admin is not None: is_admin = is_admin in ('T', 'True', True) - access = env['user'].get('access') - secret = env['user'].get('secret') + access = body['user'].get('access') + secret = body['user'].get('secret') self.manager.modify_user(id, access, secret, is_admin) return dict(user=_translate_keys(self.manager.get_user(id))) + + +def resource_factory(): + metadata = { + "attributes": { + "user": ["id", "name", "access", "secret", "admin"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py new file mode 100644 index 000000000..9e0077932 --- /dev/null +++ b/nova/api/openstack/wsgi.py @@ -0,0 +1,291 @@ + +import json +import webob +from xml.dom import minidom + +from nova import exception +from nova import log as logging +from nova import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + +LOG = logging.getLogger('nova.api.openstack.wsgi') + + +class Request(webob.Request): + def best_match_content_type(self, supported=None): + """Determine the requested content-type. + + Based on the query extension then the Accept header. 
+ + :param supported: list of content-types to override defaults + + """ + supported = supported or ['application/json', 'application/xml'] + parts = self.path.rsplit('.', 1) + + if len(parts) > 1: + ctype = 'application/{0}'.format(parts[1]) + if ctype in supported: + return ctype + + bm = self.accept.best_match(supported) + + return bm or 'application/json' + + def get_content_type(self): + if not "Content-Type" in self.headers: + raise exception.InvalidContentType(content_type=None) + + allowed_types = ("application/xml", "application/json") + type = self.content_type + + if type not in allowed_types: + raise exception.InvalidContentType(content_type=type) + else: + return type + + +class JSONDeserializer(object): + def deserialize(self, datastring): + return utils.loads(datastring) + + +class JSONSerializer(object): + def serialize(self, data): + return utils.dumps(data) + + +class XMLDeserializer(object): + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def deserialize(self, datastring): + """XML deserialization entry point.""" + plurals = set(self.metadata.get('plurals', {})) + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. + + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + +class XMLSerializer(object): + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def serialize(self, data): + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + xmlns = node.getAttribute('xmlns') + if not xmlns and self.xmlns: + node.setAttribute('xmlns', self.xmlns) + + return node.toprettyxml(indent=' ') + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + if type(data) is list: + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + +class Resource(object): + """WSGI app that dispatched to methods. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming wsgi.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. 
+ + """ + def __init__(self, controller, serializers=None, deserializers=None): + self.serializers = { + 'application/xml': XMLSerializer(), + 'application/json': JSONSerializer(), + } + self.serializers.update(serializers or {}) + + self.deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + self.deserializers.update(deserializers or {}) + + self.controller = controller + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """Call the method specified in req.environ by RoutesMiddleware.""" + LOG.debug("%s %s" % (request.method, request.url)) + + try: + action, action_args, accept = self.deserialize_request(request) + except exception.InvalidContentType: + return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) + + controller_method = getattr(self.controller, action) + result = controller_method(req=request, **action_args) + + response = self.serialize_response(accept, result) + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError: + msg_dict = dict(url=request.url) + msg = _("%(url)s returned a fault") + + LOG.debug(msg) + + return response + + def serialize_response(self, content_type, response_body): + """Serialize a dict into a string and wrap in a wsgi.Request object. + + :param content_type: expected mimetype of serialized response body + :param response_body: dict produced by the Controller + + """ + if not type(response_body) is dict: + return response_body + + response = webob.Response() + response.headers['Content-Type'] = content_type + + serializer = self.get_serializer(content_type) + response.body = serializer.serialize(response_body) + + return response + + def get_serializer(self, content_type): + try: + return self.serializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def deserialize_request(self, request): + """Parse a wsgi request into a set of params we care about. 
+ + :param request: wsgi.Request object + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action') + + if request.method.lower() in ('post', 'put'): + if len(request.body) == 0: + action_args['body'] = None + else: + content_type = request.get_content_type() + deserializer = self.get_deserializer(content_type) + + try: + action_args['body'] = deserializer.deserialize(request.body) + except exception.InvalidContentType: + action_args['body'] = None + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] + + if 'format' in args: + del args['format'] + + return args + + def get_deserializer(self, content_type): + try: + return self.deserializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 227ffecdc..d17ab7a9b 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -17,6 +17,7 @@ from nova import db from nova import flags from nova import log as logging from nova.api.openstack import common +from nova.api.openstack import wsgi from nova.scheduler import api @@ -41,12 +42,7 @@ def _scrub_zone(zone): 'deleted', 'deleted_at', 'updated_at')) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "zone": ["id", "api_url", "name", "capabilities"]}}} +class Controller(object): def index(self, req): """Return all zones in brief""" @@ -85,15 +81,28 @@ class Controller(common.OpenstackController): api.zone_delete(req.environ['nova.context'], zone_id) return {} - def create(self, req): + def create(self, req, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - zone = api.zone_create(context, env["zone"]) + zone = api.zone_create(context, body["zone"]) return dict(zone=_scrub_zone(zone)) - def update(self, req, id): + def update(self, req, id, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) - zone = api.zone_update(context, zone_id, env["zone"]) + zone = api.zone_update(context, zone_id, body["zone"]) return dict(zone=_scrub_zone(zone)) + + +def resource_factory(): + metadata = { + "attributes": { + "zone": ["id", "api_url", "name", "capabilities"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 45bd4d501..db859c2f8 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -65,7 +65,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.LimitsControllerV10() + self.controller = limits.resource_factory('1.0') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -178,7 +178,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = 
limits.LimitsControllerV11() + self.controller = limits.resource_factory('1.1') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e8182b6a9..15f376f74 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -207,7 +207,6 @@ class ServersTest(test.TestCase): }, ] - print res_dict['server'] self.assertEqual(res_dict['server']['links'], expected_links) def test_get_server_by_id_with_addresses_xml(self): @@ -831,7 +830,6 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers/detail') req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) - print res.body dom = minidom.parseString(res.body) for i, server in enumerate(dom.getElementsByTagName('server')): self.assertEqual(server.getAttribute('id'), str(i)) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 5820ecdc2..0be3aecf1 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -121,138 +121,3 @@ class ControllerTest(test.TestCase): result = request.get_response(self.TestRouter()) self.assertEqual(result.status_int, 200) self.assertEqual(result.headers["Content-Type"], "application/json") - - -class RequestTest(test.TestCase): - - def test_request_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') - request.body = "" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "text/html" - request.body = "asdf
<br />" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = \ - "application/json; q=0.3, application/xml; q=0.9" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_from_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123.json') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_accept_and_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - -class SerializerTest(test.TestCase): - - def test_xml(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_xml = '<servers><a>(2,3)</a></servers>' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/xml") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_xml) - - def test_json(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/json") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_json) - - def test_unsupported_content_type(self): - serializer = wsgi.Serializer() - self.assertRaises(exception.InvalidContentType, serializer.serialize, - {}, "text/null") - - def test_deserialize_json(self): - data = """{"a": { - "a1": "1", - "a2": "2", - "bs": ["1", "2", "3", {"c": {"c1": "1"}}], - "d": {"e": "1"}, - "f": "1"}}""" - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(data, "application/json"), - as_dict) - - def test_deserialize_xml(self): - xml = """ - <a a1="1" a2="2"> - <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs> - <d><e>1</e></d> - <f>1</f> - </a> - """.strip() - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': 
dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) - - def test_deserialize_empty_xml(self): - xml = """""" - as_dict = {"a": {}} - serializer = wsgi.Serializer() - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 8a9754777..fde32f797 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): """"Some basic XML sanity checks.""" def test_namespace_limits(self): - """/limits should have v1.0 namespace (hasn't changed in 1.1).""" + """/limits should have v1.1 namespace (has changed in 1.1).""" headers = {} headers['Accept'] = 'application/xml' @@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): data = response.read() LOG.debug("data: %s" % data) - prefix = ' Date: Wed, 18 May 2011 20:33:25 -0400 Subject: removing controller/serializer code from wsgi.py; updating other code to use new modules --- nova/api/direct.py | 12 +- nova/api/openstack/common.py | 7 - nova/api/openstack/consoles.py | 3 +- nova/api/openstack/contrib/volumes.py | 23 +- nova/api/openstack/extensions.py | 97 +++++---- nova/api/openstack/faults.py | 39 ++-- nova/api/openstack/image_metadata.py | 1 - nova/api/openstack/images.py | 11 +- nova/api/openstack/versions.py | 44 ++-- nova/api/openstack/wsgi.py | 23 +- nova/objectstore/s3server.py | 2 +- nova/tests/api/openstack/extensions/foxinsocks.py | 4 +- nova/tests/api/openstack/test_extensions.py | 4 +- nova/tests/api/test_wsgi.py | 54 ----- nova/wsgi.py | 250 +--------------------- 15 files changed, 147 insertions(+), 427 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 8ceae299c..5e6c7c882 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import utils from nova import wsgi +import nova.api.openstack.wsgi # Global storage for registering modules. @@ -251,7 +252,7 @@ class Reflection(object): return self._methods[method] -class ServiceWrapper(wsgi.Controller): +class ServiceWrapper(object): """Wrapper to dynamically povide a WSGI controller for arbitrary objects. 
With lightweight introspection allows public methods on the object to @@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify(RequestClass=wsgi.Request) + @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller): try: content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - return self._serialize(result, content_type, default_xmlns) + serializer = { + 'application/xml': nova.api.openstack.wsgi.XMLSerializer(), + 'application/json': nova.api.openstack.wsgi.JSONSerializer(), + }[content_type] + return serializer.serialize(result) except: raise exception.Error("returned non-serializable type: %s" % result) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..bb1a96812 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -23,7 +23,6 @@ import webob from nova import exception from nova import flags from nova import log as logging -from nova import wsgi LOG = logging.getLogger('nova.api.openstack.common') @@ -146,9 +145,3 @@ def get_id_from_href(href): except: LOG.debug(_("Error extracting id from href: %s") % href) raise webob.exc.HTTPBadRequest(_('could not parse id from href')) - - -class OpenstackController(wsgi.Controller): - def get_default_xmlns(self, req): - # Use V10 by default - return XML_NS_V10 diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 36d570803..97304affe 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -44,11 +44,10 @@ def _translate_detail_keys(cons): class Controller(object): - """The Consoles Controller for the Openstack API""" + """The Consoles controller for the Openstack API""" def __init__(self): self.console_api = console.API() - super(Controller, self).__init__() def index(self, req, server_id): """Returns a list of consoles for this instance""" diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..b00790b7f 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -22,7 +22,6 @@ from nova import exception from nova import flags from nova import log as logging from nova import volume -from nova import wsgi from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults @@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol): return d -class VolumeController(wsgi.Controller): +class VolumeController(object): """The Volumes API controller for the OpenStack API.""" _serialization_metadata = { @@ -124,15 +123,14 @@ class VolumeController(wsgi.Controller): res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} - def create(self, req): + def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) - vol = env['volume'] + vol = body['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) new_volume = self.volume_api.create(context, size, @@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol): return d -class 
VolumeAttachmentController(wsgi.Controller): +class VolumeAttachmentController(object): """The volume attachment API controller for the Openstack API. A child resource of the server. Note that we use the volume id @@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller): return {'volumeAttachment': _translate_attachment_detail_view(context, vol)} - def create(self, req, server_id): + def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) instance_id = server_id - volume_id = env['volumeAttachment']['volumeId'] - device = env['volumeAttachment']['device'] + volume_id = body['volumeAttachment']['volumeId'] + device = body['volumeAttachment']['device'] msg = _("Attach volume %(volume_id)s to instance %(server_id)s" " at %(device)s") % locals() @@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller): # TODO(justinsb): How do I return "accepted" here? return {'volumeAttachment': attachment} - def update(self, _req, _server_id, _id): + def update(self, req, server_id, id, body): """Update a volume attachment. We don't currently support this.""" return faults.Fault(exc.HTTPBadRequest()) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 7ea7afef6..73f174e07 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -27,9 +27,10 @@ import webob.exc from nova import exception from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi LOG = logging.getLogger('extensions') @@ -116,28 +117,34 @@ class ExtensionDescriptor(object): return response_exts -class ActionExtensionController(common.OpenstackController): - +class ActionExtensionController(object): def __init__(self, application): - self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler - def action(self, req, id): - - input_dict = self._deserialize(req.body, req.get_content_type()) + def action(self, req, id, body): for action_name, handler in self.action_handlers.iteritems(): - if action_name in input_dict: - return handler(input_dict, req, id) + if action_name in body: + return handler(body, req, id) # no action handler found (bump to downstream application) res = self.application return res -class ResponseExtensionController(common.OpenstackController): +class ActionExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = ActionExtensionController(application) + super(ActionExtensionResource, self).__init__(controller) + + def add_action(self, action_name, handler): + self.controller.add_action(action_name, handler) + + +class ResponseExtensionController(object): def __init__(self, application): self.application = application @@ -157,7 +164,11 @@ class ResponseExtensionController(common.OpenstackController): headers = res.headers except AttributeError: default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) + serializer = { + 'application/xml': wsgi.XMLSerializer(), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + body = serializer.serialize(res) headers = {"Content-Type": content_type} res = webob.Response() res.body = body @@ 
-165,7 +176,17 @@ class ResponseExtensionController(common.OpenstackController): return res -class ExtensionController(common.OpenstackController): +class ResponseExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = ResponseExtensionController(application) + super(ResponseExtensionResource, self).__init__(controller) + + def add_handler(self, handler): + self.controller.add_handler(handler) + + +class ExtensionController(object): def __init__(self, extension_manager): self.extension_manager = extension_manager @@ -198,7 +219,7 @@ class ExtensionController(common.OpenstackController): raise faults.Fault(webob.exc.HTTPNotFound()) -class ExtensionMiddleware(wsgi.Middleware): +class ExtensionMiddleware(base_wsgi.Middleware): """Extensions middleware for WSGI.""" @classmethod def factory(cls, global_config, **local_config): @@ -207,43 +228,43 @@ class ExtensionMiddleware(wsgi.Middleware): return cls(app, **local_config) return _factory - def _action_ext_controllers(self, application, ext_mgr, mapper): - """Return a dict of ActionExtensionController-s by collection.""" - action_controllers = {} + def _action_ext_resources(self, application, ext_mgr, mapper): + """Return a dict of ActionExtensionResource objects by collection.""" + action_resources = {} for action in ext_mgr.get_actions(): - if not action.collection in action_controllers.keys(): - controller = ActionExtensionController(application) + if not action.collection in action_resources.keys(): + resource = ActionExtensionResource(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', - controller=controller, + controller=resource, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', - controller=controller, + controller=resource, conditions=dict(method=['POST'])) - action_controllers[action.collection] = controller + action_resources[action.collection] = resource - return action_controllers + return action_resources - def _response_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionController-s by collection.""" - response_ext_controllers = {} + def _response_ext_resources(self, application, ext_mgr, mapper): + """Returns a dict of ResponseExtensionResource objects by collection.""" + response_ext_resources = {} for resp_ext in ext_mgr.get_response_extensions(): - if not resp_ext.key in response_ext_controllers.keys(): - controller = ResponseExtensionController(application) + if not resp_ext.key in response_ext_resources.keys(): + resource = ResponseExtensionResource(application) mapper.connect(resp_ext.url_route + '.:(format)', action='process', - controller=controller, + controller=resource, conditions=resp_ext.conditions) mapper.connect(resp_ext.url_route, action='process', - controller=controller, + controller=resource, conditions=resp_ext.conditions) - response_ext_controllers[resp_ext.key] = controller + response_ext_resources[resp_ext.key] = resource - return response_ext_controllers + return response_ext_resources def __init__(self, application, ext_mgr=None): @@ -258,21 +279,21 @@ class ExtensionMiddleware(wsgi.Middleware): LOG.debug(_('Extended resource: %s'), resource.collection) mapper.resource(resource.collection, resource.collection, - controller=resource.controller, + controller=wsgi.Resource(resource.controller), collection=resource.collection_actions, member=resource.member_actions, parent_resource=resource.parent) # extended actions - action_controllers = 
self._action_ext_controllers(application, ext_mgr, + action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug(_('Extended action: %s'), action.action_name) - controller = action_controllers[action.collection] - controller.add_action(action.action_name, action.handler) + resource = action_resources[action.collection] + resource.add_action(action.action_name, action.handler) # extended responses - resp_controllers = self._response_ext_controllers(application, ext_mgr, + resp_controllers = self._response_ext_resources(application, ext_mgr, mapper) for response_ext in ext_mgr.get_response_extensions(): LOG.debug(_('Extended response: %s'), response_ext.key) @@ -422,7 +443,7 @@ class ExtensionManager(object): class ResponseExtension(object): - """Add data to responses from core nova OpenStack API controllers.""" + """Add data to responses from core nova OpenStack API resources.""" def __init__(self, method, url_route, handler): self.url_route = url_route @@ -432,7 +453,7 @@ class ResponseExtension(object): class ActionExtension(object): - """Add custom actions to core nova OpenStack API controllers.""" + """Add custom actions to core nova OpenStack API resources.""" def __init__(self, collection, action_name, handler): self.collection = collection diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 87118ce19..fd36f8f17 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -19,8 +19,7 @@ import webob.dec import webob.exc -from nova import wsgi -from nova.api.openstack import common +from nova.api.openstack import wsgi class Fault(webob.exc.HTTPException): @@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException): if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry + # 'code' is an attribute on the fault tag itself - metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - default_xmlns = common.XML_NS_V10 - serializer = wsgi.Serializer(metadata, default_xmlns) + metadata = {'attributes': {fault_name: 'code'}} + content_type = req.best_match_content_type() - self.wrapped_exc.body = serializer.serialize(fault_data, content_type) + + serializer = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type + return self.wrapped_exc @@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException): Rate-limited request response. """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "overLimitFault": "code", - }, - }, - } - def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. @@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException): Return the wrapped exception with a serialized body conforming to our error format. 
""" - serializer = wsgi.Serializer(self._serialization_metadata) content_type = request.best_match_content_type() - content = serializer.serialize(self.content, content_type) + metadata = {"attributes": {"overLimitFault": "code"}} + + serializer = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONSerializer(), + }[content_type] + + content = serializer.serialize(self.content) self.wrapped_exc.body = content + return self.wrapped_exc diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index ce0140265..506b63acf 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -20,7 +20,6 @@ from webob import exc from nova import flags from nova import quota from nova import utils -from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import wsgi diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index e22854ebf..5a03573d8 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -31,7 +31,7 @@ FLAGS = flags.FLAGS class Controller(object): - """Base `wsgi.Controller` for retrieving/displaying images.""" + """Base controller for retrieving/displaying images.""" def __init__(self, image_service=None, compute_service=None): """Initialize new `ImageController`. @@ -99,21 +99,20 @@ class Controller(object): self._image_service.delete(context, image_id) return webob.exc.HTTPNoContent() - def create(self, req): + def create(self, req, body): """Snapshot a server instance and save the image. :param req: `wsgi.Request` object """ context = req.environ['nova.context'] content_type = req.get_content_type() - image = self._deserialize(req.body, content_type) - if not image: + if not body: raise webob.exc.HTTPBadRequest() try: - server_id = image["image"]["serverId"] - image_name = image["image"]["name"] + server_id = body["image"]["serverId"] + image_name = body["image"]["name"] except KeyError: raise webob.exc.HTTPBadRequest() diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 3f9d91934..a8d785b52 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -18,13 +18,27 @@ import webob import webob.dec -from nova import wsgi +from nova import wsgi as base_wsgi import nova.api.openstack.views.versions +from nova.api.openstack import wsgi -class Versions(wsgi.Application): - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): +class Versions(wsgi.Resource, base_wsgi.Application): + def __init__(self): + metadata = { + "attributes": { + "version": ["status", "id"], + "link": ["rel", "href"], + } + } + + serializers = { + 'application/xml': wsgi.XMLSerializer(metadata=metadata), + } + + super(Versions, self).__init__(None, serializers=serializers) + + def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" version_objs = [ { @@ -37,24 +51,6 @@ class Versions(wsgi.Application): }, ] - builder = nova.api.openstack.views.versions.get_view_builder(req) + builder = nova.api.openstack.views.versions.get_view_builder(request) versions = [builder.build(version) for version in version_objs] - response = dict(versions=versions) - - metadata = { - "application/xml": { - "attributes": { - "version": ["status", "id"], - "link": ["rel", "href"], - } - } - } - - content_type = req.best_match_content_type() - body = wsgi.Serializer(metadata).serialize(response, content_type) - - response = 
webob.Response() - response.content_type = content_type - response.body = body - - return response + return dict(versions=versions) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 9e0077932..97280c365 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -206,8 +206,7 @@ class Resource(object): except exception.InvalidContentType: return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - controller_method = getattr(self.controller, action) - result = controller_method(req=request, **action_args) + result = self.dispatch(request, action, action_args) response = self.serialize_response(accept, result) @@ -222,6 +221,10 @@ class Resource(object): return response + def dispatch(self, request, action, action_args): + controller_method = getattr(self.controller, action) + return controller_method(req=request, **action_args) + def serialize_response(self, content_type, response_body): """Serialize a dict into a string and wrap in a wsgi.Request object. @@ -253,7 +256,7 @@ class Resource(object): """ action_args = self.get_action_args(request.environ) - action = action_args.pop('action') + action = action_args.pop('action', None) if request.method.lower() in ('post', 'put'): if len(request.body) == 0: @@ -275,14 +278,18 @@ class Resource(object): return request.best_match_content_type() def get_action_args(self, request_environment): - args = request_environment['wsgiorg.routing_args'][1].copy() + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] - del args['controller'] + if 'format' in args: + del args['format'] - if 'format' in args: - del args['format'] + return args - return args + except KeyError: + return {} def get_deserializer(self, content_type): try: diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py index dd6327c8f..76025a1e3 100644 --- a/nova/objectstore/s3server.py +++ b/nova/objectstore/s3server.py @@ -81,7 +81,7 @@ class S3Application(wsgi.Router): super(S3Application, self).__init__(mapper) -class BaseRequestHandler(wsgi.Controller): +class BaseRequestHandler(object): """Base class emulating Tornado's web framework pattern in WSGI. This is a direct port of Tornado's implementation, so some key decisions diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 0860b51ac..a64552af1 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -17,12 +17,10 @@ import json -from nova import wsgi - from nova.api.openstack import extensions -class FoxInSocksController(wsgi.Controller): +class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed1..a8168f88a 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -26,15 +26,15 @@ from nova import flags from nova.api import openstack from nova.api.openstack import extensions from nova.api.openstack import flavors +from nova.api.openstack import wsgi from nova.tests.api.openstack import fakes -import nova.wsgi FLAGS = flags.FLAGS response_body = "Try to say this Mr. Knox, sir..." 
-class StubController(nova.wsgi.Controller): +class StubController(object): def __init__(self, body): self.body = body diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 0be3aecf1..d33268296 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -67,57 +67,3 @@ class Test(test.TestCase): self.assertEqual(result.body, "Router result") result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - - -class ControllerTest(test.TestCase): - - class TestRouter(wsgi.Router): - - class TestController(wsgi.Controller): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "test": ["id"]}}} - - def show(self, req, id): # pylint: disable=W0622,C0103 - return {"test": {"id": id}} - - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=self.TestController()) - wsgi.Router.__init__(self, mapper) - - def test_show(self): - request = wsgi.Request.blank('/tests/123') - result = request.get_response(self.TestRouter()) - self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) - - def test_response_content_type_from_accept_xml(self): - request = webob.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_accept_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_from_query_extension_xml(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_query_extension_json(self): - request = wsgi.Request.blank('/tests/123.json') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_default_when_unsupported(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.status_int, 200) - self.assertEqual(result.headers["Content-Type"], "application/json") diff --git a/nova/wsgi.py b/nova/wsgi.py index e60a8820d..3a292073b 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -82,36 +82,7 @@ class Server(object): class Request(webob.Request): - - def best_match_content_type(self): - """Determine the most acceptable content-type. - - Based on the query extension then the Accept header. 
- - """ - parts = self.path.rsplit('.', 1) - - if len(parts) > 1: - format = parts[1] - if format in ['json', 'xml']: - return 'application/{0}'.format(parts[1]) - - ctypes = ['application/json', 'application/xml'] - bm = self.accept.best_match(ctypes) - - return bm or 'application/json' - - def get_content_type(self): - allowed_types = ("application/xml", "application/json") - if not "Content-Type" in self.headers: - msg = _("Missing Content-Type") - LOG.debug(msg) - raise webob.exc.HTTPBadRequest(msg) - type = self.content_type - if type in allowed_types: - return type - LOG.debug(_("Wrong Content-Type: %s") % type) - raise webob.exc.HTTPBadRequest("Invalid content type") + pass class Application(object): @@ -286,7 +257,7 @@ class Router(object): Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a wsgi.Controller, who will route + well and have your controller be a controller, who will route the request to the action method. Examples: @@ -335,223 +306,6 @@ class Router(object): return app -class Controller(object): - """WSGI app that dispatched to methods. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon itself. All action methods - must, in addition to their normal parameters, accept a 'req' argument - which is the incoming wsgi.Request. They raise a webob.exc exception, - or return a dict which will be serialized by requested content type. - - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Call the method specified in req.environ by RoutesMiddleware.""" - arg_dict = req.environ['wsgiorg.routing_args'][1] - action = arg_dict['action'] - method = getattr(self, action) - LOG.debug("%s %s" % (req.method, req.url)) - del arg_dict['controller'] - del arg_dict['action'] - if 'format' in arg_dict: - del arg_dict['format'] - arg_dict['req'] = req - result = method(**arg_dict) - - if type(result) is dict: - content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - body = self._serialize(result, content_type, default_xmlns) - - response = webob.Response() - response.headers['Content-Type'] = content_type - response.body = body - msg_dict = dict(url=req.url, status=response.status_int) - msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - LOG.debug(msg) - return response - else: - return result - - def _serialize(self, data, content_type, default_xmlns): - """Serialize the given dict to the provided content_type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. - - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - - serializer = Serializer(_metadata, default_xmlns) - try: - return serializer.serialize(data, content_type) - except exception.InvalidContentType: - raise webob.exc.HTTPNotAcceptable() - - def _deserialize(self, data, content_type): - """Deserialize the request body to the specefied content type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. 
- - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - serializer = Serializer(_metadata) - return serializer.deserialize(data, content_type) - - def get_default_xmlns(self, req): - """Provide the XML namespace to use if none is otherwise specified.""" - return None - - -class Serializer(object): - """Serializes and deserializes dictionaries to certain MIME types.""" - - def __init__(self, metadata=None, default_xmlns=None): - """Create a serializer based on the given WSGI environment. - - 'metadata' is an optional dict mapping MIME types to information - needed to serialize a dictionary to that type. - - """ - self.metadata = metadata or {} - self.default_xmlns = default_xmlns - - def _get_serialize_handler(self, content_type): - handlers = { - 'application/json': self._to_json, - 'application/xml': self._to_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def serialize(self, data, content_type): - """Serialize a dictionary into the specified content type.""" - return self._get_serialize_handler(content_type)(data) - - def deserialize(self, datastring, content_type): - """Deserialize a string to a dictionary. - - The string must be in the format of a supported MIME type. - - """ - return self.get_deserialize_handler(content_type)(datastring) - - def get_deserialize_handler(self, content_type): - handlers = { - 'application/json': self._from_json, - 'application/xml': self._from_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def _from_json(self, datastring): - return utils.loads(datastring) - - def _from_xml(self, datastring): - xmldata = self.metadata.get('application/xml', {}) - plurals = set(xmldata.get('plurals', {})) - node = minidom.parseString(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - - def _from_xml_node(self, node, listnames): - """Convert a minidom node to a simple Python type. - - listnames is a collection of names of XML nodes whose subnodes should - be considered list items. - - """ - if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: - return node.childNodes[0].nodeValue - elif node.nodeName in listnames: - return [self._from_xml_node(n, listnames) for n in node.childNodes] - else: - result = dict() - for attr in node.attributes.keys(): - result[attr] = node.attributes[attr].nodeValue - for child in node.childNodes: - if child.nodeType != node.TEXT_NODE: - result[child.nodeName] = self._from_xml_node(child, - listnames) - return result - - def _to_json(self, data): - return utils.dumps(data) - - def _to_xml(self, data): - metadata = self.metadata.get('application/xml', {}) - # We expect data to contain a single key which is the XML root. 
- root_key = data.keys()[0] - doc = minidom.Document() - node = self._to_xml_node(doc, metadata, root_key, data[root_key]) - - xmlns = node.getAttribute('xmlns') - if not xmlns and self.default_xmlns: - node.setAttribute('xmlns', self.default_xmlns) - - return node.toprettyxml(indent=' ') - - def _to_xml_node(self, doc, metadata, nodename, data): - """Recursive method to convert data members to XML nodes.""" - result = doc.createElement(nodename) - - # Set the xml namespace if one is specified - # TODO(justinsb): We could also use prefixes on the keys - xmlns = metadata.get('xmlns', None) - if xmlns: - result.setAttribute('xmlns', xmlns) - - if type(data) is list: - collections = metadata.get('list_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for item in data: - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(item)) - result.appendChild(node) - return result - singular = metadata.get('plurals', {}).get(nodename, None) - if singular is None: - if nodename.endswith('s'): - singular = nodename[:-1] - else: - singular = 'item' - for item in data: - node = self._to_xml_node(doc, metadata, singular, item) - result.appendChild(node) - elif type(data) is dict: - collections = metadata.get('dict_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for k, v in data.items(): - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(k)) - text = doc.createTextNode(str(v)) - node.appendChild(text) - result.appendChild(node) - return result - attrs = metadata.get('attributes', {}).get(nodename, {}) - for k, v in data.items(): - if k in attrs: - result.setAttribute(k, str(v)) - else: - node = self._to_xml_node(doc, metadata, k, v) - result.appendChild(node) - else: - # Type is atom - node = doc.createTextNode(str(data)) - result.appendChild(node) - return result - - def paste_config_file(basename): """Find the best location in the system for a paste config file. -- cgit From 0aefdc6da92b8db8b15a3e8a0bef8fc5c4b46450 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 18 May 2011 20:33:52 -0400 Subject: missed the new wsgi test file --- nova/tests/api/openstack/test_wsgi.py | 248 ++++++++++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 nova/tests/api/openstack/test_wsgi.py diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py new file mode 100644 index 000000000..430dafe77 --- /dev/null +++ b/nova/tests/api/openstack/test_wsgi.py @@ -0,0 +1,248 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import json +import webob + +from nova import exception +from nova import test +from nova.api.openstack import wsgi + + +class RequestTest(test.TestCase): + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf
" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + +class SerializationTest(test.TestCase): + def test_xml(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_xml = '(2,3)' + xmlns = "testing xmlns" + serializer = wsgi.XMLSerializer(xmlns="asdf") + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_json = '{"servers":{"a":[2,3]}}' + serializer = wsgi.JSONSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_json) + + +class DeserializationTest(test.TestCase): + def test_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + deserializer = wsgi.JSONDeserializer() + self.assertEqual(deserializer.deserialize(data), as_dict) + + def test_xml(self): + xml = """ + + 123 + 1 + 1 + + """.strip() + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {'plurals': {'bs': 'b', 'ts': 't'}} + deserializer = wsgi.XMLDeserializer(metadata=metadata) + 
self.assertEqual(deserializer.deserialize(xml), as_dict) + + def test_xml_empty(self): + xml = """""" + as_dict = {"a": {}} + deserializer = wsgi.XMLDeserializer() + self.assertEqual(deserializer.deserialize(xml), as_dict) + + +class ResourceSerializerTest(test.TestCase): + def setUp(self): + class JSONSerializer(object): + def serialize(self, data): + return 'pew_json' + + class XMLSerializer(object): + def serialize(self, data): + return 'pew_xml' + + self.serializers = { + 'application/json': JSONSerializer(), + 'application/XML': XMLSerializer(), + } + + self.resource = wsgi.Resource(None, serializers=self.serializers) + + def tearDown(self): + pass + + def test_get_serializer(self): + self.assertEqual(self.resource.get_serializer('application/json'), + self.serializers['application/json']) + + def test_get_serializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.get_serializer, + 'application/unknown') + + def test_serialize_response_dict(self): + response = self.resource.serialize_response('application/json', {}) + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, 'pew_json') + + def test_serialize_response_non_dict(self): + response = self.resource.serialize_response('application/json', 'a') + self.assertEqual(response, 'a') + + def test_serialize_response_dict_to_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.serialize_response, + 'application/unknown', {}) + + def test_serialize_response_non_dict_to_unknown_content_type(self): + response = self.resource.serialize_response('application/unknown', 'a') + self.assertEqual(response, 'a') + + +class ResourceDeserializerTest(test.TestCase): + def setUp(self): + class JSONDeserializer(object): + def deserialize(self, data): + return 'pew_json' + + class XMLDeserializer(object): + def deserialize(self, data): + return 'pew_xml' + + self.deserializers = { + 'application/json': JSONDeserializer(), + 'application/XML': XMLDeserializer(), + } + + self.resource = wsgi.Resource(None, deserializers=self.deserializers) + + def tearDown(self): + pass + + def test_get_deserializer(self): + self.assertEqual(self.resource.get_deserializer('application/json'), + self.deserializers['application/json']) + + def test_get_deserializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.resource.get_deserializer, + 'application/unknown') + + def test_get_expected_content_type(self): + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + self.assertEqual(self.resource.get_expected_content_type(request), + 'application/json') + + def test_get_action_args(self): + env = { + 'wsgiorg.routing_args': [None, { + 'controller': None, + 'format': None, + 'action': 'update', + 'id': 12, + }], + } + + expected = {'action': 'update', 'id': 12} + + self.assertEqual(self.resource.get_action_args(env), expected) + + def test_deserialize_request(self): + def fake_get_routing_args(request): + return {'action': 'create'} + self.resource.get_action_args = fake_get_routing_args + + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/xml' + + deserialized = self.resource.deserialize_request(request) + expected = ('create', {}, 'application/xml') + + self.assertEqual(expected, deserialized) -- cgit From beea6545804dc17661eea83b373d74d14cf07c32 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 10:52:23 -0400 Subject: Minor 
cleanup --- nova/api/openstack/images.py | 8 +++----- nova/virt/libvirt_conn.py | 12 +++--------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index c2511b99f..ac02d63c5 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,8 +50,7 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or \ - utils.get_default_image_service() + self._image_service = image_service or utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. @@ -75,14 +74,13 @@ class Controller(common.OpenstackController): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def show(self, req, id): + def show(self, req, image_id): """Return detailed information about a specific image. :param req: `wsgi.Request` object - :param id: Image identifier (integer) + :param image_id: Image identifier (integer) """ context = req.environ['nova.context'] - image_id = id try: (image_service, service_image_id) = utils.get_image_service( diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ab47493fd..e311184e7 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,9 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['kernel_id']) - fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['kernel_id']).hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -854,9 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['ramdisk_id']) - fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['ramdisk_id']).hexdigest() self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, @@ -864,9 +860,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - fname_hash = hashlib.sha1() - fname_hash.update(disk_images['image_id']) - root_fname = fname_hash.hexdigest() + fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size -- cgit From d6fab80027e5fdb9a8d3e56044c399a7a80b2464 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 11:29:23 -0400 Subject: fname should have been root_fname --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e311184e7..9e66c3b48 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -860,7 +860,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - fname = hashlib.sha1(disk_images['image_id']).hexdigest() + root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size -- cgit From e0d43f39aeee0d62741ed40de9045bfde3fd20d8 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 16:03:10 -0400 Subject: No reason to hash ramdisk_id and kernel_id. They are ints. 
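This patch and the two libvirt patches before it together change how the image-cache filenames are derived: kernel and ramdisk ids are now used directly as cache filenames, while the root disk image id is still hashed. A condensed sketch of the resulting logic, not the full _create_image method (the helper name is illustrative; disk_images and the sha1 call are taken from the hunks above and below, and this is Python 2-era code where the ids are plain strings or small integers):

    import hashlib

    def cache_filenames(disk_images):
        # kernel and ramdisk ids are small identifiers, so they are used
        # directly as cache filenames instead of sha1 digests
        kernel_fname = disk_images['kernel_id']
        ramdisk_fname = disk_images['ramdisk_id']
        # the root disk image id is still hashed, as before
        root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
        return kernel_fname, ramdisk_fname, root_fname
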
--- nova/virt/libvirt_conn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9e66c3b48..25ba0bc8d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,7 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = hashlib.sha1(disk_images['kernel_id']).hexdigest() + fname = disk_images['kernel_id'] self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -852,7 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = hashlib.sha1(disk_images['ramdisk_id']).hexdigest() + fname = disk_images['ramdisk_id'] self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, -- cgit From 68426df2287c24efc3d327d12371911ac29d117e Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 19 May 2011 16:16:06 -0400 Subject: further refactoring of wsgi module; adding documentation and tests --- nova/api/direct.py | 4 +- nova/api/openstack/accounts.py | 2 +- nova/api/openstack/backup_schedules.py | 4 +- nova/api/openstack/consoles.py | 2 +- nova/api/openstack/extensions.py | 8 +- nova/api/openstack/faults.py | 12 +- nova/api/openstack/flavors.py | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 4 +- nova/api/openstack/ips.py | 4 +- nova/api/openstack/limits.py | 4 +- nova/api/openstack/server_metadata.py | 2 +- nova/api/openstack/servers.py | 11 +- nova/api/openstack/users.py | 2 +- nova/api/openstack/versions.py | 5 +- nova/api/openstack/wsgi.py | 301 ++++++++++++++++++++----------- nova/api/openstack/zones.py | 4 +- nova/tests/api/openstack/test_servers.py | 50 ++--- nova/tests/api/openstack/test_wsgi.py | 104 ++++++++--- 19 files changed, 331 insertions(+), 196 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 5e6c7c882..ea20042a7 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -291,8 +291,8 @@ class ServiceWrapper(object): try: content_type = req.best_match_content_type() serializer = { - 'application/xml': nova.api.openstack.wsgi.XMLSerializer(), - 'application/json': nova.api.openstack.wsgi.JSONSerializer(), + 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(), + 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(), }[content_type] return serializer.serialize(result) except: diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index d8a9d1909..faff8bb2c 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -88,7 +88,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 4153c90c1..d08a4799c 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -60,8 +60,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 97304affe..56f79db60 100644 --- 
a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -97,7 +97,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 73f174e07..19147bbea 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -165,8 +165,8 @@ class ResponseExtensionController(object): except AttributeError: default_xmlns = None serializer = { - 'application/xml': wsgi.XMLSerializer(), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] body = serializer.serialize(res) headers = {"Content-Type": content_type} @@ -229,7 +229,7 @@ class ExtensionMiddleware(base_wsgi.Middleware): return _factory def _action_ext_resources(self, application, ext_mgr, mapper): - """Return a dict of ActionExtensionResource objects by collection.""" + """Return a dict of ActionExtensionResource-s by collection.""" action_resources = {} for action in ext_mgr.get_actions(): if not action.collection in action_resources.keys(): @@ -248,7 +248,7 @@ class ExtensionMiddleware(base_wsgi.Middleware): return action_resources def _response_ext_resources(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionResource objects by collection.""" + """Returns a dict of ResponseExtensionResource-s by collection.""" response_ext_resources = {} for resp_ext in ext_mgr.get_response_extensions(): if not resp_ext.key in response_ext_resources.keys(): diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index fd36f8f17..b9a23c126 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -61,9 +61,9 @@ class Fault(webob.exc.HTTPException): content_type = req.best_match_content_type() serializer = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) @@ -100,9 +100,9 @@ class OverLimitFault(webob.exc.HTTPException): metadata = {"attributes": {"overLimitFault": "code"}} serializer = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), - 'application/json': wsgi.JSONSerializer(), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 46056a27a..9e98e6c27 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -86,7 +86,7 @@ def resource_factory(version='1.0'): }[version] serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns), + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 506b63acf..8acde9fe8 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -104,7 +104,7 @@ class Controller(object): def resource_factory(): 
serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5a03573d8..a9071ed8a 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -165,8 +165,8 @@ def resource_factory(version='1.0'): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata), } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 24612eafb..87c8c997a 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -72,8 +72,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 306048d8f..b0e093702 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -95,8 +95,8 @@ def resource_factory(version='1.0'): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=xmlns, - metadata=metadata) + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata) } return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index fb9449b4c..eff98c060 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -93,7 +93,7 @@ class Controller(object): def resource_factory(): serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V11), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 78f8bb1b7..8f39bd256 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -707,7 +707,7 @@ class ControllerV11(Controller): return common.XML_NS_V11 -class ServerCreateRequestXMLDeserializer(object): +class ServerXMLDeserializer(wsgi.XMLDeserializer): """ Deserializer to handle xml-formatted server create requests. 
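The rename in the next hunk (deserialize() becomes create()) only makes sense together with the action-based dispatch that the wsgi.py changes later in this patch introduce: TextDeserializer.deserialize() looks for a method named after the controller action and falls back to default(). A condensed sketch of that dispatch, with a hypothetical ExampleDeserializer standing in for ServerXMLDeserializer:

    class TextDeserializer(object):
        """Dispatch to an action-specific parser, as in the wsgi.py hunks below."""

        def deserialize(self, datastring, action=None):
            # use the handler named after the action if one exists
            try:
                action_method = getattr(self, action)
            except Exception:
                action_method = self.default
            return action_method(datastring)

        def default(self, datastring):
            raise NotImplementedError()

    class ExampleDeserializer(TextDeserializer):
        def create(self, datastring):
            return {'handled_by': 'create', 'body': datastring}

        def default(self, datastring):
            return {'handled_by': 'default', 'body': datastring}

    # ExampleDeserializer().deserialize('<server/>', 'create') is routed to
    # create(), while any other action (or action=None) falls back to default().
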
@@ -715,7 +715,7 @@ class ServerCreateRequestXMLDeserializer(object): and personality attributes """ - def deserialize(self, string): + def create(self, string): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) server = self._extract_server(dom) @@ -812,14 +812,13 @@ def resource_factory(version='1.0'): }[version] serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata, - xmlns=xmlns), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=xmlns), } deserializers = { - 'application/xml': ServerCreateRequestXMLDeserializer(), + 'application/xml': ServerXMLDeserializer(), } return wsgi.Resource(controller, serializers=serializers, deserializers=deserializers) - diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 35b6a502e..e14616349 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -106,7 +106,7 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index a8d785b52..9db160102 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -18,12 +18,11 @@ import webob import webob.dec -from nova import wsgi as base_wsgi import nova.api.openstack.views.versions from nova.api.openstack import wsgi -class Versions(wsgi.Resource, base_wsgi.Application): +class Versions(wsgi.Resource): def __init__(self): metadata = { "attributes": { @@ -33,7 +32,7 @@ class Versions(wsgi.Resource, base_wsgi.Application): } serializers = { - 'application/xml': wsgi.XMLSerializer(metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } super(Versions, self).__init__(None, serializers=serializers) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 97280c365..bd840a6f7 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -6,6 +6,7 @@ from xml.dom import minidom from nova import exception from nova import log as logging from nova import utils +from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' @@ -15,17 +16,17 @@ LOG = logging.getLogger('nova.api.openstack.wsgi') class Request(webob.Request): - def best_match_content_type(self, supported=None): - """Determine the requested content-type. + """Add some Openstack API-specific logic to the base webob.Request.""" - Based on the query extension then the Accept header. + def best_match_content_type(self): + """Determine the requested response content-type. - :param supported: list of content-types to override defaults + Based on the query extension then the Accept header. """ - supported = supported or ['application/json', 'application/xml'] - parts = self.path.rsplit('.', 1) + supported = ('application/json', 'application/xml') + parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) if ctype in supported: @@ -33,32 +34,52 @@ class Request(webob.Request): bm = self.accept.best_match(supported) + # default to application/json if we don't find a preference return bm or 'application/json' def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ if not "Content-Type" in self.headers: raise exception.InvalidContentType(content_type=None) allowed_types = ("application/xml", "application/json") - type = self.content_type + content_type = self.content_type - if type not in allowed_types: - raise exception.InvalidContentType(content_type=type) + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) else: - return type + return content_type -class JSONDeserializer(object): - def deserialize(self, datastring): - return utils.loads(datastring) +class TextDeserializer(object): + """Custom request body deserialization based on controller action name.""" + def deserialize(self, datastring, action=None): + """Find local deserialization method and parse request body.""" + try: + action_method = getattr(self, action) + except Exception: + action_method = self.default -class JSONSerializer(object): - def serialize(self, data): - return utils.dumps(data) + return action_method(datastring) + def default(self, datastring): + """Default deserialization code should live here""" + raise NotImplementedError() + + +class JSONDeserializer(TextDeserializer): + + def default(self, datastring): + return utils.loads(datastring) + + +class XMLDeserializer(TextDeserializer): -class XMLDeserializer(object): def __init__(self, metadata=None): """ :param metadata: information needed to deserialize xml into @@ -67,8 +88,7 @@ class XMLDeserializer(object): super(XMLDeserializer, self).__init__() self.metadata = metadata or {} - def deserialize(self, datastring): - """XML deserialization entry point.""" + def default(self, datastring): plurals = set(self.metadata.get('plurals', {})) node = minidom.parseString(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} @@ -95,18 +115,111 @@ class XMLDeserializer(object): return result -class XMLSerializer(object): +class RequestDeserializer(object): + """Break up a Request object into more useful pieces.""" + + def __init__(self, deserializers=None): + """ + :param deserializers: dictionary of content-type-specific deserializers + + """ + self.deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + + self.deserializers.update(deserializers or {}) + + def deserialize(self, request): + """Extract necessary pieces of the request. 
+ + :param request: Request object + :returns tuple of expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + if request.method.lower() in ('post', 'put'): + if len(request.body) == 0: + action_args['body'] = None + else: + content_type = request.get_content_type() + deserializer = self.get_deserializer(content_type) + + try: + body = deserializer.deserialize(request.body, action) + action_args['body'] = body + except exception.InvalidContentType: + action_args['body'] = None + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def get_deserializer(self, content_type): + try: + return self.deserializers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + + del args['controller'] + + if 'format' in args: + del args['format'] + + return args + + except KeyError: + return {} + + +class DictSerializer(object): + """Custom response body serialization based on controller action name.""" + + def serialize(self, data, action=None): + """Find local serialization method and encode response body.""" + try: + action_method = getattr(self, action) + except Exception: + action_method = self.default + + return action_method(data) + + def default(self, data): + """Default serialization code should live here""" + raise NotImplementedError() + + +class JSONDictSerializer(DictSerializer): + + def default(self, data): + return utils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + def __init__(self, metadata=None, xmlns=None): """ :param metadata: information needed to deserialize xml into a dictionary. :param xmlns: XML namespace to include with serialized xml """ - super(XMLSerializer, self).__init__() + super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns - def serialize(self, data): + def default(self, data): # We expect data to contain a single key which is the XML root. root_key = data.keys()[0] doc = minidom.Document() @@ -171,75 +284,32 @@ class XMLSerializer(object): return result -class Resource(object): - """WSGI app that dispatched to methods. +class ResponseSerializer(object): + """Encode the necessary pieces into a response object""" - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon itself. All action methods - must, in addition to their normal parameters, accept a 'req' argument - which is the incoming wsgi.Request. They raise a webob.exc exception, - or return a dict which will be serialized by requested content type. 
+ def __init__(self, serializers=None): + """ + :param serializers: dictionary of content-type-specific serializers - """ - def __init__(self, controller, serializers=None, deserializers=None): + """ self.serializers = { - 'application/xml': XMLSerializer(), - 'application/json': JSONSerializer(), + 'application/xml': XMLDictSerializer(), + 'application/json': JSONDictSerializer(), } self.serializers.update(serializers or {}) - self.deserializers = { - 'application/xml': XMLDeserializer(), - 'application/json': JSONDeserializer(), - } - self.deserializers.update(deserializers or {}) - - self.controller = controller - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """Call the method specified in req.environ by RoutesMiddleware.""" - LOG.debug("%s %s" % (request.method, request.url)) - - try: - action, action_args, accept = self.deserialize_request(request) - except exception.InvalidContentType: - return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - - result = self.dispatch(request, action, action_args) - - response = self.serialize_response(accept, result) - - try: - msg_dict = dict(url=request.url, status=response.status_int) - msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - except AttributeError: - msg_dict = dict(url=request.url) - msg = _("%(url)s returned a fault") - - LOG.debug(msg) - - return response - - def dispatch(self, request, action, action_args): - controller_method = getattr(self.controller, action) - return controller_method(req=request, **action_args) - - def serialize_response(self, content_type, response_body): + def serialize(self, response_data, content_type): """Serialize a dict into a string and wrap in a wsgi.Request object. + :param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body - :param response_body: dict produced by the Controller """ - if not type(response_body) is dict: - return response_body - response = webob.Response() response.headers['Content-Type'] = content_type serializer = self.get_serializer(content_type) - response.body = serializer.serialize(response_body) + response.body = serializer.serialize(response_data) return response @@ -249,50 +319,63 @@ class Resource(object): except Exception: raise exception.InvalidContentType(content_type=content_type) - def deserialize_request(self, request): - """Parse a wsgi request into a set of params we care about. - :param request: wsgi.Request object +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. 
+ """ + def __init__(self, controller, serializers=None, deserializers=None): """ - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) + :param controller: object that implement methods created by routes lib + :param serializers: dict of content-type specific text serializers + :param deserializers: dict of content-type specific text deserializers - if request.method.lower() in ('post', 'put'): - if len(request.body) == 0: - action_args['body'] = None - else: - content_type = request.get_content_type() - deserializer = self.get_deserializer(content_type) + """ + self.controller = controller + self.serializer = ResponseSerializer(serializers) + self.deserializer = RequestDeserializer(deserializers) - try: - action_args['body'] = deserializer.deserialize(request.body) - except exception.InvalidContentType: - action_args['body'] = None + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" - accept = self.get_expected_content_type(request) + LOG.debug("%s %s" % (request.method, request.url)) - return (action, action_args, accept) + try: + action, action_args, accept = self.deserializer.deserialize( + request) + except exception.InvalidContentType: + return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) - def get_expected_content_type(self, request): - return request.best_match_content_type() + action_result = self.dispatch(request, action, action_args) - def get_action_args(self, request_environment): - try: - args = request_environment['wsgiorg.routing_args'][1].copy() + #TODO(bcwaldon): find a more elegant way to pass through non-dict types + if type(action_result) is dict: + response = self.serializer.serialize(action_result, accept) + else: + response = action_result - del args['controller'] + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError: + msg_dict = dict(url=request.url) + msg = _("%(url)s returned a fault") - if 'format' in args: - del args['format'] + LOG.debug(msg) - return args + return response - except KeyError: - return {} + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" - def get_deserializer(self, content_type): - try: - return self.deserializers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) + controller_method = getattr(self.controller, action) + return controller_method(req=request, **action_args) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index d17ab7a9b..e750fc230 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -101,8 +101,8 @@ def resource_factory(): } serializers = { - 'application/xml': wsgi.XMLSerializer(xmlns=wsgi.XMLNS_V10, - metadata=metadata), + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), } return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 15f376f74..31571fc46 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -993,6 +993,14 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 501) + def test_server_change_password_xml(self): + req = webob.Request.blank('/v1.0/servers/1/action') + 
req.method = 'POST' + req.content_type = 'application/xml' + req.body = '' +# res = req.get_response(fakes.wsgi_app()) +# self.assertEqual(res.status_int, 501) + def test_server_change_password_v1_1(self): class MockSetAdminPassword(object): @@ -1375,13 +1383,13 @@ class ServersTest(test.TestCase): class TestServerCreateRequestXMLDeserializer(unittest.TestCase): def setUp(self): - self.deserializer = servers.ServerCreateRequestXMLDeserializer() + self.deserializer = servers.ServerXMLDeserializer() def test_minimal_request(self): serial_request = """ """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1395,7 +1403,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1410,7 +1418,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1426,7 +1434,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1443,7 +1451,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1461,7 +1469,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): aabbccdd """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1471,7 +1479,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> aabbccdd abcd""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}, {"path": "/etc/sudoers", "contents": "abcd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1487,7 +1495,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): anything """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1496,7 +1504,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): aabbccdd""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1505,7 +1513,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = 
self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1514,7 +1522,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1526,7 +1534,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): beta """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1539,7 +1547,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): bar """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta", "foo": "bar"} self.assertEquals(request["server"]["metadata"], expected) @@ -1551,7 +1559,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": ""} self.assertEquals(request["server"]["metadata"], expected) @@ -1564,7 +1572,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "", "delta": ""} self.assertEquals(request["server"]["metadata"], expected) @@ -1576,7 +1584,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): beta """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1589,7 +1597,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): gamma """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "gamma"} self.assertEquals(request["server"]["metadata"], expected) @@ -1602,7 +1610,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): baz """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"foo": "baz"} self.assertEquals(request["server"]["metadata"], expected) @@ -1649,7 +1657,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", }, ], }} - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEqual(request, expected) def test_request_xmlser_with_flavor_image_ref(self): @@ -1659,7 +1667,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", imageRef="http://localhost:8774/v1.1/images/1" flavorRef="http://localhost:8774/v1.1/flavors/1"> """ - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEquals(request["server"]["flavorRef"], "http://localhost:8774/v1.1/flavors/1") self.assertEquals(request["server"]["imageRef"], diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 430dafe77..6c57d3e4f 
100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -76,26 +76,56 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") -class SerializationTest(test.TestCase): +class DictSerializerTest(test.TestCase): + def test_dispatch(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'update'), 'trousers') + + +class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' xmlns = "testing xmlns" - serializer = wsgi.XMLSerializer(xmlns="asdf") + serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_xml) + +class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.JSONSerializer() + serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_json) -class DeserializationTest(test.TestCase): +class TextDeserializerTest(test.TestCase): + def test_dispatch(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers') + + +class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", @@ -112,6 +142,8 @@ class DeserializationTest(test.TestCase): deserializer = wsgi.JSONDeserializer() self.assertEqual(deserializer.deserialize(data), as_dict) + +class XMLDeserializerTest(test.TestCase): def test_xml(self): xml = """ @@ -137,7 +169,7 @@ class DeserializationTest(test.TestCase): self.assertEqual(deserializer.deserialize(xml), as_dict) -class ResourceSerializerTest(test.TestCase): +class ResponseSerializerTest(test.TestCase): def setUp(self): class JSONSerializer(object): def serialize(self, data): @@ -152,40 +184,32 @@ class ResourceSerializerTest(test.TestCase): 'application/XML': XMLSerializer(), } - self.resource = wsgi.Resource(None, serializers=self.serializers) + self.serializer = wsgi.ResponseSerializer(serializers=self.serializers) def tearDown(self): pass def test_get_serializer(self): - self.assertEqual(self.resource.get_serializer('application/json'), + self.assertEqual(self.serializer.get_serializer('application/json'), self.serializers['application/json']) def test_get_serializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.get_serializer, + self.serializer.get_serializer, 'application/unknown') - def test_serialize_response_dict(self): - response = self.resource.serialize_response('application/json', {}) + def test_serialize_response(self): + response = self.serializer.serialize({}, 'application/json') 
self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, 'pew_json') - def test_serialize_response_non_dict(self): - response = self.resource.serialize_response('application/json', 'a') - self.assertEqual(response, 'a') - def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.serialize_response, + self.serializer.serialize, 'application/unknown', {}) - def test_serialize_response_non_dict_to_unknown_content_type(self): - response = self.resource.serialize_response('application/unknown', 'a') - self.assertEqual(response, 'a') - -class ResourceDeserializerTest(test.TestCase): +class RequestDeserializerTest(test.TestCase): def setUp(self): class JSONDeserializer(object): def deserialize(self, data): @@ -200,24 +224,25 @@ class ResourceDeserializerTest(test.TestCase): 'application/XML': XMLDeserializer(), } - self.resource = wsgi.Resource(None, deserializers=self.deserializers) + self.deserializer = wsgi.RequestDeserializer( + deserializers=self.deserializers) def tearDown(self): pass def test_get_deserializer(self): - self.assertEqual(self.resource.get_deserializer('application/json'), - self.deserializers['application/json']) + expected = self.deserializer.get_deserializer('application/json') + self.assertEqual(expected, self.deserializers['application/json']) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.resource.get_deserializer, + self.deserializer.get_deserializer, 'application/unknown') def test_get_expected_content_type(self): request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' - self.assertEqual(self.resource.get_expected_content_type(request), + self.assertEqual(self.deserializer.get_expected_content_type(request), 'application/json') def test_get_action_args(self): @@ -232,17 +257,38 @@ class ResourceDeserializerTest(test.TestCase): expected = {'action': 'update', 'id': 12} - self.assertEqual(self.resource.get_action_args(env), expected) + self.assertEqual(self.deserializer.get_action_args(env), expected) - def test_deserialize_request(self): + def test_deserialize(self): def fake_get_routing_args(request): return {'action': 'create'} - self.resource.get_action_args = fake_get_routing_args + self.deserializer.get_action_args = fake_get_routing_args request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/xml' - deserialized = self.resource.deserialize_request(request) + deserialized = self.deserializer.deserialize(request) expected = ('create', {}, 'application/xml') self.assertEqual(expected, deserialized) + + +class ResourceTest(test.TestCase): + def test_dispatch(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + actual = resource.dispatch(None, 'index', {'pants': 'off'}) + expected = 'off' + self.assertEqual(actual, expected) + + def test_dispatch_unknown_controller_action(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + self.assertRaises(AttributeError, resource.dispatch, + None, 'create', {}) -- cgit From a1869741689817168c75046f2f81ee9761956cbc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 19 May 2011 18:05:38 -0400 Subject: Fail early if requested imageRef does not exist when creating a server. 
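The hunk below replaces the old "need to assert image exists a better way" TODO with an explicit existence check inside the existing try/except, so a bad imageRef is rejected with an HTTP 400 before any work is done. Pulled out of its surrounding context for readability (the helper name is illustrative; image_service, context and image_id are as in the hunk):

    def _assert_image_exists(image_service, context, image_id):
        # reject the create request early if the image id is unknown; in the
        # patch the AssertionError is caught by the enclosing bare except and
        # turned into faults.Fault(exc.HTTPBadRequest(...))
        image_set = set([x['id'] for x in image_service.index(context)])
        assert image_id in image_set

The follow-up patch ("Fixed some tests", below) normalizes both sides of this membership test with str(), so integer and string image ids compare consistently.
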
--- nova/api/openstack/servers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a4e679242..337c6ced8 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,10 +144,8 @@ class Controller(common.OpenstackController): (image_service, image_id) = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - - #TODO: need to assert image exists a better way - #image_id = common.get_image_id_from_image_hash(image_service, - #context, image_ref) + image_set = set([x['id'] for x in image_service.index(context)]) + assert image_id in image_set except: msg = _("Can not find requested image") return faults.Fault(exc.HTTPBadRequest(msg)) -- cgit From e16b2d22dc4e6e24c3bf5150a0830661933aad29 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 20 May 2011 04:14:02 -0400 Subject: Fixed some tests. --- nova/api/openstack/common.py | 28 ---------------------------- nova/api/openstack/servers.py | 6 +++--- nova/exception.py | 4 ++++ nova/flags.py | 3 --- nova/image/fake.py | 12 ++++++++++++ nova/tests/api/openstack/test_servers.py | 14 +++++++++----- nova/tests/integrated/integrated_helpers.py | 7 ++++++- nova/tests/test_quota.py | 8 ++++---- nova/utils.py | 16 +++++++--------- 9 files changed, 45 insertions(+), 53 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..a89594c13 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -100,34 +100,6 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): return items[start_index:range_end] -def get_image_id_from_image_hash(image_service, context, image_hash): - """Given an Image ID Hash, return an objectstore Image ID. - - image_service - reference to objectstore compatible image service. - context - security context for image service requests. - image_hash - hash of the image ID. - """ - - # FIX(sandy): This is terribly inefficient. It pulls all images - # from objectstore in order to find the match. ObjectStore - # should have a numeric counterpart to the string ID. - try: - items = image_service.detail(context) - except NotImplementedError: - items = image_service.index(context) - for image in items: - image_id = image['id'] - try: - if abs(hash(image_id)) == int(image_hash): - return image_id - except ValueError: - msg = _("Requested image_id has wrong format: %s," - "should have numerical format") % image_id - LOG.error(msg) - raise Exception(msg) - raise exception.ImageNotFound(image_id=image_hash) - - def get_id_from_href(href): """Return the id portion of a url as an int. 
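Several of the remaining hunks in this patch follow from flags.py dropping the separate glance_image_service flag: the tests now point FLAGS.image_service at the fake service and stub out the Glance class directly. The pattern, repeated in test_servers.py and integrated_helpers.py below, condenses to roughly the following (a sketch; the function name is illustrative and stubs is the usual stub-out fixture the tests already use):

    import nova.image.fake
    import nova.image.glance
    from nova import flags

    FLAGS = flags.FLAGS

    def stub_out_glance(stubs):
        # serve image lookups from the in-memory fake service instead of Glance
        def fake_image_service(*args):
            return nova.image.fake.FakeImageService()

        FLAGS.image_service = 'nova.image.fake.FakeImageService'
        stubs.Set(nova.image.glance, 'GlanceImageService', fake_image_service)
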
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 337c6ced8..31c1e86c0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,10 +144,10 @@ class Controller(common.OpenstackController): (image_service, image_id) = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - image_set = set([x['id'] for x in image_service.index(context)]) - assert image_id in image_set + images = set([str(x['id']) for x in image_service.index(context)]) + assert str(image_id) in images except: - msg = _("Can not find requested image") + msg = _("Cannot find requested image %s") % image_ref return faults.Fault(exc.HTTPBadRequest(msg)) personality = env['server'].get('personality') diff --git a/nova/exception.py b/nova/exception.py index cf6069454..4c977aca0 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -279,6 +279,10 @@ class DiskNotFound(NotFound): message = _("No disk at %(location)s") +class InvalidImageRef(Invalid): + message = _("Invalid image ref %(image_ref)s.") + + class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") diff --git a/nova/flags.py b/nova/flags.py index ee5adae32..32cb6efa8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,9 +362,6 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + - 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') diff --git a/nova/image/fake.py b/nova/image/fake.py index 2a60c7743..659c16557 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -79,10 +79,22 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} + + image5 = {'id': '3', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) + self.create(None, image5) super(FakeImageService, self).__init__() def index(self, context): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index bced2b910..22beef05f 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -29,6 +29,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -37,6 +38,7 @@ from nova.compute import power_state import nova.db.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata +import nova.image.fake import nova.rpc from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -464,7 +466,12 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' + def fake_image_service(*args): + return 
nova.image.fake.FakeImageService() + + FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.stubs.Set( + nova.image.glance, 'GlanceImageService', fake_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -476,8 +483,6 @@ class ServersTest(test.TestCase): fake_method) self.stubs.Set(nova.api.openstack.servers.Controller, "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) - self.stubs.Set(nova.api.openstack.common, - "get_image_id_from_image_hash", image_id_from_hash) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) def _test_create_instance_helper(self): @@ -1707,11 +1712,10 @@ class TestServerInstanceCreation(test.TestCase): return stub_method compute_api = MockComputeAPI() + FLAGS.image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) self.stubs.Set(nova.api.openstack.servers.Controller, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) - self.stubs.Set(nova.api.openstack.common, - 'get_image_id_from_image_hash', make_stub_method(2)) return compute_api def _create_personality_request_dict(self, personality_files): diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index e6efc16c5..5871a498c 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -27,6 +27,7 @@ from nova import flags from nova import service from nova import test # For the flags from nova.auth import manager +import nova.image.glance from nova.log import logging from nova.tests.integrated.api import client @@ -151,6 +152,11 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) + def fake_image_service(*args): + return nova.image.fake.FakeImageService() + self.stubs.Set( + nova.image.glance, 'GlanceImageService', fake_image_service) + # set up services self.start_service('compute') self.start_service('volume') @@ -185,7 +191,6 @@ class _IntegratedTestBase(test.TestCase): """An opportunity to setup flags, before the services are started.""" f = {} f['image_service'] = 'nova.image.fake.FakeImageService' - f['glance_image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 9ede0786f..02b641a47 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -280,18 +280,18 @@ class QuotaTestCase(test.TestCase): FLAGS.quota_max_injected_files) def _create_with_injected_files(self, files): - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type=inst_type, image_id='fake', + instance_type=inst_type, image_id='3', injected_files=files) def test_no_injected_files(self): - FLAGS.glance_image_service = 'nova.image.fake.FakeImageService' + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') - api.create(self.context, instance_type=inst_type, image_id='fake') + api.create(self.context, instance_type=inst_type, image_id='3') def test_max_injected_files(self): files = [] diff 
--git a/nova/utils.py b/nova/utils.py index 85934813e..3802f50c4 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -748,11 +748,7 @@ def parse_image_ref(image_ref): o = urlparse(image_ref) port = o.port or 80 host = o.netloc.split(':', 1)[0] - image_id = o.path.split('/')[-1] - - if is_int(image_id): - image_id = int(image_id) - + image_id = int(o.path.split('/')[-1]) return (image_id, host, port) @@ -776,8 +772,10 @@ def get_image_service(image_ref): if is_int(image_ref): return (get_default_image_service(), int(image_ref)) - (image_id, host, port) = parse_image_ref(image_ref) - glance_client = import_class('nova.image.glance.GlanceClient')(host, - port) - image_service = import_class(FLAGS.glance_image_service)(glance_client) + try: + (image_id, host, port) = parse_image_ref(image_ref) + except: + raise exception.InvalidImageRef(image_ref=image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) -- cgit From 0f191404fee42b9225f364af12242812798ff08a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 11:42:38 -0400 Subject: fixed silly issue with variable needing to be named 'id' for the url mapper, also caught new exception type where needed --- nova/api/openstack/images.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index ac02d63c5..2a3f9e070 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -74,7 +74,7 @@ class Controller(common.OpenstackController): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def show(self, req, image_id): + def show(self, req, id): """Return detailed information about a specific image. :param req: `wsgi.Request` object @@ -84,11 +84,14 @@ class Controller(common.OpenstackController): try: (image_service, service_image_id) = utils.get_image_service( - image_id) + id) image = image_service.show(context, service_image_id) except exception.NotFound: explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) + except exception.InvalidImageRef: + explanation = _("Image not found.") + raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) return dict(image=self.get_builder(req).build(image, detail=True)) -- cgit From f1da26ec9af6f6adffb7b6bfdc64f9702db93b56 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 11:50:00 -0400 Subject: fix pep8 issue --- nova/api/openstack/images.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 2a3f9e070..5508e7d50 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,7 +50,8 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or utils.get_default_image_service() + self._image_service = image_service or \ + utils.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. 
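(Editorial aside, not part of the patches.) With the utils.py change above, an unparseable image reference now surfaces as the new InvalidImageRef exception rather than an unhandled ValueError, and the images controller maps it to "Image not found." A rough usage sketch of the behaviour at this point in the series; the glance host and port are placeholder values:

    from nova import exception
    from nova import utils

    # A bare integer (or digit string) resolves to the default image
    # service configured by the image_service flag, plus the integer id.
    service, image_id = utils.get_image_service('42')

    # A glance-style href is split into (id, host, port) and wired up to
    # a GlanceClient / GlanceImageService pair.
    service, image_id = utils.get_image_service(
        'http://myglanceserver:9292/images/42')

    # Anything that cannot be parsed raises InvalidImageRef, which the
    # OpenStack API controller above turns into an HTTP 404.
    try:
        utils.get_image_service('not-an-image-ref')
    except exception.InvalidImageRef:
        pass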
-- cgit From 7ed71092d513bc621be539e612e6b4e66849b888 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Fri, 20 May 2011 14:22:58 -0400 Subject: combined the exception catching to eliminate duplication --- nova/api/openstack/images.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5508e7d50..523b3f431 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -87,10 +87,7 @@ class Controller(common.OpenstackController): (image_service, service_image_id) = utils.get_image_service( id) image = image_service.show(context, service_image_id) - except exception.NotFound: - explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - except exception.InvalidImageRef: + except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) -- cgit From 3fc3b7537cc1af2783829a2caaca272e83d6d3e8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 20 May 2011 14:42:19 -0400 Subject: renaming resource_factory to create_resource --- nova/api/openstack/__init__.py | 34 ++++++++++++++++----------------- nova/api/openstack/accounts.py | 2 +- nova/api/openstack/backup_schedules.py | 2 +- nova/api/openstack/consoles.py | 2 +- nova/api/openstack/flavors.py | 2 +- nova/api/openstack/image_metadata.py | 2 +- nova/api/openstack/images.py | 2 +- nova/api/openstack/ips.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/server_metadata.py | 2 +- nova/api/openstack/servers.py | 2 +- nova/api/openstack/shared_ip_groups.py | 2 +- nova/api/openstack/users.py | 2 +- nova/api/openstack/zones.py | 2 +- nova/tests/api/openstack/test_limits.py | 4 ++-- 15 files changed, 32 insertions(+), 32 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index fbbd99cb9..4419d0748 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -99,19 +99,19 @@ class APIRouter(base_wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", - controller=zones.resource_factory(), + controller=zones.create_resource(), collection={'detail': 'GET', 'info': 'GET'}), mapper.resource("user", "users", - controller=users.resource_factory(), + controller=users.create_resource(), collection={'detail': 'GET'}) mapper.resource("account", "accounts", - controller=accounts.resource_factory(), + controller=accounts.create_resource(), collection={'detail': 'GET'}) mapper.resource("console", "consoles", - controller=consoles.resource_factory(), + controller=consoles.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) @@ -124,31 +124,31 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.resource_factory('1.0'), + controller=servers.create_resource('1.0'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.resource_factory('1.0'), + controller=images.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", - controller=flavors.resource_factory('1.0'), + controller=flavors.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, - controller=shared_ip_groups.resource_factory()) 
+ controller=shared_ip_groups.create_resource()) mapper.resource("backup_schedule", "backup_schedule", - controller=backup_schedules.resource_factory(), + controller=backup_schedules.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("limit", "limits", - controller=limits.resource_factory('1.0')) + controller=limits.create_resource('1.0')) - mapper.resource("ip", "ips", controller=ips.resource_factory(), + mapper.resource("ip", "ips", controller=ips.create_resource(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', collection_name='servers')) @@ -160,27 +160,27 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.resource_factory('1.1'), + controller=servers.create_resource('1.1'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.resource_factory('1.1'), + controller=images.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("image_meta", "meta", - controller=image_metadata.resource_factory(), + controller=image_metadata.create_resource(), parent_resource=dict(member_name='image', collection_name='images')) mapper.resource("server_meta", "meta", - controller=server_metadata.resource_factory(), + controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("flavor", "flavors", - controller=flavors.resource_factory('1.1'), + controller=flavors.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("limit", "limits", - controller=limits.resource_factory('1.1')) + controller=limits.create_resource('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index faff8bb2c..0dcd37217 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -80,7 +80,7 @@ class Controller(object): return dict(account=_translate_keys(account)) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "account": ["id", "name", "description", "manager"], diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index d08a4799c..71a14d4ce 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -52,7 +52,7 @@ class Controller(object): return faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): metadata = { 'attributes': { 'backupSchedule': [], diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 56f79db60..bccf04d8f 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -89,7 +89,7 @@ class Controller(object): return exc.HTTPAccepted() -def resource_factory(): +def create_resource(): metadata = { 'attributes': { 'console': [], diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 9e98e6c27..a21ff6cb2 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -74,7 +74,7 @@ class ControllerV11(Controller): return views.flavors.ViewBuilderV11(base_url) -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 8acde9fe8..88e10168d 100644 --- a/nova/api/openstack/image_metadata.py +++ 
b/nova/api/openstack/image_metadata.py @@ -102,7 +102,7 @@ class Controller(object): self.image_service.update(context, image_id, img, None) -def resource_factory(): +def create_resource(): serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index a9071ed8a..3376f358a 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -145,7 +145,7 @@ class ControllerV11(Controller): return common.XML_NS_V11 -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 87c8c997a..abea71830 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -63,7 +63,7 @@ class Controller(object): return faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): metadata = { 'list_collections': { 'public': {'item_name': 'ip', 'item_key': 'addr'}, diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index b0e093702..2d9fe356f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -73,7 +73,7 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': LimitsControllerV10, '1.1': LimitsControllerV11, diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index eff98c060..b38b84a2a 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -91,7 +91,7 @@ class Controller(object): raise error -def resource_factory(): +def create_resource(): serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8f39bd256..bdd2960d9 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -784,7 +784,7 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): return "" -def resource_factory(version='1.0'): +def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index db178f2a2..4f11f8dfb 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -49,5 +49,5 @@ class Controller(object): raise faults.Fault(exc.HTTPNotImplemented()) -def resource_factory(): +def create_resource(): return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index e14616349..50975fc1f 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -98,7 +98,7 @@ class Controller(object): return dict(user=_translate_keys(self.manager.get_user(id))) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "user": ["id", "name", "access", "secret", "admin"], diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index e750fc230..0475deb52 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -93,7 +93,7 @@ class Controller(object): return dict(zone=_scrub_zone(zone)) -def resource_factory(): +def create_resource(): metadata = { "attributes": { "zone": ["id", "api_url", "name", "capabilities"], diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 
db859c2f8..4cf857507 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -65,7 +65,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.resource_factory('1.0') + self.controller = limits.create_resource('1.0') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -178,7 +178,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.resource_factory('1.1') + self.controller = limits.create_resource('1.1') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" -- cgit From 2c16eb37822b3ebdb14ac36df26362636d0f5078 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 20 May 2011 16:36:10 -0400 Subject: minor cleanup --- nova/api/openstack/images.py | 3 --- nova/api/openstack/servers.py | 3 --- nova/tests/api/openstack/test_wsgi.py | 1 - 3 files changed, 7 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 3376f358a..7f5551664 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -141,9 +141,6 @@ class ControllerV11(Controller): base_url = request.application_url return images_view.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 - def create_resource(version='1.0'): controller = { diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bdd2960d9..313321d7d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -703,9 +703,6 @@ class ControllerV11(Controller): raise exc.HTTPBadRequest(msg) return password - def get_default_xmlns(self, req): - return common.XML_NS_V11 - class ServerXMLDeserializer(wsgi.XMLDeserializer): """ diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 6c57d3e4f..89603d82b 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -94,7 +94,6 @@ class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' - xmlns = "testing xmlns" serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') -- cgit From 0850945efd0c5d7341590acd109572b9caf89e18 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 20 May 2011 21:30:04 +0000 Subject: move init start position to 96 to allow openvswitch time to fully start --- plugins/xenserver/networking/etc/init.d/openvswitch-nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/networking/etc/init.d/openvswitch-nova b/plugins/xenserver/networking/etc/init.d/openvswitch-nova index e4dbdf4af..8672a69b8 100755 --- a/plugins/xenserver/networking/etc/init.d/openvswitch-nova +++ b/plugins/xenserver/networking/etc/init.d/openvswitch-nova @@ -2,7 +2,7 @@ # # openvswitch-nova # -# chkconfig: 2345 10 89 +# chkconfig: 2345 96 89 # description: Apply initial OVS flows for Nova # Copyright 2011 OpenStack LLC. 
-- cgit From 4a184103fef7b1209ecfe3a6aadeccb8fc08fa31 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 21 May 2011 02:04:29 -0400 Subject: No reason to modify the way file names are generated for kernel and ramdisk, since the kernel_id and ramdisk_id is still guaranteed to be ints. --- nova/virt/libvirt_conn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 25ba0bc8d..18f5e3aa9 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -844,7 +844,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: - fname = disk_images['kernel_id'] + fname = '%08x' % int(disk_images['kernel_id']) self._cache_image(fn=self._fetch_image, target=basepath('kernel'), fname=fname, @@ -852,7 +852,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) if disk_images['ramdisk_id']: - fname = disk_images['ramdisk_id'] + fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, -- cgit From f1983479ae8d2483bdb73a494c9043f82928f189 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 21 May 2011 02:34:27 -0400 Subject: Minor cleanup --- nova/api/openstack/images.py | 3 +-- nova/api/openstack/servers.py | 4 ++-- nova/image/fake.py | 2 +- nova/virt/libvirt_conn.py | 1 - 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 523b3f431..bf9d3f49e 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -84,8 +84,7 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = utils.get_image_service( - id) + (image_service, service_image_id) = utils.get_image_service(id) image = image_service.show(context, service_image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 31c1e86c0..d5dee61a5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -141,7 +141,7 @@ class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - (image_service, image_id) = utils.get_image_service(image_ref) + image_service, image_id = utils.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) images = set([str(x['id']) for x in image_service.index(context)]) @@ -559,7 +559,7 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - (image_service, service_image_id) = utils.get_image_service(image_id) + image_service, service_image_id = utils.get_image_service(image_id) image_meta = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub diff --git a/nova/image/fake.py b/nova/image/fake.py index 659c16557..939f855ac 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -35,7 +35,7 @@ FLAGS = flags.FLAGS class FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" - def __init__(self, client=None): + def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
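(Editorial aside, not part of the patches.) The libvirt_conn.py change a few hunks above restores the fixed-width cache-file names for kernel and ramdisk images; because those ids are still plain integers, '%08x' % int(id) keeps yielding stable eight-character names. A quick illustration with arbitrary example ids:

    # Zero-padded hexadecimal cache-file names,
    # e.g. '1' -> '00000001', '123' -> '0000007b'.
    for image_id in ('1', '3', '123'):
        fname = '%08x' % int(image_id)
        assert len(fname) == 8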
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18f5e3aa9..8ba5d09ba 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -861,7 +861,6 @@ class LibvirtConnection(driver.ComputeDriver): project=project) root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() - size = FLAGS.minimum_root_size inst_type_id = inst['instance_type_id'] -- cgit From 58c18901ab27219248e64175f2745502499dc265 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sun, 22 May 2011 03:16:16 -0400 Subject: Removing utils.is_int() --- nova/api/openstack/views/servers.py | 2 +- nova/utils.py | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 70a942594..0fe9dbe4a 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -132,7 +132,7 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): image_id = inst['image_id'] - if utils.is_int(image_id): + if str(image_id).isdigit(): image_id = int(image_id) response['imageRef'] = image_id diff --git a/nova/utils.py b/nova/utils.py index 3802f50c4..ecca5303a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -728,10 +728,6 @@ def parse_server_string(server_str): return ('', '') -def is_int(x): - return re.match(r'\d+$', str(x)) - - def parse_image_ref(image_ref): """Parse an image href into composite parts. @@ -742,7 +738,7 @@ def parse_image_ref(image_ref): :param image_ref: href or id of an image """ - if is_int(image_ref): + if str(image_ref).isdigit(): return (int(image_ref), None, None) o = urlparse(image_ref) @@ -769,7 +765,7 @@ def get_image_service(image_ref): """ image_ref = image_ref or 0 - if is_int(image_ref): + if str(image_ref).isdigit(): return (get_default_image_service(), int(image_ref)) try: -- cgit From 1c315d233128f1013d1ec02c78acb36821f6c63d Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 10:28:04 -0400 Subject: moved utils functions into nova/image/ --- bin/nova-manage | 3 +- nova/api/openstack/image_metadata.py | 3 +- nova/api/openstack/images.py | 9 ++-- nova/api/openstack/servers.py | 10 +++-- nova/compute/api.py | 7 ++- nova/image/__init__.py | 77 ++++++++++++++++++++++++++++++++ nova/image/s3.py | 3 +- nova/tests/api/openstack/test_servers.py | 1 + nova/utils.py | 49 -------------------- nova/virt/images.py | 3 +- nova/virt/libvirt_conn.py | 6 ++- 11 files changed, 107 insertions(+), 64 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3f3fd72a6..8a9be5d8f 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -78,6 +78,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import image from nova import log as logging from nova import quota from nova import rpc @@ -905,7 +906,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.get_default_image_service() + self.image_service = image.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index f6913ffc6..c51d7acf2 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -18,6 +18,7 @@ from webob import exc from nova import flags +from nova import image from nova import quota from nova import 
utils from nova import wsgi @@ -32,7 +33,7 @@ class Controller(common.OpenstackController): """The image metadata API controller for the Openstack API""" def __init__(self): - self.image_service = utils.get_default_image_service() + self.image_service = image.get_default_image_service() super(Controller, self).__init__() def _get_metadata(self, context, image_id, image=None): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index bf9d3f49e..c61b5c6a6 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -18,6 +18,7 @@ import webob.exc from nova import compute from nova import exception from nova import flags +import nova.image from nova import log from nova import utils from nova.api.openstack import common @@ -51,7 +52,7 @@ class Controller(common.OpenstackController): """ self._compute_service = compute_service or compute.API() self._image_service = image_service or \ - utils.get_default_image_service() + nova.image.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. @@ -84,7 +85,8 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = utils.get_image_service(id) + (image_service, service_image_id) = nova.image.get_image_service( + id) image = image_service.show(context, service_image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") @@ -100,7 +102,8 @@ class Controller(common.OpenstackController): """ image_id = id context = req.environ['nova.context'] - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = nova.image.get_image_service( + image_id) image_service.delete(context, service_image_id) return webob.exc.HTTPNoContent() diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index d5dee61a5..181833a23 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from xml.dom import minidom from nova import compute from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import quota from nova import utils @@ -141,7 +142,7 @@ class Controller(common.OpenstackController): image_ref = self._image_ref_from_req_data(env) try: - image_service, image_id = utils.get_image_service(image_ref) + image_service, image_id = nova.image.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) images = set([str(x['id']) for x in image_service.index(context)]) @@ -559,12 +560,13 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. 
""" context = req.environ['nova.context'] - image_service, service_image_id = utils.get_image_service(image_id) - image_meta = image_service.show(context, service_image_id) + image_service, service_image_id = nova.image.get_image_service( + image_id) + image = image_service.show(context, service_image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( - image_meta) + image) return kernel_id, ramdisk_id @staticmethod diff --git a/nova/compute/api.py b/nova/compute/api.py index 61adda13a..a9075ff8a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -26,6 +26,7 @@ import time from nova import db from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import network from nova import quota @@ -58,7 +59,9 @@ class API(base.Base): def __init__(self, image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): - self.image_service = image_service or utils.get_default_image_service() + self.image_service = image_service or \ + nova.image.get_default_image_service() + if not network_api: network_api = network.API() self.network_api = network_api @@ -154,7 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = utils.get_image_service( + (image_service, service_image_id) = nova.image.get_image_service( image_ref or image_id) image = image_service.show(context, service_image_id) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index e69de29bb..be692680a 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -0,0 +1,77 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from urlparse import urlparse + + +from nova import exception +import nova.image.glance +from nova.utils import import_class +from nova import flags + + +FLAGS = flags.FLAGS + + +def parse_image_ref(image_ref): + """Parse an image href into composite parts. + + If the image_ref passed in is an integer, it will + return (image_ref, None, None), otherwise it will + return (image_id, host, port) + + :param image_ref: href or id of an image + + """ + if str(image_ref).isdigit(): + return (int(image_ref), None, None) + + o = urlparse(image_ref) + port = o.port or 80 + host = o.netloc.split(':', 1)[0] + image_id = int(o.path.split('/')[-1]) + return (image_id, host, port) + + +def get_default_image_service(): + ImageService = import_class(FLAGS.image_service) + return ImageService() + + +def get_image_service(image_ref): + """Get the proper image_service and id for the given image_ref. + + The image_ref param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. 
If the + image_ref is an int, then the default image service is returned. + + :param image_ref: image ref/id for an image + :returns: a tuple of the form (image_service, image_id) + + """ + image_ref = image_ref or 0 + if str(image_ref).isdigit(): + return (get_default_image_service(), int(image_ref)) + + try: + (image_id, host, port) = parse_image_ref(image_ref) + except: + raise exception.InvalidImageRef(image_ref=image_ref) + glance_client = nova.image.glance.GlanceClient(host, port) + image_service = nova.image.glance.GlanceImageService(glance_client) + return (image_service, image_id) diff --git a/nova/image/s3.py b/nova/image/s3.py index ed685ea51..7051b522a 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,6 +31,7 @@ import eventlet from nova import crypto from nova import exception from nova import flags +from nova import image from nova import utils from nova.auth import manager from nova.image import service @@ -46,7 +47,7 @@ class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): - self.service = service or utils.get_default_image_service() + self.service = service or image.get_default_image_service() self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 22beef05f..b2b0325c2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -607,6 +607,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) + print "RES BODY:", res.body server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) diff --git a/nova/utils.py b/nova/utils.py index ecca5303a..954947589 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -726,52 +726,3 @@ def parse_server_string(server_str): except: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') - - -def parse_image_ref(image_ref): - """Parse an image href into composite parts. - - If the image_ref passed in is an integer, it will - return (image_ref, None, None), otherwise it will - return (image_id, host, port) - - :param image_ref: href or id of an image - - """ - if str(image_ref).isdigit(): - return (int(image_ref), None, None) - - o = urlparse(image_ref) - port = o.port or 80 - host = o.netloc.split(':', 1)[0] - image_id = int(o.path.split('/')[-1]) - return (image_id, host, port) - - -def get_default_image_service(): - ImageService = import_class(FLAGS.image_service) - return ImageService() - - -def get_image_service(image_ref): - """Get the proper image_service and id for the given image_ref. - - The image_ref param can be an href of the form - http://myglanceserver:9292/images/42, or just an int such as 42. If the - image_ref is an int, then the default image service is returned. 
- - :param image_ref: image ref/id for an image - :returns: a tuple of the form (image_service, image_id) - - """ - image_ref = image_ref or 0 - if str(image_ref).isdigit(): - return (get_default_image_service(), int(image_ref)) - - try: - (image_id, host, port) = parse_image_ref(image_ref) - except: - raise exception.InvalidImageRef(image_ref=image_ref) - glance_client = nova.image.glance.GlanceClient(host, port) - image_service = nova.image.glance.GlanceImageService(glance_client) - return (image_service, image_id) diff --git a/nova/virt/images.py b/nova/virt/images.py index 45887f38d..f571a9949 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags +from nova import image from nova import log as logging from nova import utils @@ -36,7 +37,7 @@ def fetch(image_id, path, _user, _project): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - (image_service, service_image_id) = utils.get_image_service(image_id) + (image_service, service_image_id) = image.get_image_service(image_id) with open(path, "wb") as image_file: elevated = context.get_admin_context() metadata = image_service.get(elevated, service_image_id, image_file) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8ba5d09ba..8c31f9e27 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -58,6 +58,7 @@ from nova import context from nova import db from nova import exception from nova import flags +import nova.image from nova import ipv6 from nova import log as logging from nova import utils @@ -449,11 +450,12 @@ class LibvirtConnection(driver.ComputeDriver): to support this command. """ - image_service = utils.get_default_image_service() virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() - base = image_service.show(elevated, instance['image_id']) + (image_service, service_image_id) = nova.image.get_image_service( + instance['image_id']) + base = image_service.show(elevated, service_image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], -- cgit From 714fe839580d4858417592608e4bf95ac26cf5d1 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 23 May 2011 10:39:50 -0700 Subject: get rid of all mention of drivers ... it's filter only now --- nova/exception.py | 5 +-- nova/scheduler/host_filter.py | 55 +++++++++++++------------ nova/tests/test_host_filter.py | 93 ++++++++++++++++++++---------------------- 3 files changed, 75 insertions(+), 78 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 56c20d111..dde39f29b 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -465,9 +465,8 @@ class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") -class SchedulerHostFilterDriverNotFound(NotFound): - message = _("Scheduler Host Filter Driver %(driver_name)s could" - " not be found.") +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") class InstanceMetadataNotFound(NotFound): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 92ec827d3..d9771754a 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,8 +14,8 @@ # under the License. """ -Host Filter is a driver mechanism for requesting instance resources. -Three drivers are included: AllHosts, Flavor & JSON. 
AllHosts just +Host Filter is a mechanism for requesting instance resources. +Three filters are included: AllHosts, Flavor & JSON. AllHosts just returns the full, unfiltered list of hosts. Flavor is a hard coded matching mechanism based on flavor criteria and JSON is an ad-hoc filter grammar. @@ -47,13 +47,13 @@ from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter_driver', +flags.DEFINE_string('default_host_filter', 'nova.scheduler.host_filter.AllHostsFilter', - 'Which driver to use for filtering hosts.') + 'Which filter to use for filtering hosts.') class HostFilter(object): - """Base class for host filter drivers.""" + """Base class for host filters.""" def instance_type_to_filter(self, instance_type): """Convert instance_type into a filter for most common use-case.""" @@ -64,12 +64,12 @@ class HostFilter(object): raise NotImplementedError() def _full_name(self): - """module.classname of the filter driver""" + """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) class AllHostsFilter(HostFilter): - """NOP host filter driver. Returns all hosts in ZoneManager. + """NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used to give us.""" @@ -85,7 +85,7 @@ class AllHostsFilter(HostFilter): class InstanceTypeFilter(HostFilter): - """HostFilter driver hard-coded to work with InstanceType records.""" + """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" @@ -133,7 +133,7 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter driver to allow simple JSON-based grammar for + """Host Filter to allow simple JSON-based grammar for selecting hosts.""" def _equals(self, args): @@ -273,43 +273,44 @@ class JsonFilter(HostFilter): return hosts -DRIVERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] +FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] -def choose_driver(driver_name=None): - """Since the caller may specify which driver to use we need +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need to have an authoritative list of what is permissible. This - function checks the driver name against a predefined set - of acceptable drivers.""" + function checks the filter name against a predefined set + of acceptable filters.""" - if not driver_name: - driver_name = FLAGS.default_host_filter_driver - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in FILTERS: + if "%s.%s" % (filter_class.__module__, filter_class.__name__) == \ + filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter drivers to filter - hosts for weighing. The particular driver used may be passed in + """The HostFilterScheduler uses the HostFilter to filter + hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. 
- request_spec = {'filter_driver': , + request_spec = {'filter_name': , 'instance_type': } """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = request_spec.get('filter_driver', None) - driver = choose_driver(driver_name) + filter_name = request_spec.get('filter_name', None) + host_filter = choose_host_filter(filter_name) # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. instance_type = request_spec['instance_type'] - name, query = driver.instance_type_to_filter(instance_type) - return driver.filter_hosts(self.zone_manager, query) + name, query = host_filter.instance_type_to_filter(instance_type) + return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index dd2325cc6..07817cc5a 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. """ -Tests For Scheduler Host Filter Drivers. +Tests For Scheduler Host Filters. """ import json @@ -31,7 +31,7 @@ class FakeZoneManager: class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" + """Test case for host filters.""" def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase): 'host_name-label': 'xs-%s' % multiplier} def setUp(self): - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ + self.old_flag = FLAGS.default_host_filter + FLAGS.default_host_filter = \ 'nova.scheduler.host_filter.AllHostsFilter' self.instance_type = dict(name='tiny', memory_mb=50, @@ -76,52 +76,52 @@ class HostFilterTestCase(test.TestCase): self.zone_manager.service_states = states def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag + FLAGS.default_host_filter = self.old_flag - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), + def test_choose_filter(self): + # Test default filter ... + hf = host_filter.choose_host_filter() + self.assertEquals(hf._full_name(), 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( + # Test valid filter ... + hf = host_filter.choose_host_filter( 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), + self.assertEquals(hf._full_name(), 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... + # Test invalid filter ... 
try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: + host_filter.choose_host_filter('does not exist') + self.fail("Should not find host filter.") + except exception.SchedulerHostFilterNotFound: pass - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) + def test_all_host_filter(self): + hf = host_filter.AllHostsFilter() + cooked = hf.instance_type_to_filter(self.instance_type) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) for host, capabilities in hosts: self.assertTrue(host.startswith('host')) - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() + def test_instance_type_filter(self): + hf = host_filter.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) + name, cooked = hf.instance_type_to_filter(self.instance_type) self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() self.assertEquals('host05', just_hosts[0]) self.assertEquals('host10', just_hosts[5]) - def test_json_driver(self): - driver = host_filter.JsonFilter() + def test_json_filter(self): + hf = host_filter.JsonFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) + name, cooked = hf.instance_type_to_filter(self.instance_type) self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() @@ -141,7 +141,7 @@ class HostFilterTestCase(test.TestCase): ] ] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -153,7 +153,7 @@ class HostFilterTestCase(test.TestCase): ['=', '$compute.host_memory_free', 30], ] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(9, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -163,7 +163,7 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -175,35 +175,32 @@ class HostFilterTestCase(test.TestCase): raw = ['unknown command', ] cooked = json.dumps(raw) try: - driver.filter_hosts(self.zone_manager, cooked) + hf.filter_hosts(self.zone_manager, cooked) self.fail("Should give KeyError") except KeyError, e: pass - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + self.assertTrue(hf.filter_hosts(self.zone_manager, 
json.dumps([]))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( ['not', True, False, True, False] ))) try: - driver.filter_hosts(self.zone_manager, json.dumps( + hf.filter_hosts(self.zone_manager, json.dumps( 'not', True, False, True, False )) self.fail("Should give KeyError") except KeyError, e: pass - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$foo', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$.....', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', {}, ['>', '$missing....foo']]))) -- cgit From 4d1fe953bbfb810f56224b9faae4c10d0d8dfac0 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 16:38:30 -0400 Subject: got rid of print statement --- nova/tests/api/openstack/test_servers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b2b0325c2..22beef05f 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -607,7 +607,6 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) - print "RES BODY:", res.body server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) -- cgit From b6a4f6aa5b2a97a6a7d79c40c1a3160abc1def39 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 23 May 2011 16:47:25 -0400 Subject: Renaming service_image_id vars to image_id to reduce confusion. Also some minor cleanup. 
--- nova/api/openstack/images.py | 11 ++++------- nova/api/openstack/servers.py | 5 ++--- nova/api/openstack/views/servers.py | 8 ++++---- nova/compute/api.py | 4 ++-- nova/tests/api/openstack/test_servers.py | 1 - nova/utils.py | 2 -- nova/virt/images.py | 8 ++++---- nova/virt/libvirt_conn.py | 10 +++++----- 8 files changed, 21 insertions(+), 28 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index c61b5c6a6..fc26b6c1b 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -85,9 +85,8 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] try: - (image_service, service_image_id) = nova.image.get_image_service( - id) - image = image_service.show(context, service_image_id) + (image_service, image_id) = nova.image.get_image_service(id) + image = image_service.show(context, image_id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -100,11 +99,9 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object :param id: Image identifier (integer) """ - image_id = id context = req.environ['nova.context'] - (image_service, service_image_id) = nova.image.get_image_service( - image_id) - image_service.delete(context, service_image_id) + (image_service, image_id) = nova.image.get_image_service(id) + image_service.delete(context, image_id) return webob.exc.HTTPNoContent() def create(self, req): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 181833a23..4a0b208e0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -560,9 +560,8 @@ class Controller(common.OpenstackController): associated kernel and ramdisk image IDs. 
""" context = req.environ['nova.context'] - image_service, service_image_id = nova.image.get_image_service( - image_id) - image = image_service.show(context, service_image_id) + image_service, _ = nova.image.get_image_service(image_id) + image = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0fe9dbe4a..4d825ff53 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -131,10 +131,10 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - image_id = inst['image_id'] - if str(image_id).isdigit(): - image_id = int(image_id) - response['imageRef'] = image_id + image_ref = inst['image_id'] + if str(image_ref).isdigit(): + image_ref = int(image_ref) + response['imageRef'] = image_ref def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index a9075ff8a..47f7a594f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -157,9 +157,9 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, service_image_id) = nova.image.get_image_service( + (image_service, image_id) = nova.image.get_image_service( image_ref or image_id) - image = image_service.show(context, service_image_id) + image = image_service.show(context, image_id) os_type = None if 'properties' in image and 'os_type' in image['properties']: diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b2b0325c2..f5cfd64e7 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -29,7 +29,6 @@ from nova import db from nova import exception from nova import flags from nova import test -from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api diff --git a/nova/utils.py b/nova/utils.py index 954947589..361fc9873 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,7 +35,6 @@ import struct import sys import time import types -from urlparse import urlparse from xml.sax import saxutils from eventlet import event @@ -43,7 +42,6 @@ from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess -import nova from nova import exception from nova import flags from nova import log as logging diff --git a/nova/virt/images.py b/nova/virt/images.py index f571a9949..61ea77ab0 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,7 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags -from nova import image +import nova.image from nova import log as logging from nova import utils @@ -32,15 +32,15 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_id, path, _user, _project): +def fetch(image_ref, path, _user, _project): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. 
- (image_service, service_image_id) = image.get_image_service(image_id) + (image_service, image_id) = nova.image.get_image_service(image_ref) with open(path, "wb") as image_file: elevated = context.get_admin_context() - metadata = image_service.get(elevated, service_image_id, image_file) + metadata = image_service.get(elevated, image_id, image_file) return metadata diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8c31f9e27..e67f08dbf 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -442,7 +442,7 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance, image_id): + def snapshot(self, instance, image_ref): """Create snapshot from a running VM instance. This command only works with qemu 0.14+, the qemu_img flag is @@ -453,14 +453,14 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() - (image_service, service_image_id) = nova.image.get_image_service( + (image_service, image_id) = nova.image.get_image_service( instance['image_id']) - base = image_service.show(elevated, service_image_id) + base = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), + 'name': '%s.%s' % (base['name'], image_ref), 'properties': {'architecture': base['architecture'], 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', @@ -503,7 +503,7 @@ class LibvirtConnection(driver.ComputeDriver): # Upload that image to the image service with open(out_path) as image_file: image_service.update(elevated, - image_id, + image_ref, metadata, image_file) -- cgit From bac28418b7b92aa2654fad39d0240a85aa637488 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 23 May 2011 17:25:59 -0400 Subject: Removing code duplication between parse_image_ref and get_image service. Made parse_image_ref private. --- nova/image/__init__.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index be692680a..d957c38fe 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -28,19 +28,13 @@ from nova import flags FLAGS = flags.FLAGS -def parse_image_ref(image_ref): +def _parse_image_ref(image_ref): """Parse an image href into composite parts. 
- If the image_ref passed in is an integer, it will - return (image_ref, None, None), otherwise it will - return (image_id, host, port) - - :param image_ref: href or id of an image + :param image_ref: href of an image + :returns: a tuple of the form (image_id, host, port) """ - if str(image_ref).isdigit(): - return (int(image_ref), None, None) - o = urlparse(image_ref) port = o.port or 80 host = o.netloc.split(':', 1)[0] @@ -69,7 +63,7 @@ def get_image_service(image_ref): return (get_default_image_service(), int(image_ref)) try: - (image_id, host, port) = parse_image_ref(image_ref) + (image_id, host, port) = _parse_image_ref(image_ref) except: raise exception.InvalidImageRef(image_ref=image_ref) glance_client = nova.image.glance.GlanceClient(host, port) -- cgit From a5efbca08a6b057290622ba5938f87d2e44be3eb Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 21:55:15 -0400 Subject: take out irrelevant TODO --- nova/compute/api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 47f7a594f..7ffe9c90c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -525,8 +525,7 @@ class API(base.Base): 'user_id': str(context.user_id)} sent_meta = {'name': name, 'is_public': False, 'properties': properties} - # TODO(wwolf): not sure if we need to use - # utils.get_image_service() here ? + recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, -- cgit From f49024c437f2680a18eb702f2975de2955b98889 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 22:47:44 -0400 Subject: make image_ref and image_id usage more consistant, eliminate redundancy in compute_api.create() call --- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/servers.py | 3 +-- nova/compute/api.py | 9 ++++----- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- nova/tests/test_quota.py | 10 +++++----- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc2e140b0..72bd56ed7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_ref=self._get_image(context, kwargs['image_ref'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4a0b208e0..01509f771 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -172,8 +172,7 @@ class Controller(common.OpenstackController): (inst,) = self.compute_api.create( context, inst_type, - image_id, - image_ref=image_ref, + image_ref, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, diff --git a/nova/compute/api.py b/nova/compute/api.py index 7ffe9c90c..4c4bc592b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -130,12 +130,12 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_ref, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', 
availability_zone=None, user_data=None, metadata={}, - injected_files=None, image_ref=None): + injected_files=None): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. @@ -157,8 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, image_id) = nova.image.get_image_service( - image_ref or image_id) + (image_service, image_id) = nova.image.get_image_service(image_ref) image = image_service.show(context, image_id) os_type = None @@ -202,7 +201,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_ref or image_id, + 'image_id': image_ref, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..3aaca6831 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_id': 'ami-1', + kwargs = {'image_ref': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} + kwargs = {'image_ref': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..b02b99f66 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -150,7 +150,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( @@ -168,7 +168,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: db.instance_destroy(self.context, ref[0]['id']) @@ -184,7 +184,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 02b641a47..c0499c7a9 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -201,7 +201,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_ref=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -215,7 +215,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_ref=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -271,7 +271,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id='fake', + image_ref='fake', metadata=metadata) def test_allowed_injected_files(self): @@ -284,14 +284,14 @@ class QuotaTestCase(test.TestCase): api = compute.API(image_service=self.StubImageService()) inst_type = 
instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type=inst_type, image_id='3', + instance_type=inst_type, image_ref='3', injected_files=files) def test_no_injected_files(self): FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') - api.create(self.context, instance_type=inst_type, image_id='3') + api.create(self.context, instance_type=inst_type, image_ref='3') def test_max_injected_files(self): files = [] -- cgit From 51b3d877c53d9c79dbbea21ed4d4abd0a1b91bf8 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 04:08:10 -0400 Subject: Fixing year of copyright. --- nova/image/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index d957c38fe..088b7796e 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # -# Copyright 2010 OpenStack LLC. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may -- cgit From 84209a3f02f35c16de0614fa81685b242784bf20 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 05:26:04 -0400 Subject: Fixing _get_kernel_ramdisk_from_image to use the correct image service. --- nova/api/openstack/servers.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 01509f771..4f823ccf7 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,7 +144,7 @@ class Controller(common.OpenstackController): try: image_service, image_id = nova.image.get_image_service(image_ref) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except: @@ -554,18 +554,15 @@ class Controller(common.OpenstackController): error=item.error)) return dict(actions=actions) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_service, _ = nova.image.get_image_service(image_id) - image = image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub - kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( - image) - return kernel_id, ramdisk_id + return self._do_get_kernel_ramdisk_from_image(image_meta) @staticmethod def _do_get_kernel_ramdisk_from_image(image_meta): -- cgit From 6e271a42258b439e8fed55c922792b632e062b63 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 24 May 2011 10:27:26 -0400 Subject: Fixing docstring. 
--- nova/api/openstack/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index fc26b6c1b..171c4a036 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -80,7 +80,7 @@ class Controller(common.OpenstackController): """Return detailed information about a specific image. :param req: `wsgi.Request` object - :param image_id: Image identifier (integer) + :param id: Image identifier """ context = req.environ['nova.context'] -- cgit From f488576ae27f8eb96a04022d0ecd11a28bd15116 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Tue, 24 May 2011 16:44:28 -0400 Subject: Added filtering on image properties --- nova/api/openstack/images.py | 23 +++++++++++++++++++++-- nova/image/fake.py | 4 ++-- nova/image/glance.py | 8 ++++---- nova/tests/api/openstack/fakes.py | 4 ++-- nova/tests/api/openstack/test_images.py | 9 +++++++++ nova/tests/image/test_glance.py | 2 +- 6 files changed, 39 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..755ce8ead 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,6 +28,9 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS +SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', + 'size_min', 'size_max'] + class Controller(common.OpenstackController): """Base `wsgi.Controller` for retrieving/displaying images.""" @@ -59,7 +62,8 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object """ context = req.environ['nova.context'] - images = self._image_service.index(context) + filters = self._get_filters(req) + images = self._image_service.index(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -70,11 +74,26 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object. """ context = req.environ['nova.context'] - images = self._image_service.detail(context) + filters = self._get_filters(req) + images = self._image_service.detail(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + for param in req.str_params: + if param in SUPPORTED_FILTERS or param.startswith('property-'): + filters[param] = req.str_params.get(param) + + return filters + def show(self, req, id): """Return detailed information about a specific image. 
diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..8e84c8597 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -52,11 +52,11 @@ class FakeImageService(service.BaseImageService): self.create(None, image) super(FakeImageService, self).__init__() - def index(self, context): + def index(self, context, filters=None): """Returns list of images.""" return copy.deepcopy(self.images.values()) - def detail(self, context): + def detail(self, context, filters=None): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/image/glance.py b/nova/image/glance.py index 193e37273..dec797619 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,23 +58,23 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context): + def index(self, context, filters=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) filtered.append(meta_subset) return filtered - def detail(self, context): + def detail(self, context, filters=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..8e0156afa 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -166,11 +166,11 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self): + def fake_get_images(self, filters=None): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_images_detailed(self): + def fake_get_images_detailed(self, filters=None): return copy.deepcopy(self.fixtures) def fake_get_image_meta(self, image_id): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2c329f920..76d4e2f56 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -708,6 +708,15 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) + def test_get_image_request_filters(self): + request =\ + webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', + 'name': 'testname', + } + self.assertDictMatch(expected, filters) + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 109905ded..6d108d494 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -34,7 +34,7 @@ class StubGlanceClient(object): def get_image_meta(self, image_id): return self.images[image_id] - def 
get_images_detailed(self): + def get_images_detailed(self, filters=None): return self.images.itervalues() def get_image(self, image_id): -- cgit From 17abaeafaf3fed2847e4377a16b47771eb663304 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:27:28 +0900 Subject: Fix wrong call of the volume api create() --- nova/api/openstack/contrib/volumes.py | 2 +- nova/tests/test_quota.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..b22bd2846 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -135,7 +135,7 @@ class VolumeController(wsgi.Controller): vol = env['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) - new_volume = self.volume_api.create(context, size, + new_volume = self.volume_api.create(context, size, None, vol.get('display_name'), vol.get('display_description')) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..990068fae 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -228,6 +228,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: @@ -241,6 +242,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: -- cgit From 46ddecc177830ea0ccef82e84d72c48261450b40 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 03:29:16 -0400 Subject: Don't need to import json. --- nova/tests/test_notifier.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 14bef79b8..523f38f24 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import json - import stubout import nova -- cgit From 7139cf1f0cfe9241a1710e5b7c621db569a2fc2d Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:37:52 +0900 Subject: Make snapshot_id=None a default value in VolumeManager:create_volume(). It is not a regular case to create a volume from a snapshot. --- nova/volume/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 84085fbd8..b6f0f5eeb 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id, snapshot_id): + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) -- cgit From f3125b3012da7b6429e4e551060498e665c4596e Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 17:51:30 +0900 Subject: Add unittests for cloning volumes. 
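For reference, the flow the new VolumeManager test exercises, written out as a rough sketch. It reuses the fixture names from nova/tests/test_volume.py (self.volume is the VolumeManager under test, self.context is an admin context, _create_volume and _create_snapshot are the test module's own helpers); it is a sketch, not literal patch content.

    # clone-from-snapshot flow covered by the new test
    src_id = self._create_volume()                        # plain source volume
    self.volume.create_volume(self.context, src_id)
    snapshot_id = self._create_snapshot(src_id)
    self.volume.create_snapshot(self.context, src_id, snapshot_id)
    dst_id = self._create_volume(0, snapshot_id)          # volume built from the snapshot
    self.volume.create_volume(self.context, dst_id, snapshot_id)
    # the destination volume should now record snapshot_id; the volumes and
    # the snapshot are cleaned up afterwards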
--- nova/tests/test_cloud.py | 19 +++++++++++++++++++ nova/tests/test_volume.py | 20 +++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index d9169a646..8c7520fe8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -171,6 +171,25 @@ class CloudTestCase(test.TestCase): db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + def test_describe_availability_zones(self): """Makes sure describe_availability_zones works and filters results.""" service1 = db.service_create(self.context, {'host': 'host1_zones', diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..1c25d601a 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase): self.context = context.get_admin_context() @staticmethod - def _create_volume(size='0'): + def _create_volume(size='0', snapshot_id=None): """Create a volume object.""" vol = {} vol['size'] = size + vol['snapshot_id'] = snapshot_id vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -69,6 +70,23 @@ class VolumeTestCase(test.TestCase): self.context, volume_id) + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src_id = self._create_volume() + self.volume.create_volume(self.context, volume_src_id) + snapshot_id = self._create_snapshot(volume_src_id) + self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) + volume_dst_id = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst_id, snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), + volume_dst_id).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src_id) + def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in -- cgit From d380729b162c8d6120279db74327e61a4942e28f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 18:02:07 +0900 Subject: Avoid wildcard import. 
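In other words, the migration now names each symbol it needs so its dependencies stay explicit instead of pulling everything into the module namespace. A minimal sketch of the style (the snapshot_id column is inferred from the migration's filename and is not shown in this hunk):

    # explicit imports instead of "from sqlalchemy import *" / "from migrate import *"
    from sqlalchemy import Column, Integer, MetaData, Table

    meta = MetaData()
    snapshot_id = Column('snapshot_id', Integer())
    # (Table is used by the migration body itself, which this hunk does not show)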
--- .../sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py index 0a50123bf..10bd9d5c9 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -15,8 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData, Integer from nova import log as logging -- cgit From 3d9569147cee2eaa94fc49c55b40f70a72171ebe Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 09:33:51 -0400 Subject: Added test --- nova/tests/api/openstack/test_images.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 76d4e2f56..233419c6d 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,11 +709,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): - request =\ - webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') filters = images.Controller()._get_filters(request) expected = {'status': 'ACTIVE', 'name': 'testname', + 'property-test': '3', + } + self.assertDictMatch(expected, filters) + + def test_get_image_request_filters_not_supported(self): + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', } self.assertDictMatch(expected, filters) -- cgit From 537c5aea298a6c09b3329185c2d0eed77a0a21bd Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 25 May 2011 12:09:53 -0400 Subject: try out mox for testing image request filters --- nova/tests/api/openstack/test_images.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 233419c6d..e25334732 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -28,6 +28,7 @@ import shutil import tempfile import xml.dom.minidom as minidom +import mox import stubout import webob @@ -709,14 +710,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE', + 'name': 'testname', + 'property-test': '3'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + 
mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): request = webob.Request.blank( -- cgit From e4bf97ba29e8e5858f37cedb34e20ccd8e210bae Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 12:24:27 -0400 Subject: Updated tests to use mox pep8 --- nova/api/openstack/images.py | 2 +- nova/tests/api/openstack/test_images.py | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 755ce8ead..553566d58 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -93,7 +93,7 @@ class Controller(common.OpenstackController): filters[param] = req.str_params.get(param) return filters - + def show(self, req, id): """Return detailed information about a specific image. diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index e25334732..f3f0217d6 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -726,12 +726,18 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') -- cgit From c440aecaaacf3caa8683234022bc10836d232971 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 17:28:10 -0400 Subject: Added params to local and base image service --- nova/image/local.py | 4 ++-- nova/image/service.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/image/local.py b/nova/image/local.py index 918180bae..677d5302b 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService): images.append(unhexed_image_id) return images - def index(self, context): + def index(self, context, *args, **kwargs): filtered = [] image_metas = self.detail(context) for image_meta in image_metas: @@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService): filtered.append(meta) return filtered - def detail(self, context): + def detail(self, context, *args, **kwargs): images = [] for image_id in self._ids(): try: diff --git a/nova/image/service.py b/nova/image/service.py index ab6749049..5361cfc89 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -46,7 +46,7 @@ class BaseImageService(object): # the ImageService subclass SERVICE_IMAGE_ATTRS = [] - def index(self, context): + def index(self, context, *args, **kwargs): """List images. :returns: a sequence of mappings with the following signature @@ -55,7 +55,7 @@ class BaseImageService(object): """ raise NotImplementedError - def detail(self, context): + def detail(self, context, *args, **kwargs): """Detailed information about an images. 
:returns: a sequence of mappings with the following signature -- cgit From 781672793c5fb774c5d9d291798775db471233b2 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 19:57:04 -0400 Subject: Renamed image_ref variables to image_href. Since the convention is that x_ref vars may imply that they are db objects. --- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/servers.py | 14 +++++++------- nova/api/openstack/views/servers.py | 8 ++++---- nova/compute/api.py | 6 +++--- nova/exception.py | 2 +- nova/image/__init__.py | 26 +++++++++++++------------- nova/tests/api/openstack/test_servers.py | 26 +++++++++++++------------- nova/tests/integrated/integrated_helpers.py | 8 ++++---- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- nova/tests/test_quota.py | 10 +++++----- nova/virt/images.py | 4 ++-- nova/virt/libvirt_conn.py | 6 +++--- 13 files changed, 61 insertions(+), 61 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 72bd56ed7..ed33e2d42 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_ref=self._get_image(context, kwargs['image_ref'])['id'], + image_href=self._get_image(context, kwargs['image_href'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4f823ccf7..cac433557 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -140,15 +140,15 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - image_ref = self._image_ref_from_req_data(env) + image_href = self._image_ref_from_req_data(env) try: - image_service, image_id = nova.image.get_image_service(image_ref) + image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except: - msg = _("Cannot find requested image %s") % image_ref + msg = _("Cannot find requested image %s") % image_href return faults.Fault(exc.HTTPBadRequest(msg)) personality = env['server'].get('personality') @@ -172,7 +172,7 @@ class Controller(common.OpenstackController): (inst,) = self.compute_api.create( context, inst_type, - image_ref, + image_href, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, @@ -188,7 +188,7 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPBadRequest(msg)) inst['instance_type'] = inst_type - inst['image_id'] = image_ref + inst['image_id'] = image_href builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) @@ -701,13 +701,13 @@ class ControllerV11(Controller): instance_id = int(instance_id) try: - image_ref = info["rebuild"]["imageRef"] + image_href = info["rebuild"]["imageRef"] except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") LOG.debug(msg) return faults.Fault(exc.HTTPBadRequest(explanation=msg)) - image_id = common.get_id_from_href(image_ref) + image_id = common.get_id_from_href(image_href) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata", {}) diff --git a/nova/api/openstack/views/servers.py 
b/nova/api/openstack/views/servers.py index 4d825ff53..ddd17ab93 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -131,10 +131,10 @@ class ViewBuilderV11(ViewBuilder): def _build_image(self, response, inst): if 'image_id' in dict(inst): - image_ref = inst['image_id'] - if str(image_ref).isdigit(): - image_ref = int(image_ref) - response['imageRef'] = image_ref + image_href = inst['image_id'] + if str(image_href).isdigit(): + image_href = int(image_href) + response['imageRef'] = image_href def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index 4c4bc592b..bb419520d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -130,7 +130,7 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def create(self, context, instance_type, - image_ref, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -157,7 +157,7 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - (image_service, image_id) = nova.image.get_image_service(image_ref) + (image_service, image_id) = nova.image.get_image_service(image_href) image = image_service.show(context, image_id) os_type = None @@ -201,7 +201,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_ref, + 'image_id': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/exception.py b/nova/exception.py index 13f5bf95c..5b91e1cde 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -284,7 +284,7 @@ class DiskNotFound(NotFound): class InvalidImageRef(Invalid): - message = _("Invalid image ref %(image_ref)s.") + message = _("Invalid image ref %(image_href)s.") class ImageNotFound(NotFound): diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 088b7796e..9b9108bd2 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -28,14 +28,14 @@ from nova import flags FLAGS = flags.FLAGS -def _parse_image_ref(image_ref): +def _parse_image_ref(image_href): """Parse an image href into composite parts. - :param image_ref: href of an image + :param image_href: href of an image :returns: a tuple of the form (image_id, host, port) """ - o = urlparse(image_ref) + o = urlparse(image_href) port = o.port or 80 host = o.netloc.split(':', 1)[0] image_id = int(o.path.split('/')[-1]) @@ -47,25 +47,25 @@ def get_default_image_service(): return ImageService() -def get_image_service(image_ref): - """Get the proper image_service and id for the given image_ref. +def get_image_service(image_href): + """Get the proper image_service and id for the given image_href. - The image_ref param can be an href of the form + The image_href param can be an href of the form http://myglanceserver:9292/images/42, or just an int such as 42. If the - image_ref is an int, then the default image service is returned. + image_href is an int, then the default image service is returned. 
- :param image_ref: image ref/id for an image + :param image_href: image ref/id for an image :returns: a tuple of the form (image_service, image_id) """ - image_ref = image_ref or 0 - if str(image_ref).isdigit(): - return (get_default_image_service(), int(image_ref)) + image_href = image_href or 0 + if str(image_href).isdigit(): + return (get_default_image_service(), int(image_href)) try: - (image_id, host, port) = _parse_image_ref(image_ref) + (image_id, host, port) = _parse_image_ref(image_href) except: - raise exception.InvalidImageRef(image_ref=image_ref) + raise exception.InvalidImageRef(image_href=image_href) glance_client = nova.image.glance.GlanceClient(host, port) image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3910f9820..8f8c6b024 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -584,12 +584,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', @@ -611,16 +611,16 @@ class ServersTest(test.TestCase): self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_href, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/asdf' + image_href = 'http://localhost/v1.1/images/asdf' flavor_ref = 'http://localhost/v1.1/flavors/3' body = dict(server=dict( - name='server_test', imageRef=image_ref, flavorRef=flavor_ref, + name='server_test', imageRef=image_href, flavorRef=flavor_ref, metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.1/servers') @@ -633,12 +633,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_ref = 2 + image_id = 2 flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_id, 'flavorRef': flavor_ref, }, } @@ -653,7 +653,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_id, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_with_admin_pass_v1_0(self): @@ -680,12 +680,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': 'testpass', }, @@ -702,12 +702,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_empty_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 
'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': '', }, @@ -1658,7 +1658,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", request = self.deserializer.deserialize(serial_request) self.assertEqual(request, expected) - def test_request_xmlser_with_flavor_image_ref(self): + def test_request_xmlser_with_flavor_image_href(self): serial_request = """ Date: Thu, 26 May 2011 19:46:11 +0900 Subject: creating _take_action_to_instance to nova.virt.libvirt_conn.py --- nova/virt/libvirt_conn.py | 43 +++++++++++++++---------------------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index aa5e42fc8..4e2e2292e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,6 +45,7 @@ import sys import tempfile import time import uuid +import inspect from xml.dom import minidom from xml.etree import ElementTree @@ -548,53 +549,39 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - @exception.wrap_exception - def pause(self, instance, callback): - """Pause VM instance""" + def _take_action_to_instance(self, action, instance, *arg): + """action VM instance""" if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) tmpconn.close() else: dom = self._conn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) + + @exception.wrap_exception + def pause(self, instance, callback): + """Pause VM instance""" + self._take_action_to_instance("suspend", instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.resume() - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.resume() + self._take_action_to_instance("resume", instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.managedSave(0) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.managedSave(0) + self._take_action_to_instance("managedSave", instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.create() + self._take_action_to_instance("create", instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From ce5c7287e06cb7ce1d1a41354a5d6ea073d308d0 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:31:50 +0900 Subject: remove unnecessary import inspect at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4e2e2292e..f9c441505 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,7 
+45,6 @@ import sys import tempfile import time import uuid -import inspect from xml.dom import minidom from xml.etree import ElementTree -- cgit From 87717c33ae78201a24c0f5a3416ae4b0080e4668 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:49:14 +0900 Subject: replace double quatation to single quatation at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f9c441505..8c9a3550a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,23 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance("suspend", instance) + self._take_action_to_instance('suspend', instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance("resume", instance) + self._take_action_to_instance('resume', instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance("managedSave", instance, 0) + self._take_action_to_instance('managedSave', instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance("create", instance) + self._take_action_to_instance('create', instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From ac3348ae282b218a941b33a2d17b7d5ddaeebab6 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 09:16:02 -0400 Subject: Switching back to chown. I'm fine w/ setfacl too but nova already has 'chown' via sudoers so this seems reasonable for now. --- nova/virt/xenapi/vm_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 931f8e2d4..fdf51ff74 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1013,8 +1013,7 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('sudo', 'setfacl', '-m', 'u:%s:rw' % os.getuid(), - '/dev/%s' % dev) + utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) -- cgit From 131d5bcae4e5f0ab48369e2979f16468bd0900a4 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 10:34:17 -0400 Subject: Switch the run_instances call in the EC2 back to 'image_id'. Incoming requests use 'imageId' so we shouldn't modify this for image HREF's. 
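Put another way, the EC2 parameter keeps its EC2-facing name all the way into the controller, and only the image resolved from it is handed on. A small sketch, assuming the CloudController context ('ami-1' is a placeholder value), not lines from this diff:

    kwargs = {'image_id': 'ami-1'}                         # from the ImageId request param
    image = self._get_image(context, kwargs['image_id'])   # EC2-side lookup
    # compute_api.create() then receives the resolved image['id']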
--- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ed33e2d42..cc2e140b0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_href=self._get_image(context, kwargs['image_href'])['id'], + image_id=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), -- cgit From f37d94428dd0b56632958d5d3a6930531a51cd44 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 10:54:46 -0400 Subject: Restricted image filtering by name and status only --- nova/api/openstack/images.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 553566d58..2e779da79 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,8 +28,7 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', - 'size_min', 'size_max'] +SUPPORTED_FILTERS = ['name', 'status'] class Controller(common.OpenstackController): -- cgit From 995a65ac42b4e36679ad0708a227139cdd3bc06e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 11:21:28 -0400 Subject: Fix test_cloud tests. --- nova/api/ec2/cloud.py | 2 +- nova/tests/test_cloud.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc2e140b0..8580dc79e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -849,7 +849,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_href=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1219d600e..54c0454de 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_href': 'ami-1', + kwargs = {'image_id': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_href': 'ami-1'} + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) -- cgit From 7c0564baf72cbb5c3693ab72c72684a5c6b333c5 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 26 May 2011 10:22:45 -0700 Subject: instance obj returned is not a hash, instead is sqlalchemy obj and hostname attr is what the logic is looking for --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 26c0d776c..51373d282 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 
+536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance']['ec2_id'] + instance = floating_ip['fixed_ip']['instance'].hostname print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) -- cgit From ff75e808eef06a72c0198fe976c19c60256c6b74 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 May 2011 10:28:22 -0700 Subject: log upload errors --- nova/image/s3.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nova/image/s3.py b/nova/image/s3.py index 673cbf56f..bad04e5c0 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,12 +31,14 @@ import eventlet from nova import crypto from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.image import service from nova.api.ec2 import ec2utils +LOG = logging.getLogger("nova.image.s3") FLAGS = flags.FLAGS flags.DEFINE_string('image_decryption_dir', '/tmp', 'parent dir for tempdir used for image decryption') @@ -181,6 +183,8 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: + LOG.exception(_("Failed to download %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -203,6 +207,9 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: + LOG.exception(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), locals()) + LOG.exception(_("Failed to decrypt %s"), enc_filename) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -213,6 +220,8 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: + LOG.exception(_("Failed to untar %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -224,6 +233,8 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: + LOG.exception(_("Failed to upload %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From 75ec3d77d3fa4078bbe7d647377f987d87d97651 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 May 2011 10:30:27 -0700 Subject: exceptions are logged via the raise, so just log an error message --- nova/image/s3.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index bad04e5c0..ec8852f09 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -183,8 +183,8 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: - LOG.exception(_("Failed to download %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to download %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -207,9 +207,8 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: - LOG.exception(_("Failed to decrypt 
%(image_location)s " - "to %(image_path)s"), locals()) - LOG.exception(_("Failed to decrypt %s"), enc_filename) + LOG.error(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -220,8 +219,8 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.exception(_("Failed to untar %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to untar %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -233,8 +232,8 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: - LOG.exception(_("Failed to upload %(image_location)s " - "to %(image_path)s"), locals()) + LOG.error(_("Failed to upload %(image_location)s " + "to %(image_path)s"), locals()) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From 788893ef2a17a4fda5e907f048a94f3ed0435bbc Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 26 May 2011 10:53:48 -0700 Subject: missed a driver reference --- nova/compute/api.py | 2 +- nova/scheduler/host_filter.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 216550e40..0b2c4e642 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -264,7 +264,7 @@ class API(base.Base): "instance_id": instance_id, "request_spec": { 'instance_type': instance_type, - 'filter_driver': + 'filter': 'nova.scheduler.host_filter.' 'InstanceTypeFilter' }, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index d9771754a..ed76c90bf 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -296,13 +296,13 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. 
- request_spec = {'filter_name': , + request_spec = {'filter': , 'instance_type': } """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - filter_name = request_spec.get('filter_name', None) + filter_name = request_spec.get('filter', None) host_filter = choose_host_filter(filter_name) # TODO(sandy): We're only using InstanceType-based specs -- cgit From b9b16ca71d4bbb9782482bdf5d848bb5b787732f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 13:59:25 -0400 Subject: Expanded tests --- nova/tests/api/openstack/test_images.py | 122 ++++++++++++++++++++++++++++++-- 1 file changed, 116 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index f3f0217d6..9f1f28611 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,23 +709,119 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) - def test_get_image_request_filters(self): + def test_image_filter_with_name(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() - filters = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3'} + filters = {'name': 'testname'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_name(self): + mocker = mox.Mox() + image_service = 
mocker.CreateMockAnything() + context = object() + filters = {'name': 'testname'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} image_service.detail(context, filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') + '/v1.1/images/detail?property-test=3') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) mocker.VerifyAll() - def test_get_image_request_filters_not_supported(self): + def test_image_detail_filter_not_supported(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() @@ -739,6 +835,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): controller.detail(request) mocker.VerifyAll() + def test_image_detail_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) -- cgit From 2d834fa19078c645e3c36001b5dd34fb8e708f0a Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 14:09:59 -0400 Subject: review fixups --- nova/api/openstack/wsgi.py | 27 ++++++++++++++++----------- nova/tests/api/openstack/test_wsgi.py | 2 +- nova/wsgi.py | 4 ++-- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index bd840a6f7..5577d326f 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -62,7 +62,7 @@ class TextDeserializer(object): """Find local deserialization method and parse request body.""" try: action_method = getattr(self, action) - except Exception: + except (AttributeError, TypeError): action_method = self.default return action_method(datastring) @@ -162,7 +162,7 @@ class RequestDeserializer(object): def get_deserializer(self, content_type): try: return self.deserializers[content_type] - except Exception: + except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): @@ -172,16 +172,20 @@ class RequestDeserializer(object): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() + 
except Exception: + return {} + try: del args['controller'] + except KeyError: + pass - if 'format' in args: - del args['format'] - - return args - + try: + del args['format'] except KeyError: - return {} + pass + + return args class DictSerializer(object): @@ -191,7 +195,7 @@ class DictSerializer(object): """Find local serialization method and encode response body.""" try: action_method = getattr(self, action) - except Exception: + except (AttributeError, TypeError): action_method = self.default return action_method(data) @@ -316,7 +320,7 @@ class ResponseSerializer(object): def get_serializer(self, content_type): try: return self.serializers[content_type] - except Exception: + except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -347,7 +351,8 @@ class Resource(wsgi.Application): def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" - LOG.debug("%s %s" % (request.method, request.url)) + LOG.debug("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) try: action, action_args, accept = self.deserializer.deserialize( diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 89603d82b..ebbdc9409 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -205,7 +205,7 @@ class ResponseSerializerTest(test.TestCase): def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, self.serializer.serialize, - 'application/unknown', {}) + {}, 'application/unknown') class RequestDeserializerTest(test.TestCase): diff --git a/nova/wsgi.py b/nova/wsgi.py index d59d2ee13..33ba852bc 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -260,8 +260,8 @@ class Router(object): Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a controller, who will route - the request to the action method. + well and have your controller be an object that can route + the request to the action-specific method. 
Examples: mapper = routes.Mapper() -- cgit From 3264c18fffa26b1288fc253f2526d9a78fdc9dd4 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 15:01:24 -0400 Subject: cleaning up getattr calls with default param --- nova/api/openstack/wsgi.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 5577d326f..7a747842e 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -58,13 +58,9 @@ class Request(webob.Request): class TextDeserializer(object): """Custom request body deserialization based on controller action name.""" - def deserialize(self, datastring, action=None): + def deserialize(self, datastring, action='default'): """Find local deserialization method and parse request body.""" - try: - action_method = getattr(self, action) - except (AttributeError, TypeError): - action_method = self.default - + action_method = getattr(self, action, self.default) return action_method(datastring) def default(self, datastring): @@ -191,13 +187,9 @@ class RequestDeserializer(object): class DictSerializer(object): """Custom response body serialization based on controller action name.""" - def serialize(self, data, action=None): + def serialize(self, data, action='default'): """Find local serialization method and encode response body.""" - try: - action_method = getattr(self, action) - except (AttributeError, TypeError): - action_method = self.default - + action_method = getattr(self, action, self.default) return action_method(data) def default(self, data): -- cgit From 613aee2dd146957cb0c040d7a7a1a661b487efbc Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 26 May 2011 16:58:06 -0400 Subject: move udev file so it follows the xen-backend.rules --- plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules | 3 --- .../xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules | 3 +++ .../networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules create mode 100644 plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules diff --git a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules deleted file mode 100644 index b179f0847..000000000 --- a/plugins/xenserver/networking/etc/udev/rules.d/openvswitch-nova.rules +++ /dev/null @@ -1,3 +0,0 @@ -SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" -# is this one needed? -#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules new file mode 100644 index 000000000..b179f0847 --- /dev/null +++ b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" +# is this one needed? 
+#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all" diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 2ebc4dd8c..9fde69377 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -172,7 +172,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params): if __name__ == "__main__": if len(sys.argv) != 4: - print "usage: %s [online|offline] vif-domid-idx ipv4|ipv6|all " % \ + print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \ os.path.basename(sys.argv[0]) sys.exit(1) else: -- cgit From a79f01fcea81bb6be233a65670c6a79af8534a10 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 26 May 2011 17:27:48 -0400 Subject: adding TODOs per dabo's review --- nova/api/openstack/wsgi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 7a747842e..ddf4e6fa9 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -237,6 +237,7 @@ class XMLDictSerializer(DictSerializer): if xmlns: result.setAttribute('xmlns', xmlns) + #TODO(bcwaldon): accomplish this without a type-check if type(data) is list: collections = metadata.get('list_collections', {}) if nodename in collections: @@ -255,6 +256,7 @@ class XMLDictSerializer(DictSerializer): for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check elif type(data) is dict: collections = metadata.get('dict_collections', {}) if nodename in collections: -- cgit From 2819681b762fe8a23f3af68f1c1cbed0a113c08e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 18:14:38 -0400 Subject: Rename instances.image_id to instances.image_ref. 
--- nova/api/ec2/cloud.py | 6 +++--- nova/compute/api.py | 2 +- nova/compute/manager.py | 2 +- nova/db/sqlalchemy/api.py | 2 +- nova/db/sqlalchemy/models.py | 4 ++-- nova/tests/test_cloud.py | 6 +++--- nova/tests/test_compute.py | 2 +- nova/virt/libvirt/firewall.py | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8580dc79e..5bbee1afd 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -159,7 +159,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -724,13 +724,13 @@ class CloudController(object): instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.is_admin: - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = self.image_ec2_id(instance['image_id']) + i['imageId'] = self.image_ec2_id(instance['image_ref']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} diff --git a/nova/compute/api.py b/nova/compute/api.py index 432ea1fad..61b45843d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -207,7 +207,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_href, + 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..7c88236ba 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -235,7 +235,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id) + is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id) # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. 
We diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e4dda5c12..4403cd7d9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -955,7 +955,7 @@ def instance_get_project_vpn(context, project_id): options(joinedload('security_groups')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ - filter_by(image_id=str(FLAGS.vpn_image_id)).\ + filter_by(image_ref=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1215448f8..6d4be8767 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -184,11 +184,11 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - image_id = Column(String(255)) + image_ref = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) -# image_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..eefab58d0 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -191,10 +191,10 @@ class CloudTestCase(test.TestCase): def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) inst2 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host2'}) comp1 = db.service_create(self.context, {'host': 'host1', 'availability_zone': 'zone1', @@ -390,7 +390,7 @@ class CloudTestCase(test.TestCase): def test_terminate_instances(self): inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) terminate_instances = self.cloud.terminate_instances # valid instance_id diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4097660f..25454087d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -84,7 +84,7 @@ class ComputeTestCase(test.TestCase): def _create_instance(self, params={}): """Create a test instance""" inst = {} - inst['image_id'] = 1 + inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 7e00662cd..12727f2b1 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -195,7 +195,7 @@ class NWFilterFirewall(FirewallDriver): logging.info('ensuring static filters') self._ensure_static_filters() - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' @@ -336,7 +336,7 @@ class NWFilterFirewall(FirewallDriver): def _create_network_filters(self, instance, network_info, instance_secgroup_filter_name): - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' -- cgit From c229d6e32f5275b2eb10e760f89a52dc31635c47 Mon Sep 17 00:00:00 2001 
From: MORITA Kazutaka Date: Fri, 27 May 2011 14:13:17 +0900 Subject: Fix pep8 errors. --- nova/api/ec2/cloud.py | 7 ++++--- nova/tests/test_volume.py | 10 ++++++---- nova/volume/api.py | 3 ++- nova/volume/driver.py | 4 ++-- nova/volume/manager.py | 5 +++-- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b717a10c0..79cc3b3bf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -666,14 +666,15 @@ class CloudController(object): return v def create_volume(self, context, **kwargs): - size = kwargs.get('size'); + size = kwargs.get('size') if kwargs.get('snapshot_id') != None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) - LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, + context=context) else: snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) - + volume = self.volume_api.create( context, size=size, diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 8d58b3135..4f10ee6af 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -78,10 +78,12 @@ class VolumeTestCase(test.TestCase): self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) volume_dst_id = self._create_volume(0, snapshot_id) self.volume.create_volume(self.context, volume_dst_id, snapshot_id) - self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), - volume_dst_id).id) - self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), - volume_dst_id).snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).snapshot_id) self.volume.delete_volume(self.context, volume_dst_id) self.volume.delete_snapshot(self.context, snapshot_id) diff --git a/nova/volume/api.py b/nova/volume/api.py index 7fa80383b..5804955f7 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -43,7 +43,8 @@ class API(base.Base): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": - raise exception.ApiError(_("Snapshot status must be available")) + raise exception.ApiError( + _("Snapshot status must be available")) size = snapshot['volume_size'] if quota.allowed_volumes(context, 1, size) < 1: diff --git a/nova/volume/driver.py b/nova/volume/driver.py index df9767a79..87e13277f 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -674,10 +674,10 @@ class SheepdogDriver(VolumeDriver): def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" self._try_execute('qemu-img', 'create', '-b', - "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), "sheepdog:%s" % volume['name']) - def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 673771aa7..ff53f0701 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -112,8 +112,9 @@ class VolumeManager(manager.SchedulerDependentManager): model_update = self.driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot(volume_ref, - snapshot_ref) + 
model_update = self.driver.create_volume_from_snapshot( + volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From ca5a91b3fe6eaa1c2d2b85cb5a11d2bb36e7a436 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 15:14:16 +0900 Subject: fixed read_only check --- nova/virt/libvirt_conn.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8c9a3550a..7982611fa 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,27 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance('suspend', instance) + dom = self._lookup_by_name(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance('resume', instance) + dom = self._lookup_by_name(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance('managedSave', instance, 0) + dom = self._lookup_by_name(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance('create', instance) + dom = self._lookup_by_name(instance.name) + dom.create() except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From 34bd57c380c348fa9c60cf6b3371352da6e8853c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 16:03:56 +0900 Subject: remove _take_action_to_instance --- nova/virt/libvirt_conn.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 7982611fa..47a77b3ae 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -548,19 +548,6 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - def _take_action_to_instance(self, action, instance, *arg): - """action VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" -- cgit From bd19bd2edd612dfea09e4230c59422e59c6de181 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 05:01:42 -0700 Subject: fixed docstrings and general tidying --- nova/compute/api.py | 39 ++++++++++++++++---------------- nova/scheduler/host_filter.py | 41 +++++++++++++++++++++------------- nova/scheduler/zone_aware_scheduler.py | 33 +++++++++++++++++---------- 3 files changed, 67 insertions(+), 46 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 7f1fe1b5c..3e5105050 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -88,10 +88,10 @@ class API(base.Base): {"method": "get_network_topic", "args": {'fake': 1}}) def _check_injected_file_quota(self, context, injected_files): - """Enforce quota limits on injected files. + """ + Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. 
- """ if injected_files is None: return @@ -137,10 +137,10 @@ class API(base.Base): availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None): - """Create the number and type of instances requested. + """ + Create the number and type of instances requested. Verifies that quota and other arguments are valid. - """ if not instance_type: instance_type = instance_types.get_default_instance_type() @@ -293,13 +293,13 @@ class API(base.Base): return False def ensure_default_security_group(self, context): - """Ensure that a context has a security group. + """ + Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context - """ try: db.security_group_get_by_name(context, context.project_id, @@ -328,11 +328,11 @@ class API(base.Base): "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_id): - """Called when a security group gains a new or loses a member. + """ + Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. - """ # First, we get the security group rules that reference this group as # the grantee.. @@ -370,7 +370,8 @@ class API(base.Base): "args": {"security_group_id": group_id}}) def update(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. + """ + Updates the instance in the datastore. :param context: The security context :param instance_id: ID of the instance to update @@ -379,7 +380,6 @@ class API(base.Base): updated :returns: None - """ rv = self.db.instance_update(context, instance_id, kwargs) return dict(rv.iteritems()) @@ -424,22 +424,22 @@ class API(base.Base): @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): - """A version of get with special routing characteristics. + """ + A version of get with special routing characteristics. Use this method instead of get() if this is the only operation you intend to to. It will route to novaclient.get if the instance is not found. - """ return self.get(context, instance_id) def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): - """Get all instances filtered by one of the given parameters. + """ + Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. - """ if reservation_id is not None: return self.db.instance_get_all_by_reservation( @@ -463,13 +463,13 @@ class API(base.Base): def _cast_compute_message(self, method, context, instance_id, host=None, params=None): - """Generic handler for RPC casts to compute. + """ + Generic handler for RPC casts to compute. :param params: Optional dictionary of arguments to be passed to the compute worker :returns: None - """ if not params: params = {} @@ -483,7 +483,8 @@ class API(base.Base): def _call_compute_message(self, method, context, instance_id, host=None, params=None): - """Generic handler for RPC calls to compute. + """ + Generic handler for RPC calls to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -516,10 +517,10 @@ class API(base.Base): % instance_id) def snapshot(self, context, instance_id, name): - """Snapshot the given instance. + """ + Snapshot the given instance. 
:returns: A dict containing image metadata - """ properties = {'instance_id': str(instance_id), 'user_id': str(context.user_id)} diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index ed76c90bf..89faace45 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,9 +69,11 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """NOP host filter. Returns all hosts in ZoneManager. + """ + NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used - to give us.""" + to give us. + """ def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising @@ -133,8 +135,10 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts.""" + """ + Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ def _equals(self, args): """First term is == all the other terms.""" @@ -229,8 +233,10 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]'""" + """ + Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]' + """ if not string: return None if string[0] != '$': @@ -277,22 +283,25 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters.""" + """ + Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. + """ if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in FILTERS: - if "%s.%s" % (filter_class.__module__, filter_class.__name__) == \ - filter_name: + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter + """ + The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. @@ -313,6 +322,8 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. 
+ """ return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index dc18fc427..236907626 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -40,13 +40,15 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """This method is called from nova.compute.api to provision + """ + This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or 2. Use the Build Plan information in the request parameters to simply create the instance (either in this zone or - a child zone).""" + a child zone). + """ # TODO(sandy): We'll have to look for richer specs at some point. @@ -79,15 +81,16 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) else: # TODO(sandy) Provision in child zone ... - LOG.warning(_("Provision to Child Zone not supported (yet)") - % locals()) + LOG.warning(_("Provision to Child Zone not supported (yet)")) pass def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information + """ + Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal - anything about the children.""" + anything about the children. + """ return self._schedule(context, "compute", request_spec, *args, **kwargs) @@ -95,13 +98,15 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one + """ + The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, + """ + Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -137,11 +142,15 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format.""" + """ + Derived classes must override this method and return + a list of hosts in [(hostname, capability_dict)] format. + """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ raise NotImplemented() -- cgit From 299cadb9ce2e2600b18e2befbed967ca2941256d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 08:15:56 -0400 Subject: Commit the migration script. 
--- .../migrate_repo/versions/019_rename_image_ids.py | 39 ++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py new file mode 100644 index 000000000..6838f1ea6 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') -- cgit From e5d89198b188b9ae62ff0ac2bd72fd321f541713 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 09:43:10 -0400 Subject: Libvirt updates for image_ref. 
--- nova/virt/libvirt/connection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 1cedd1fe3..62c40a022 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -392,7 +392,7 @@ class LibvirtConnection(driver.ComputeDriver): elevated = context.get_admin_context() (image_service, image_id) = nova.image.get_image_service( - instance['image_id']) + instance['image_ref']) base = image_service.show(elevated, image_id) metadata = {'disk_format': base['disk_format'], @@ -779,7 +779,7 @@ class LibvirtConnection(driver.ComputeDriver): project = manager.AuthManager().get_project(inst['project_id']) if not disk_images: - disk_images = {'image_id': inst['image_id'], + disk_images = {'image_id': inst['image_ref'], 'kernel_id': inst['kernel_id'], 'ramdisk_id': inst['ramdisk_id']} @@ -875,7 +875,7 @@ class LibvirtConnection(driver.ComputeDriver): if key or net: inst_name = inst['name'] - img_id = inst.image_id + img_id = inst.image_ref if key: LOG.info(_('instance %(inst_name)s: injecting key into' ' image %(img_id)s') % locals()) -- cgit From 3f911877a2a9facdf153f173b3fb76a18e44a2ac Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 07:24:02 -0700 Subject: docstrings again and import ordering --- nova/compute/api.py | 30 ++++++++++-------------------- nova/scheduler/host_filter.py | 18 ++++++------------ nova/scheduler/zone_aware_scheduler.py | 20 +++++++------------- 3 files changed, 23 insertions(+), 45 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3e5105050..35d60446c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -88,8 +88,7 @@ class API(base.Base): {"method": "get_network_topic", "args": {'fake': 1}}) def _check_injected_file_quota(self, context, injected_files): - """ - Enforce quota limits on injected files. + """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. """ @@ -137,8 +136,7 @@ class API(base.Base): availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None): - """ - Create the number and type of instances requested. + """Create the number and type of instances requested. Verifies that quota and other arguments are valid. """ @@ -293,8 +291,7 @@ class API(base.Base): return False def ensure_default_security_group(self, context): - """ - Ensure that a context has a security group. + """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. @@ -328,8 +325,7 @@ class API(base.Base): "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_id): - """ - Called when a security group gains a new or loses a member. + """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. @@ -370,8 +366,7 @@ class API(base.Base): "args": {"security_group_id": group_id}}) def update(self, context, instance_id, **kwargs): - """ - Updates the instance in the datastore. + """Updates the instance in the datastore. :param context: The security context :param instance_id: ID of the instance to update @@ -424,8 +419,7 @@ class API(base.Base): @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): - """ - A version of get with special routing characteristics. + """A version of get with special routing characteristics. 
Use this method instead of get() if this is the only operation you intend to to. It will route to novaclient.get if the instance is not @@ -435,8 +429,7 @@ class API(base.Base): def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): - """ - Get all instances filtered by one of the given parameters. + """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. @@ -463,8 +456,7 @@ class API(base.Base): def _cast_compute_message(self, method, context, instance_id, host=None, params=None): - """ - Generic handler for RPC casts to compute. + """Generic handler for RPC casts to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -483,8 +475,7 @@ class API(base.Base): def _call_compute_message(self, method, context, instance_id, host=None, params=None): - """ - Generic handler for RPC calls to compute. + """Generic handler for RPC calls to compute. :param params: Optional dictionary of arguments to be passed to the compute worker @@ -517,8 +508,7 @@ class API(base.Base): % instance_id) def snapshot(self, context, instance_id, name): - """ - Snapshot the given instance. + """Snapshot the given instance. :returns: A dict containing image metadata """ diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 89faace45..4260cbf42 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,8 +69,7 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """ - NOP host filter. Returns all hosts in ZoneManager. + """ NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used to give us. """ @@ -135,8 +134,7 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """ - Host Filter to allow simple JSON-based grammar for + """Host Filter to allow simple JSON-based grammar for selecting hosts. """ @@ -233,8 +231,7 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """ - Strings prefixed with $ are capability lookups in the + """Strings prefixed with $ are capability lookups in the form '$service.capability[.subcap*]' """ if not string: @@ -283,8 +280,7 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """ - Since the caller may specify which filter to use we need + """Since the caller may specify which filter to use we need to have an authoritative list of what is permissible. This function checks the filter name against a predefined set of acceptable filters. @@ -300,8 +296,7 @@ def choose_host_filter(filter_name=None): class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """ - The HostFilterScheduler uses the HostFilter to filter + """The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. @@ -322,8 +317,7 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. 
""" return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 236907626..bc67c7794 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -23,8 +23,8 @@ across zones. There are two expansion points to this class for: import operator from nova import db -from nova import rpc from nova import log as logging +from nova import rpc from nova.scheduler import api from nova.scheduler import driver @@ -40,8 +40,7 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """ - This method is called from nova.compute.api to provision + """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or @@ -85,8 +84,7 @@ class ZoneAwareScheduler(driver.Scheduler): pass def select(self, context, request_spec, *args, **kwargs): - """ - Select returns a list of weights and zone/host information + """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children. @@ -98,15 +96,13 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """ - The schedule() contract requires we return the one + """The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """ - Returns a list of hosts that meet the required specs, + """Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -142,15 +138,13 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format. """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. """ raise NotImplemented() -- cgit From c26be56d63a9d263ea8632514be03607713c754d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 15:48:40 -0400 Subject: Glance client updates for xenapi and vmware API to work with image refs. 
--- nova/image/__init__.py | 42 +++++++++++++++++++++++++++--------- nova/tests/glance/stubs.py | 12 +++++++---- nova/tests/test_vmwareapi.py | 5 ++--- nova/tests/test_xenapi.py | 29 +++++++++++-------------- nova/tests/vmwareapi/db_fakes.py | 2 +- nova/virt/vmwareapi/vmops.py | 12 +++++------ nova/virt/vmwareapi/vmware_images.py | 16 ++++++-------- nova/virt/xenapi/vm_utils.py | 14 ++++++------ nova/virt/xenapi/vmops.py | 2 +- 9 files changed, 78 insertions(+), 56 deletions(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 9b9108bd2..011d79d61 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -18,16 +18,16 @@ from urlparse import urlparse - from nova import exception -import nova.image.glance -from nova.utils import import_class +from nova import utils from nova import flags - FLAGS = flags.FLAGS +GlanceClient = utils.import_class('glance.client.Client') + + def _parse_image_ref(image_href): """Parse an image href into composite parts. @@ -43,10 +43,36 @@ def _parse_image_ref(image_href): def get_default_image_service(): - ImageService = import_class(FLAGS.image_service) + ImageService = utils.import_class(FLAGS.image_service) return ImageService() +def get_glance_client(image_href): + """Get the correct glance client and id for the given image_href. + + The image_href param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_href is an int, then flags are used to create the default + glance client. + + :param image_href: image ref/id for an image + :returns: a tuple of the form (glance_client, image_id) + + """ + image_href = image_href or 0 + if str(image_href).isdigit(): + glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) + return (glance_client, int(image_href)) + + try: + (image_id, host, port) = _parse_image_ref(image_href) + except: + raise exception.InvalidImageRef(image_href=image_href) + glance_client = GlanceClient(host, port) + #glance_client = client.Client(host, port) + return (glance_client, image_id) + + def get_image_service(image_href): """Get the proper image_service and id for the given image_href. 
@@ -62,10 +88,6 @@ def get_image_service(image_href): if str(image_href).isdigit(): return (get_default_image_service(), int(image_href)) - try: - (image_id, host, port) = _parse_image_ref(image_href) - except: - raise exception.InvalidImageRef(image_href=image_href) - glance_client = nova.image.glance.GlanceClient(host, port) + (glance_client, image_id) = get_glance_client(image_href) image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 5872552ec..8611fef29 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,13 +16,17 @@ import StringIO -import glance.client +from nova import images -def stubout_glance_client(stubs, cls): + +def get_mock_glance_client(): + return FakeGlance() + + +def stubout_glance_client(stubs): """Stubs out glance.client.Client""" - stubs.Set(glance.client, 'Client', - lambda *args, **kwargs: cls(*args, **kwargs)) + stubs.Set(images, 'get_glance_client', get_mock_glance_client) class FakeGlance(object): diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 22b66010a..e5ebd1600 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase): vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) stubs.set_stubs(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) self.conn = vmwareapi_conn.get_connection(False) def _create_instance_in_the_db(self): @@ -64,7 +63,7 @@ class VMWareAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': "1", + 'image_ref': "1", 'kernel_id': "1", 'ramdisk_id': "1", 'instance_type': 'm1.large', diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a267896..56e1e47af 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.values = {'id': 1, 'project_id': 'fake', 'user_id': 'fake', - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) stubs.stub_out_vm_methods(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) self.context = context.RequestContext('fake', 'fake', False) self.conn = xenapi_conn.get_connection(False) @@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': id, 'project_id': proj, 'user_id': user, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') - def _test_spawn(self, image_id, kernel_id, ramdisk_id, + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", instance_id=1, check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': image_id, + 'image_ref': image_ref, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': 
instance_type_id, @@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -623,7 +622,7 @@ class XenAPIMigrateInstance(test.TestCase): self.values = {'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, 'local_gb': 5, @@ -634,8 +633,7 @@ class XenAPIMigrateInstance(test.TestCase): fake_utils.stub_out_utils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) def tearDown(self): super(XenAPIMigrateInstance, self).tearDown() @@ -661,8 +659,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): """Unit tests for code that detects the ImageType.""" def setUp(self): super(XenAPIDetermineDiskImageTestCase, self).setUp() - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) class FakeInstance(object): pass @@ -679,7 +676,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -689,7 +686,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): DISK_RAW is assumed. """ FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -699,7 +696,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'raw'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -709,7 +706,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'vhd'. 
""" FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index 0addd5573..764de42d8 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs): 'name': values['name'], 'id': values['id'], 'reservation_id': utils.generate_uid('r'), - 'image_id': values['image_id'], + 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'state_description': 'scheduling', diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index c3e79a92f..d1bf2de2c 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -150,7 +150,7 @@ class VMWareVMOps(object): """ image_size, image_properties = \ vmware_images.get_vmdk_size_and_properties( - instance.image_id, instance) + instance.image_ref, instance) vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") adapter_type = image_properties.get("vmware_adaptertype", @@ -265,23 +265,23 @@ class VMWareVMOps(object): def _fetch_image_on_esx_datastore(): """Fetch image from Glance to ESX datastore.""" - LOG.debug(_("Downloading image file data %(image_id)s to the ESX " + LOG.debug(_("Downloading image file data %(image_ref)s to the ESX " "data store %(data_store_name)s") % - ({'image_id': instance.image_id, + ({'image_ref': instance.image_ref, 'data_store_name': data_store_name})) # Upload the -flat.vmdk file whose meta-data file we just created # above vmware_images.fetch_image( - instance.image_id, + instance.image_ref, instance, host=self._session._host_ip, data_center_name=self._get_datacenter_name_and_ref()[1], datastore_name=data_store_name, cookies=cookies, file_path=flat_uploaded_vmdk_name) - LOG.debug(_("Downloaded image file data %(image_id)s to the ESX " + LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX " "data store %(data_store_name)s") % - ({'image_id': instance.image_id, + ({'image_ref': instance.image_ref, 'data_store_name': data_store_name})) _fetch_image_on_esx_datastore() diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 50c6baedf..11f4fe06a 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -18,10 +18,9 @@ Utility functions for Image transfer. 
""" -from glance import client - from nova import exception from nova import flags +import nova.image from nova import log as logging from nova.virt.vmwareapi import io_util from nova.virt.vmwareapi import read_write_util @@ -117,8 +116,8 @@ def upload_image(image, instance, **kwargs): def _get_glance_image(image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image) - glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port) - metadata, read_iter = glance_client.get_image(image) + glance_client, image_id = nova.image.get_glance_client(image) + metadata, read_iter = glance_client.get_image(image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) file_size = int(metadata['size']) write_file_handle = read_write_util.VMWareHTTPWriteFile( @@ -153,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs): kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() - glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port) + glance_client, image_id = nova.image.get_glance_client(image) # The properties and other fields that we need to set for the image. image_metadata = {"is_public": True, "disk_format": "vmdk", @@ -165,7 +164,7 @@ def _put_glance_image(image, instance, **kwargs): "vmware_image_version": kwargs.get("image_version")}} start_transfer(read_file_handle, file_size, glance_client=glance_client, - image_id=image, image_meta=image_metadata) + image_id=image_id, image_meta=image_metadata) LOG.debug(_("Uploaded image %s to the Glance image server") % image) @@ -188,9 +187,8 @@ def get_vmdk_size_and_properties(image, instance): LOG.debug(_("Getting image size for the image %s") % image) if FLAGS.image_service == "nova.image.glance.GlanceImageService": - glance_client = client.Client(FLAGS.glance_host, - FLAGS.glance_port) - meta_data = glance_client.get_image_meta(image) + glance_client, image_id = nova.image.get_glance_client(image) + meta_data = glance_client.get_image_meta(image_id) size, properties = meta_data["size"], meta_data["properties"] elif FLAGS.image_service == "nova.image.s3.S3ImageService": raise NotImplementedError diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 06ee8ee9b..3b1209da8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from xml.dom import minidom import glance.client from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import utils from nova.auth.manager import AuthManager @@ -455,8 +456,8 @@ class VMHelper(HelperBase): # DISK restores sr_ref = safe_find_sr(session) - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta, image_file = client.get_image(image) + glance_client, image_id = nova.image.get_glance_client(image) + meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) @@ -515,10 +516,10 @@ class VMHelper(HelperBase): ImageType.DISK_RAW: 'DISK_RAW', ImageType.DISK_VHD: 'DISK_VHD'} disk_format = pretty_format[image_type] - image_id = instance.image_id + image_ref = instance.image_ref instance_id = instance.id LOG.debug(_("Detected %(disk_format)s format for image " - "%(image_id)s, instance %(instance_id)s") % locals()) + "%(image_ref)s, instance %(instance_id)s") % locals()) def determine_from_glance(): 
glance_disk_format2nova_type = { @@ -527,8 +528,9 @@ class VMHelper(HelperBase): 'ari': ImageType.KERNEL_RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD} - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta = client.get_image_meta(instance.image_id) + image_ref = instance.image_ref + glance_client, image_id = nova.image.get_glance_client(image_ref) + meta = glance_client.get_image_meta(image_id) disk_format = meta['disk_format'] try: return glance_disk_format2nova_type[disk_format] diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6d516ddbc..183d29470 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -111,7 +111,7 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, + instance.id, instance.image_ref, user, project, disk_image_type) return vdis -- cgit From e75bbc348c713775af11293fc6e5e05667279234 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 02:18:48 -0400 Subject: More image_id to image_ref stuff. Also fixed tests in test_servers. --- nova/api/openstack/servers.py | 2 +- nova/api/openstack/views/servers.py | 8 ++++---- nova/tests/api/openstack/test_servers.py | 16 +++++++++------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 76800795c..7593694bd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -189,7 +189,7 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPBadRequest(msg)) inst['instance_type'] = inst_type - inst['image_id'] = image_href + inst['image_ref'] = image_href builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ddd17ab93..dd1d68ff0 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -112,8 +112,8 @@ class ViewBuilderV10(ViewBuilder): """Model an Openstack API V1.0 server response.""" def _build_image(self, response, inst): - if 'image_id' in dict(inst): - response['imageId'] = int(inst['image_id']) + if 'image_ref' in dict(inst): + response['imageId'] = int(inst['image_ref']) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): @@ -130,8 +130,8 @@ class ViewBuilderV11(ViewBuilder): self.base_url = base_url def _build_image(self, response, inst): - if 'image_id' in dict(inst): - image_href = inst['image_id'] + if 'image_ref' in dict(inst): + image_href = inst['image_ref'] if str(image_href).isdigit(): image_href = int(image_href) response['imageRef'] = image_href diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a6ab9c0c8..1ce0e8e84 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -98,7 +98,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "admin_pass": "", "user_id": user_id, "project_id": "", - "image_id": "10", + "image_ref": "10", "kernel_id": "", "ramdisk_id": "", "launch_index": 0, @@ -475,12 +475,13 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - def fake_image_service(*args): - return nova.image.fake.FakeImageService() + def fake_get_image_service(image_href): + image_id = 
int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) - FLAGS.image_service = 'nova.image.fake.FakeImageService' - self.stubs.Set( - nova.image.glance, 'GlanceImageService', fake_image_service) + self.stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) + self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -1685,6 +1686,8 @@ class TestServerInstanceCreation(test.TestCase): fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.allow_admin = FLAGS.allow_admin_api + self.stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) def tearDown(self): self.stubs.UnsetAll() @@ -1714,7 +1717,6 @@ class TestServerInstanceCreation(test.TestCase): return stub_method compute_api = MockComputeAPI() - FLAGS.image_service = 'nova.image.fake.FakeImageService' self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) self.stubs.Set(nova.api.openstack.servers.Controller, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) -- cgit From 1fced8f7a527f25abde457cfcf056a9a082a79c3 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 04:29:35 -0400 Subject: Fixing integration tests by correctly stubbing image service. --- nova/tests/integrated/integrated_helpers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 3b4c49c93..5eacc829d 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -152,10 +152,10 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) - def fake_image_service(*args): - return nova.image.fake.FakeImageService() - self.stubs.Set( - nova.image.glance, 'GlanceImageService', fake_image_service) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) # set up services self.start_service('compute') -- cgit From bceac9e68021959c8711a0be4ed7ac13352a4623 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 06:04:19 -0400 Subject: Fixing xen and vmware tests by correctly mocking glance client. 
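The test stubs above all share one convention for turning an image href into a (service, image id) pair: the image id is whatever follows the last '/' in the href, so a bare id like "2" and a full href like "http://glance.example.com/v1.1/images/2" resolve to the same integer. A minimal, self-contained sketch of that convention (the FakeImageService class and the example href are illustrative stand-ins, not the real nova.image.fake implementation)::

    class FakeImageService(object):
        """Stand-in image service used only for this sketch."""

        def show(self, context, image_id):
            return {'id': image_id, 'properties': {}}


    def fake_get_image_service(image_href):
        # Bare ids ("2") and URL-style hrefs (".../images/2") both
        # resolve to the trailing path segment.
        image_id = int(str(image_href).split('/')[-1])
        return (FakeImageService(), image_id)


    # In a test this would typically be wired in with something like:
    #   stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
    service, image_id = fake_get_image_service('http://glance.example.com/v1.1/images/2')
    assert image_id == 2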
--- nova/tests/glance/stubs.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 8611fef29..fdd9ad4da 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,19 +16,16 @@ import StringIO - -from nova import images - - -def get_mock_glance_client(): - return FakeGlance() +import nova.image def stubout_glance_client(stubs): - """Stubs out glance.client.Client""" - stubs.Set(images, 'get_glance_client', get_mock_glance_client) - + def fake_get_glance_client(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (FakeGlance('foo'), image_id) + stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) + class FakeGlance(object): IMAGE_MACHINE = 1 IMAGE_KERNEL = 2 -- cgit From 9ce5728a0d800374a76cacf935daf2c032f1c33d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 28 May 2011 06:25:04 -0400 Subject: Fixing nova.tests.api.openstack.fakes.stub_out_image_service. It now stubs out the get_image_service and get_default_image_service functions. Also some pep8 whitespace fixes. --- .../sqlalchemy/migrate_repo/versions/019_rename_image_ids.py | 3 ++- nova/tests/api/openstack/fakes.py | 11 +++++++---- nova/tests/api/openstack/test_servers.py | 10 +--------- nova/tests/glance/stubs.py | 2 +- nova/virt/vmwareapi/vmware_images.py | 6 +++--- 5 files changed, 14 insertions(+), 18 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py index 6838f1ea6..73a5e8477 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py @@ -15,10 +15,11 @@ # under the License. from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging + meta = MetaData() + def upgrade(migrate_engine): # Upgrade operations go here. 
Don't create your own engine; # bind migrate_engine to your metadata diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..01b42d00c 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -38,6 +38,7 @@ from nova.api.openstack import auth from nova.api.openstack import versions from nova.api.openstack import limits from nova.auth.manager import User, Project +import nova.image.fake from nova.image import glance from nova.image import local from nova.image import service @@ -104,10 +105,12 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): - def fake_image_show(meh, context, id): - return dict(kernelId=1, ramdiskId=1) - - stubs.Set(local.LocalImageService, 'show', fake_image_show) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + stubs.Set(nova.image, 'get_image_service', fake_get_image_service) + stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) def stub_out_auth(stubs): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 1ce0e8e84..9f3b53cdf 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -475,13 +475,6 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - def fake_get_image_service(image_href): - image_id = int(str(image_href).split('/')[-1]) - return (nova.image.fake.FakeImageService(), image_id) - - self.stubs.Set(nova.image, 'get_default_image_service', - lambda: nova.image.fake.FakeImageService()) - self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -1684,10 +1677,9 @@ class TestServerInstanceCreation(test.TestCase): fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) + fakes.stub_out_image_service(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.allow_admin = FLAGS.allow_admin_api - self.stubs.Set(nova.image, 'get_default_image_service', - lambda: nova.image.fake.FakeImageService()) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index fdd9ad4da..1e0b90d82 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -25,7 +25,7 @@ def stubout_glance_client(stubs): return (FakeGlance('foo'), image_id) stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) - + class FakeGlance(object): IMAGE_MACHINE = 1 IMAGE_KERNEL = 2 diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 11f4fe06a..48edc5384 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -116,7 +116,7 @@ def upload_image(image, instance, **kwargs): def _get_glance_image(image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image) - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) metadata, read_iter = glance_client.get_image(image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) file_size = int(metadata['size']) 
@@ -152,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs): kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) # The properties and other fields that we need to set for the image. image_metadata = {"is_public": True, "disk_format": "vmdk", @@ -187,7 +187,7 @@ def get_vmdk_size_and_properties(image, instance): LOG.debug(_("Getting image size for the image %s") % image) if FLAGS.image_service == "nova.image.glance.GlanceImageService": - glance_client, image_id = nova.image.get_glance_client(image) + (glance_client, image_id) = nova.image.get_glance_client(image) meta_data = glance_client.get_image_meta(image_id) size, properties = meta_data["size"], meta_data["properties"] elif FLAGS.image_service == "nova.image.s3.S3ImageService": -- cgit From b0636780291fc6531d89a69e164e82203414a875 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 07:49:31 -0400 Subject: Another image_id location in hyperv. --- nova/virt/hyperv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 1142e97a4..05b4775c1 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -151,7 +151,7 @@ class HyperVConnection(driver.ComputeDriver): base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - images.fetch(instance['image_id'], vhdfile, user, project) + images.fetch(instance['image_ref'], vhdfile, user, project) try: self._create_vm(instance) -- cgit From 29387999d6befc29dddfb7dfd5d543607676e106 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 14:18:25 -0400 Subject: Added missing nova import to image/__init__.py. --- nova/image/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 011d79d61..f42332a29 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -18,6 +18,7 @@ from urlparse import urlparse +import nova from nova import exception from nova import utils from nova import flags -- cgit From 5976b50299b31292d578dcdd8576607e175fca44 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 21:10:57 -0400 Subject: Cleanup instances_path in test_libvirt test_spawn_with_network_info test. --- nova/tests/test_libvirt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 4efdd6ae9..1fac4e4e6 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -18,6 +18,7 @@ import eventlet import mox import os import re +import shutil import sys from xml.etree.ElementTree import fromstring as xml_to_tree @@ -645,6 +646,8 @@ class LibvirtConnTestCase(test.TestCase): except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) + shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + self.assertTrue(count) def test_get_host_ip_addr(self): -- cgit From a9278909cbb6d5ea9283231dbd6efc67b812abff Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:10:42 -0400 Subject: Update the rebuild_instance function in the compute manager so that it accepts the arguments that our current compute API sends. 
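For context on why the manager signature below widens to **kwargs: the compute API invokes manager methods by name over the message queue, packing the call arguments into a dict, so the manager has to accept whatever keyword arguments the API chooses to send. A rough sketch of the calling side (the exact argument names sent by nova.compute.api at this point in the series are an assumption based on the manager change below, not a quote of that module)::

    from nova import db
    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS


    def rebuild(context, instance_id, image_id, injected_files=None):
        """Ask the compute host that owns the instance to rebuild it."""
        instance = db.instance_get(context, instance_id)
        # Per-host compute topic, e.g. "compute.<hostname>".
        topic = db.queue_get_for(context, FLAGS.compute_topic,
                                 instance['host'])
        rpc.cast(context, topic,
                 {'method': 'rebuild_instance',
                  'args': {'instance_id': instance_id,
                           'image_id': image_id,
                           'injected_files': injected_files or []}})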
--- nova/compute/manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..3897b3a9e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and @@ -349,7 +349,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + instance_ref.image_id = kwargs.get('image_id') + instance_ref.injected_files = kwargs.get('injected_files', []) self.driver.spawn(instance_ref) self._update_image_id(context, instance_id, image_id) -- cgit From ccf522daaca0d4136c072c1905dd9fbaa1dfb2e9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:12:07 -0400 Subject: Fixes to the SQLAlchmeny API such that metadata is saved on an instance_update. Added integration test to test that instance metadata is updated on a rebuild. --- nova/db/sqlalchemy/api.py | 22 +++++++++++++--------- nova/tests/integrated/api/client.py | 10 ++++++++-- nova/tests/integrated/test_servers.py | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 11 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e4dda5c12..1a7cae6e9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -771,6 +771,15 @@ def fixed_ip_update(context, address, values): ################### +def _metadata_refs(metadata_dict): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = models.InstanceMetadata() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs @require_context @@ -780,15 +789,7 @@ def instance_create(context, values): context - request context object values - dict containing column values. 
""" - metadata = values.get('metadata') - metadata_refs = [] - if metadata: - for k, v in metadata.iteritems(): - metadata_ref = models.InstanceMetadata() - metadata_ref['key'] = k - metadata_ref['value'] = v - metadata_refs.append(metadata_ref) - values['metadata'] = metadata_refs + values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() instance_ref.update(values) @@ -1010,6 +1011,9 @@ def instance_set_state(context, instance_id, state, description=None): @require_context def instance_update(context, instance_id, values): session = get_session() + metadata = values.get('metadata') + if metadata: + values['metadata'] = _metadata_refs(values.get('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 7e20c9b00..eb9a3056e 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -152,7 +152,10 @@ class TestOpenStackClient(object): def _decode_json(self, response): body = response.read() LOG.debug(_("Decoding JSON: %s") % (body)) - return json.loads(body) + if body: + return json.loads(body) + else: + return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) @@ -166,7 +169,7 @@ class TestOpenStackClient(object): headers['Content-Type'] = 'application/json' kwargs['body'] = json.dumps(body) - kwargs.setdefault('check_response_status', [200]) + kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) @@ -185,6 +188,9 @@ class TestOpenStackClient(object): def post_server(self, server): return self.api_post('/servers', server)['server'] + def post_server_action(self, server_id, data): + return self.api_post('/servers/%s/action' % server_id, data) + def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..604faf59f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,40 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + + # Cleanup + self._delete_server(created_server_id) if __name__ == "__main__": unittest.main() -- cgit From 394b37f8c944fbd3ca683d7752cd751bc69cce51 Mon Sep 17 
00:00:00 2001 From: Dan Prince Date: Sun, 29 May 2011 00:00:02 -0400 Subject: Implement the v1.1 style resize action with support for flavorRef. --- nova/api/openstack/servers.py | 32 ++++++++++++++++++++++++++++++++ nova/tests/api/openstack/test_servers.py | 19 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..a3066e578 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -332,6 +332,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() def _action_resize(self, input_dict, req, id): + return exc.HTTPNotImplemented() """ Resizes a given instance to the flavor size requested """ try: if 'resize' in input_dict and 'flavorId' in input_dict['resize']: @@ -610,6 +611,21 @@ class ControllerV10(Controller): self.compute_api.set_admin_password(context, server_id, inst_dict['server']['adminPass']) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) @@ -695,6 +711,22 @@ class ControllerV11(Controller): LOG.info(msg) raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorRef' in input_dict['resize']: + flavor_ref = input_dict['resize']['flavorRef'] + flavor_id = common.get_id_from_href(flavor_ref) + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..e0910fed6 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1267,6 +1267,25 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) + def test_resize_server_v11(self): + + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict(flavorRef="http://localhost/3")) + req.body = json.dumps(body_dict) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + def test_resize_bad_flavor_fails(self): req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) -- cgit From 
833481d796db557dddde6b4b9e75b7cf518b88fa Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sun, 29 May 2011 07:51:44 -0400 Subject: Use metadata variable when calling _metadata_refs. --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1a7cae6e9..a678ebedd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,7 +1013,7 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata: - values['metadata'] = _metadata_refs(values.get('metadata')) + values['metadata'] = _metadata_refs(metadata) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) -- cgit From c9926b12f4c554d9a21c6e77fc657e54a2dd4888 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 29 May 2011 18:01:46 -0700 Subject: starting --- doc/source/devref/distributed_scheduler.rst | 164 ++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 doc/source/devref/distributed_scheduler.rst diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst new file mode 100644 index 000000000..75a4d57ce --- /dev/null +++ b/doc/source/devref/distributed_scheduler.rst @@ -0,0 +1,164 @@ +.. + Copyright 2011 OpenStack LLC + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Distributed Scheduler +===== + +The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). + +But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone. + +This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found. + +So, how does this all work? + +This document will explain the strategy employed by the ZoneAwareScheduler and its derivations. + +Costs & Weights +---------- +When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. 
Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost. + +Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost. + +An example of some other costs might include selecting: +* a GPU-based host over a standard CPU +* a host with fast ethernet over a 10mbps line +* a host than can run Windows instances +* a host in the EU vs North America +* etc + +This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly. + +nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler +----------- +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. + +Here is how it works: + +1. The Compute nodes are Filtered and the remaining set are Weighted. +1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. +1b. Weighing of the remaining Compute nodes is performed +2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. +3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. +4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. + +Filtering and Weighing +------------ +Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. + + + + + + +- +Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: + +:: + + key=value;value;value, key=value;value;value + +Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. + +Flow within a Zone +------------------ +The brunt of the work within a Zone is done in the Scheduler Service. 
The Scheduler is responsible for: +- collecting capability messages from the Compute, Volume and Network nodes, +- polling the child Zones for their status and +- providing data to the Distributed Scheduler for performing load balancing calculations + +Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. + +These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. + +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). + +Zone administrative functions +----------------------------- +Zone administrative operations are usually done using python-novaclient_ + +.. _python-novaclient: https://github.com/rackspace/python-novaclient + +In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. + +Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. + +Find out about this Zone +------------------------ +In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. + +:: + + alice@novadev:~$ nova zone-info + +-----------------+---------------+ + | Property | Value | + +-----------------+---------------+ + | compute_cpu | 0.7,0.7 | + | compute_disk | 123000,123000 | + | compute_network | 800,800 | + | hypervisor | xenserver | + | name | nova | + | network_cpu | 0.7,0.7 | + | network_disk | 123000,123000 | + | network_network | 800,800 | + | os | linux | + +-----------------+---------------+ + +This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. + +Adding a child Zone +------------------- +Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command: + +:: + + nova zone-add + +You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: + +:: + + export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" + export NOVA_USERNAME="alice" + export NOVA_URL="http://192.168.2.120:8774/v1.0/" + + +This equates to a POST operation to `.../zones/` to add a new zone. 
No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. + +Getting a list of child Zones +----------------------------- + +:: + + nova zone-list + + alice@novadev:~$ nova zone-list + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | ID | Name | Is Active | Capabilities | API URL | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | + | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + +This equates to a GET operation to `.../zones`. + +Removing a child Zone +--------------------- +:: + + nova zone-delete + +This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. -- cgit From 5aa54545486ffe9d9988761576f497de9a957d47 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Sun, 29 May 2011 23:42:46 -0300 Subject: lots more --- doc/source/devref/distributed_scheduler.rst | 44 +++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 75a4d57ce..7599f2cc5 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -25,7 +25,7 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab So, how does this all work? -This document will explain the strategy employed by the ZoneAwareScheduler and its derivations. +This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this. Costs & Weights ---------- @@ -48,19 +48,51 @@ As we explained in the Zones documentation, each Scheduler has a `ZoneManager` o Here is how it works: -1. The Compute nodes are Filtered and the remaining set are Weighted. +1. The Compute nodes are Filtered and the nodes remaining are Weighed. 1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes is performed +1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. 2. The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. 3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. 4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. Filtering and Weighing ------------ -Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. 
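As a toy illustration of the filter-then-weigh flow described in the numbered steps above: hosts that cannot hold the instance are dropped first, and a cost function then ranks whatever is left. The host names, capability keys and cost function below are invented for the example; the real scheduler drives this through pluggable filter and cost classes::

    # Invented capability data; lower cost means a better fit.
    hosts = {
        'host1': {'free_ram_mb': 4096, 'free_disk_gb': 80},
        'host2': {'free_ram_mb': 16384, 'free_disk_gb': 200},
        'host3': {'free_ram_mb': 512, 'free_disk_gb': 10},
    }
    request = {'ram_mb': 2048, 'disk_gb': 20}

    def passes_filter(caps):
        # Filtering: drop hosts that simply cannot hold the instance.
        return (caps['free_ram_mb'] >= request['ram_mb'] and
                caps['free_disk_gb'] >= request['disk_gb'])

    def cost(caps):
        # Weighing: prefer the host left with the least spare RAM
        # (a simple packing strategy; real cost functions vary widely).
        return caps['free_ram_mb'] - request['ram_mb']

    eligible = [(name, caps) for name, caps in hosts.items()
                if passes_filter(caps)]
    build_plan = sorted(eligible, key=lambda item: cost(item[1]))
    # build_plan[0] is the cheapest (best) host for this request.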
- - +Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. +Requesting a new instance +------------ +To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later. + +`nova.compute.api.create()` performs the following actions: +1. it validates all the fields passed into it. +2. it creates an entry in the `Instance` table for each instance requested +3. it puts one `run_instance` message in the scheduler queue for each instance requested +4. the schedulers pick off the messages and decide which Compute node should handle the request. +5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created. +6. it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid. + +Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones. + +The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. + +For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: +1. it validates all the fields passed into it. +2. it creates a single `request_id` for all of instances created. This is a UUID. +3. it creates a single `run_instance` request in the scheduler queue +4. a scheduler picks the message off the queue and works on it. +5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. +6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. +7. if the Child Zone has its own Child Zones, the `/zones/select` call will be sent down to them as well. +8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. +9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. 
The parameters to the Child Zone call are the same as what was passed in by the user. + +The Catch +------------- +This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. + +When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. + +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. -- cgit From c3c2c1a63c126f046457d0d61306ebe9c46af700 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 00:00:28 -0300 Subject: basic flow done --- doc/source/devref/distributed_scheduler.rst | 105 ++++------------------------ 1 file changed, 13 insertions(+), 92 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 7599f2cc5..c9aaf8c01 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -55,6 +55,8 @@ Here is how it works: 3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. 4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. +`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later. + Filtering and Weighing ------------ Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. @@ -77,7 +79,7 @@ The problem with this approach is that each request is scattered amongst each of For the `ZoneAwareScheduler` we need to use the All-at-Once approach. 
We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. -2. it creates a single `request_id` for all of instances created. This is a UUID. +2. it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue 4. a scheduler picks the message off the queue and works on it. 5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. @@ -85,6 +87,7 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to 7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. 9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user. +10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. The Catch ------------- @@ -92,105 +95,23 @@ This all seems pretty straightforward but, like most things, there's a catch. Zo When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. -Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. - - - -- -Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: - -:: - - key=value;value;value, key=value;value;value - -Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. 
These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. - -Flow within a Zone ------------------- -The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for: -- collecting capability messages from the Compute, Volume and Network nodes, -- polling the child Zones for their status and -- providing data to the Distributed Scheduler for performing load balancing calculations - -Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. - -These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. - -The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). - -Zone administrative functions ------------------------------ -Zone administrative operations are usually done using python-novaclient_ - -.. _python-novaclient: https://github.com/rackspace/python-novaclient - -In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. - -Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. - -Find out about this Zone ------------------------- -In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. - -:: - - alice@novadev:~$ nova zone-info - +-----------------+---------------+ - | Property | Value | - +-----------------+---------------+ - | compute_cpu | 0.7,0.7 | - | compute_disk | 123000,123000 | - | compute_network | 800,800 | - | hypervisor | xenserver | - | name | nova | - | network_cpu | 0.7,0.7 | - | network_disk | 123000,123000 | - | network_network | 800,800 | - | os | linux | - +-----------------+---------------+ - -This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `_ = ,` format. - -Adding a child Zone -------------------- -Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. 
From a parent zone you can add a child zone with the following command: - -:: - - nova zone-add - -You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: - -:: +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. - export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" - export NOVA_USERNAME="alice" - export NOVA_URL="http://192.168.2.120:8774/v1.0/" +In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. +Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use. -This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. +Reservation ID's +--------------- -Getting a list of child Zones ------------------------------ -:: - nova zone-list - alice@novadev:~$ nova zone-list - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | ID | Name | Is Active | Capabilities | API URL | - +----+-------+-----------+--------------------------------------------+---------------------------------+ - | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | - | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | - +----+-------+-----------+--------------------------------------------+---------------------------------+ +Host Filter +-------------- -This equates to a GET operation to `.../zones`. -Removing a child Zone ---------------------- -:: +Cost Scheduler Weighing +-------------- - nova zone-delete -This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. -- cgit From 5101aa300b087bf57f22cb128649679e8b11051d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 00:45:15 -0300 Subject: reservation_id's done --- doc/source/devref/distributed_scheduler.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index c9aaf8c01..402f50bee 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -104,8 +104,17 @@ Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.c Reservation ID's --------------- +NOTE: The features described in this section are related to the up-coming 'merge-4' branch. 
+The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created. +NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be bad. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. + +We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/servers` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. + +Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter. + +`python-novaclient` will be extended to support both of these changes. Host Filter -------------- -- cgit From 45818393a20a56d5e0aab23f3c78e430e0c1167a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 30 May 2011 14:17:00 +0900 Subject: fixed nova.virt.libvirt_conn.resume() method - removing try-catch --- nova/virt/libvirt_conn.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 47a77b3ae..32f374955 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -569,12 +569,8 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" - try: - dom = self._lookup_by_name(instance.name) - dom.create() - except libvirt.LibvirtError: - xml = self.to_xml(instance, None) - self._create_new_domain(xml) + dom = self._lookup_by_name(instance.name) + dom.create() @exception.wrap_exception def rescue(self, instance): -- cgit From 0cf5316131aecbac5e843282e2e2eb2acd3fc9e3 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 30 May 2011 05:03:45 -0700 Subject: first cut complete --- doc/source/devref/distributed_scheduler.rst | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 402f50bee..a45505640 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -119,8 +119,49 @@ Finally, we need to give the user a way to get information on each of the instan Host Filter -------------- +As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. 
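As a rough illustration of the kind of plug-in this allows, a deployment-specific filter might look like the sketch below. The `service_states` attribute, the `host_memory_free` capability key and the `_full_name()` helper are assumptions made for this example, not a definitive description of the module's API:

::

    # Hypothetical filter: only accept hosts reporting more free RAM than
    # the requested InstanceType needs.
    from nova.scheduler import host_filter


    class EnoughRamFilter(host_filter.HostFilter):
        """Pass only hosts with sufficient free memory."""

        def instance_type_to_filter(self, instance_type):
            # Identify this filter and pass the InstanceType through
            # unchanged as the query.
            return (self._full_name(), instance_type)

        def filter_hosts(self, zone_manager, query):
            instance_type = query
            selected = []
            for host, services in zone_manager.service_states.iteritems():
                compute = services.get('compute', {})
                free_ram_mb = compute.get('host_memory_free', 0)
                if free_ram_mb >= instance_type['memory_mb']:
                    # Collect (hostname, capability data) tuples.
                    selected.append((host, compute))
            return selected

The flag that selects the filter and the exact return contract are described below.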
+The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others. + +`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. +`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. + +To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be. + Cost Scheduler Weighing -------------- +Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled. + +Simple Zone Aware Scheduling +-------------- +The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. + +The `--scheduler_driver` flag is how you specify the Scheduler class name. + +Flags +-------------- + +All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file: + +:: + --allow_admin_api=true + --enable_zone_routing=true + --zone_name=zone1 + --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b + --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler + --default_host_filter=nova.scheduler.host_filter.AllHostsFilter + +`--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. +`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. +`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. 
+`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. +`scheduler_driver` is the real work horse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler` +`default_host_filter` is the host filter to be used for filtering candidate Compute nodes. + +Some optional flags which are handy for debugging are: +:: + --connection_type=fake + --verbose +Using the `Fake` virtualization driver is handy when you're setting this stuff up so you're not dealing with a million possible issues at once. When things seem to working correctly, switch back to whatever hypervisor your deployment uses. -- cgit From 2155f2b1ab22c6183ab5266e16a675f1469fca50 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 30 May 2011 11:29:55 -0400 Subject: Updates so that 'name' can be updated when doing a OS API v1.1 rebuild. Fixed issue where metadata wasn't getting deleted when an empty dict was POST'd on a rebuild. --- nova/api/openstack/servers.py | 10 +++-- nova/compute/api.py | 13 ++++--- nova/db/sqlalchemy/api.py | 17 +++++++-- nova/tests/integrated/test_servers.py | 72 +++++++++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..8e191c232 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -708,14 +708,16 @@ class ControllerV11(Controller): image_id = common.get_id_from_href(image_ref) personalities = info["rebuild"].get("personality", []) - metadata = info["rebuild"].get("metadata", {}) + metadata = info["rebuild"].get("metadata") + name = info["rebuild"].get("name") - self._validate_metadata(metadata) + if metadata: + self._validate_metadata(metadata) self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, metadata, - personalities) + self.compute_api.rebuild(context, instance_id, image_id, name, + metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id LOG.debug(msg) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4f2363387..151679521 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -530,7 +530,7 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, + def rebuild(self, context, instance_id, image_id, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -539,13 +539,16 @@ class API(base.Base): msg = _("Instance already building") raise exception.BuildInProgress(msg) - metadata = metadata or {} - self._check_metadata_properties_quota(context, metadata) - files_to_inject = files_to_inject or [] self._check_injected_file_quota(context, files_to_inject) - self.db.instance_update(context, instance_id, {"metadata": metadata}) + values = {} + if metadata is not None: + self._check_metadata_properties_quota(context, metadata) + values['metadata'] = metadata + if name is not None: + values['display_name'] = name + self.db.instance_update(context, instance_id, values) rebuild_params = { "image_id": image_id, diff --git a/nova/db/sqlalchemy/api.py 
b/nova/db/sqlalchemy/api.py index a678ebedd..ea84e96e7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1012,8 +1012,9 @@ def instance_set_state(context, instance_id, state, description=None): def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') - if metadata: - values['metadata'] = _metadata_refs(metadata) + if metadata is not None: + instance_metadata_update_or_create(context, instance_id, + values.pop('metadata'), True) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2570,8 +2571,12 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata): +def instance_metadata_update_or_create(context, instance_id, metadata, + purge=False): session = get_session() + + original_metadata = instance_metadata_get(context, instance_id) + meta_ref = None for key, value in metadata.iteritems(): try: @@ -2583,4 +2588,10 @@ def instance_metadata_update_or_create(context, instance_id, metadata): "instance_id": instance_id, "deleted": 0}) meta_ref.save(session=session) + + if purge: + for key in original_metadata.keys(): + if not key in metadata.keys(): + instance_metadata_delete(context, instance_id, key) + return metadata diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 604faf59f..a67fa1bb5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,36 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server(self): + """Rebuild a server.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual({}, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" @@ -210,9 +240,51 @@ class ServersTest(integrated_helpers._IntegratedTestBase): found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + + def test_create_and_rebuild_server_with_metadata_removal(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + server_post['server']['metadata'] = metadata + + created_server = 
self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) # Cleanup self._delete_server(created_server_id) + if __name__ == "__main__": unittest.main() -- cgit From 4f8c995bbeca903319bcc1f314b25be0150eea2f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 30 May 2011 22:47:10 -0400 Subject: Updated compute api and manager to support image_refs in rebuild. --- nova/compute/api.py | 4 ++-- nova/compute/manager.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 61b45843d..e0f9ec8f3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -533,7 +533,7 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, + def rebuild(self, context, instance_id, image_ref, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -551,7 +551,7 @@ class API(base.Base): self.db.instance_update(context, instance_id, {"metadata": metadata}) rebuild_params = { - "image_id": image_id, + "image_ref": image_ref, "injected_files": files_to_inject, } diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7c88236ba..055d15c43 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -162,9 +162,9 @@ class ComputeManager(manager.SchedulerDependentManager): data = {'launched_at': launched_at or datetime.datetime.utcnow()} self.db.instance_update(context, instance_id, data) - def _update_image_id(self, context, instance_id, image_id): + def _update_image_ref(self, context, instance_id, image_ref): """Update the image_id for the given instance.""" - data = {'image_id': image_id} + data = {'image_ref': image_ref} self.db.instance_update(context, instance_id, data) def get_console_topic(self, context, **kwargs): @@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, image_ref): """Destroy and re-make this instance. 
A 'rebuild' effectively purges all existing data from the system and @@ -339,7 +339,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_id: Image identifier (integer) + :param image_ref: Image identifier (href or integer) """ context = context.elevated() @@ -349,10 +349,10 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + instance_ref.image_ref = image_ref self.driver.spawn(instance_ref) - self._update_image_id(context, instance_id, image_id) + self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) -- cgit From d6cd02a07ab3b66a53689fb8edbf55db03b4bff2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 08:20:40 -0400 Subject: Actually remove the _action_resize code from the base Servers controller. The V11 and V10 controllers implement these now. --- nova/api/openstack/servers.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a3066e578..4bd7ddb14 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -333,19 +333,6 @@ class Controller(common.OpenstackController): def _action_resize(self, input_dict, req, id): return exc.HTTPNotImplemented() - """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing arguments for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() def _action_reboot(self, input_dict, req, id): if 'reboot' in input_dict and 'type' in input_dict['reboot']: -- cgit From 2bd6e5561339a6755709461dab9aa6cad4a1cf81 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 31 May 2011 09:51:20 -0400 Subject: pep8 fixes --- .../etc/xensource/scripts/ovs_configure_base_flows.py | 11 ++++++----- .../etc/xensource/scripts/ovs_configure_vif_flows.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py index 0186a3c8b..514a43a2d 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py @@ -34,13 +34,13 @@ def main(command, phys_dev_name, bridge_name): ovs_ofctl('del-flows', bridge_name) if command in ('online', 'reset'): - pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', 'Interface', - phys_dev_name, 'ofport') + pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get', + 'Interface', phys_dev_name, 'ofport') # these flows are lower priority than all VM-specific flows. 
- # allow all traffic from the physical NIC, as it is trusted (i.e., from a - # filtered vif, or from the physical infrastructure + # allow all traffic from the physical NIC, as it is trusted (i.e., + # from a filtered vif, or from the physical infrastructure) ovs_ofctl('add-flow', bridge_name, "priority=2,in_port=%s,actions=normal" % pnic_ofport) @@ -53,7 +53,8 @@ if __name__ == "__main__": print sys.argv script_name = os.path.basename(sys.argv[0]) print "This script configures base ovs flows." - print "usage: %s [online|offline|reset] phys-dev-name bridge-name" % script_name + print "usage: %s [online|offline|reset] phys-dev-name bridge-name" \ + % script_name print " ex: %s online eth0 xenbr0" % script_name sys.exit(1) else: diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py index 9fde69377..accd08b91 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py @@ -52,7 +52,7 @@ def main(command, vif_raw, net_type): vif_name, dom_id, vif_index = vif_raw.split('-') vif = "%s%s.%s" % (vif_name, dom_id, vif_index) bridge = "xenbr%s" % vif_index - + xsls = execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] -- cgit From 95f103f276f6eb7decd6ebd17ff4ac106bc7222f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 11:17:35 -0400 Subject: More specific error messages for resize requests. --- nova/api/openstack/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4bd7ddb14..1ec74bc2e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -606,7 +606,7 @@ class ControllerV10(Controller): self.compute_api.resize(req.environ['nova.context'], id, flavor_id) else: - LOG.exception(_("Missing arguments for resize")) + LOG.exception(_("Missing 'flavorId' argument for resize")) return faults.Fault(exc.HTTPUnprocessableEntity()) except Exception, e: LOG.exception(_("Error in resize %s"), e) @@ -707,7 +707,7 @@ class ControllerV11(Controller): self.compute_api.resize(req.environ['nova.context'], id, flavor_id) else: - LOG.exception(_("Missing arguments for resize")) + LOG.exception(_("Missing 'flavorRef' argument for resize")) return faults.Fault(exc.HTTPUnprocessableEntity()) except Exception, e: LOG.exception(_("Error in resize %s"), e) -- cgit From 1adb96550640a65a723635f2dc98e4595f95fd52 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 31 May 2011 08:26:11 -0700 Subject: edits based on ed's feedback --- doc/source/devref/distributed_scheduler.rst | 85 +++++++++++++++-------------- 1 file changed, 45 insertions(+), 40 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index a45505640..28ba20af7 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -1,3 +1,7 @@ + + + + .. Copyright 2011 OpenStack LLC All Rights Reserved. @@ -17,7 +21,7 @@ Distributed Scheduler ===== -The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and Compute nodes are selected for where the work should be performed. 
In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). +The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone). But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone. @@ -29,7 +33,7 @@ This document will explain the strategy employed by the `ZoneAwareScheduler` and Costs & Weights ---------- -When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put an Instance with 8G of RAM on a Host that only has 4G remaining would have a very high cost. But putting a 512m RAM instance on an empty Host should have a low cost. +When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost. Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost. @@ -44,73 +48,73 @@ This Weight is computed for each Instance requested. If the customer asked for 1 nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler ----------- -As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about Child Zones and each of the Services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. Here is how it works: -1. The Compute nodes are Filtered and the nodes remaining are Weighed. -1a. Filtering the hosts is a simple matter of ensuring the Compute node has ample resources (CPU, RAM, DISK, etc) to fulfil the request. -1b. Weighing of the remaining Compute nodes assigns a number based on their suitability for the request. -2.
The same request is sent to each Child Zone and step #1 is done there too. The resulting Weighted List is returned to the parent. -3. The Parent Zone sorts and aggregates all the Weights and a final Build Plan is constructed. -4. The Build Plan is executed upon. Concurrently, Instance Create requests are sent to each of the selected Hosts, be they local or in a child zone. Child Zones may forward the requests to their Child Zones as needed. +1. The compute nodes are filtered and the nodes remaining are weighed. +1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request. +1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request. +2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent. +3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed. +4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed. -`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which Host filtering and Weighing strategy will be used. We'll go into more detail on that later. +`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used. Filtering and Weighing ------------ -Filtering (excluding Compute nodes incapable of fulfilling the request) and Weighing (computing the relative "fitness" of a Compute node to fulfill the request) are very subjective operations. Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. We will explain how to do this later in this document. +The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. Requesting a new instance ------------ -To request a new instance, a call is made to `nova.compute.api.create()`. The type of instance created depends on the value of the `InstanceType` record being passed in. The `InstanceType` determines the amount of disk, cpu, ram and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table, but we'll discuss that later. +Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table. 
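For reference, an `InstanceType` record is essentially a dict of sizing fields, roughly along these lines (the field names and values are illustrative; the exact column set may differ between releases):

::

    # Illustrative only -- roughly the shape of an InstanceType record.
    instance_type = {'name': 'm1.small',
                     'memory_mb': 2048,
                     'vcpus': 1,
                     'local_gb': 20,
                     'flavorid': 2}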
-`nova.compute.api.create()` performs the following actions: -1. it validates all the fields passed into it. -2. it creates an entry in the `Instance` table for each instance requested -3. it puts one `run_instance` message in the scheduler queue for each instance requested -4. the schedulers pick off the messages and decide which Compute node should handle the request. -5. the `run_instance` message is forwarded to the Compute node for processing and the instance is created. -6. it returns a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`'s are valid. +`nova.compute.api.create()` performed the following actions: +1. it validated all the fields passed into it. +2. it created an entry in the `Instance` table for each instance requested +3. it put one `run_instance` message in the scheduler queue for each instance requested +4. the schedulers picked off the messages and decided which compute node should handle the request. +5. the `run_instance` message was forwarded to the compute node for processing and the instance is created. +6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid. -Generally, the standard schedulers (like `ChangeScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of Child Zones. +Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones. -The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. +The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. 2. it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue 4. a scheduler picks the message off the queue and works on it. -5. the scheduler sends off an OS API `POST /zones/select` command to each Child Zone. The `BODY` payload of the call contains the `request_spec`. -6. the Child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. -7. if the Child Zone has its own Child Zone's, the `/zones/select` call will be sent down to them as well. +5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`. +6. 
the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones. +7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well. 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed. -9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant Child Zone. The parameters to the Child Zone call are the same as what was passed in by the user. +9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user. 10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. The Catch ------------- -This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, Database and set of Nova Services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. +This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world. -When `POST /zones/select` is called to estimate which Compute node to use, time passes until the `POST /servers` call is issued. If we only passed the Weight back from the `select` we would have to re-compute the appropriate Compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to Child Zones asking for estimates. +When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. 
Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates. -Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. +Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`. In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use. -Reservation ID's +Reservation IDs --------------- NOTE: The features described in this section are related to the up-coming 'merge-4' branch. The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created. -NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be bad. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. +NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled. -We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/servers` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
+We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches. Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter. @@ -119,14 +123,15 @@ Finally, we need to give the user a way to get information on each of the instan Host Filter -------------- -As we mentioned earlier, filtering hosts is a very deployment specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. +As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms. + +The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others: -The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others. + * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. -`nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`. -`nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. + * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples. -To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide the create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. 
The real work is done in `filter_hosts` which must return a list of weight tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The weight tuple contains (``, ``) where `` is whatever you want it to be. +To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (``, ``) where `` is whatever you want it to be. Cost Scheduler Weighing -------------- @@ -134,9 +139,9 @@ Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` metho Simple Zone Aware Scheduling -------------- -The easiest way to get started with the Zone Aware Scheduler is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. +The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. -The `--scheduler_driver` flag is how you specify the Scheduler class name. +The `--scheduler_driver` flag is how you specify the scheduler class name. Flags -------------- @@ -153,9 +158,9 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf `--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. `--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. -`--zone_name` is only required in Child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. +`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. `build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. -`scheduler_driver` is the real work horse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler` +`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`.
`default_host_filter` is the host filter to be used for filtering candidate Compute nodes. Some optional flags which are handy for debugging are: -- cgit From 099c29549a70cb88a6266a5e4145f855e1862d99 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 31 May 2011 11:58:15 -0400 Subject: Handle the case when a v1.0 api tries to list servers that contain image hrefs. --- nova/api/openstack/servers.py | 12 ++++++++++-- nova/api/openstack/views/servers.py | 6 +++++- nova/exception.py | 7 ++++++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7593694bd..33b677ffd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -69,11 +69,19 @@ class Controller(common.OpenstackController): def index(self, req): """ Returns a list of server names and ids for a given user """ - return self._items(req, is_detail=False) + try: + servers = self._items(req, is_detail=False) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers def detail(self, req): """ Returns a list of server details for a given user """ - return self._items(req, is_detail=True) + try: + servers = self._items(req, is_detail=True) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers def _image_ref_from_req_data(self, data): raise NotImplementedError() diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index dd1d68ff0..595a54790 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -18,6 +18,7 @@ import hashlib import os +from nova import exception from nova.compute import power_state import nova.compute import nova.context @@ -113,7 +114,10 @@ class ViewBuilderV10(ViewBuilder): def _build_image(self, response, inst): if 'image_ref' in dict(inst): - response['imageId'] = int(inst['image_ref']) + image_ref = inst['image_ref'] + if str(image_ref).startswith('http'): + raise exception.ListingImageRefsNotSupported(); + response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): diff --git a/nova/exception.py b/nova/exception.py index 5b91e1cde..6ea6c3620 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -284,7 +284,12 @@ class DiskNotFound(NotFound): class InvalidImageRef(Invalid): - message = _("Invalid image ref %(image_href)s.") + message = _("Invalid image href %(image_href)s.") + + +class ListingImageRefsNotSupported(Invalid): + message = _("Some images have been stored via hrefs." 
+ + " This version of the api does not support displaying image hrefs.") class ImageNotFound(NotFound): -- cgit From 770c0a5ecd2e19318e5b581de1f23e4e1d3f5f9d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 31 May 2011 12:37:36 -0400 Subject: removing semicolon --- nova/api/openstack/views/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 595a54790..b2352e3fd 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -116,7 +116,7 @@ class ViewBuilderV10(ViewBuilder): if 'image_ref' in dict(inst): image_ref = inst['image_ref'] if str(image_ref).startswith('http'): - raise exception.ListingImageRefsNotSupported(); + raise exception.ListingImageRefsNotSupported() response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): -- cgit From 7beafb1aafac97e6dfc28108062785465cc8f577 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 14:38:12 -0400 Subject: Use a new instance_metadata_delete_all DB api call to delete existing metadata when updating a server. --- nova/db/sqlalchemy/api.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ea84e96e7..8df96cbf4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,8 +1013,9 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: + instance_metadata_delete_all(context, instance_id) instance_metadata_update_or_create(context, instance_id, - values.pop('metadata'), True) + values.pop('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2554,6 +2555,17 @@ def instance_metadata_delete(context, instance_id, key): 'updated_at': literal_column('updated_at')}) +@require_context +def instance_metadata_delete_all(context, instance_id): + session = get_session() + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + @require_context def instance_metadata_get_item(context, instance_id, key): session = get_session() @@ -2571,8 +2583,7 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata, - purge=False): +def instance_metadata_update_or_create(context, instance_id, metadata): session = get_session() original_metadata = instance_metadata_get(context, instance_id) @@ -2589,9 +2600,4 @@ def instance_metadata_update_or_create(context, instance_id, metadata, "deleted": 0}) meta_ref.save(session=session) - if purge: - for key in original_metadata.keys(): - if not key in metadata.keys(): - instance_metadata_delete(context, instance_id, key) - return metadata -- cgit From f2da479b8988cd55d39e89935b10e0b348df43c9 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 31 May 2011 23:36:49 +0400 Subject: Moved everything from thread-local storage to class attributes --- nova/auth/ldapdriver.py | 38 +++++++++++--------------------------- nova/auth/manager.py | 14 +++----------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 9fe0165a1..91f412baa 100644 --- 
a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -26,7 +26,6 @@ public methods. import functools import sys -import threading from nova import exception from nova import flags @@ -106,7 +105,8 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' - __local = threading.local() + conn = None + mc = None def __init__(self): """Imports the LDAP module""" @@ -117,15 +117,22 @@ class LdapDriver(object): LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + if LdapDriver.mc is None: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" # TODO(yorik-sar): Should be per-request cache, not per-driver-request self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" self.__cache = None return False @@ -149,29 +156,6 @@ class LdapDriver(object): return inner return do_wrap - @property - def conn(self): - try: - return self.__local.conn - except AttributeError: - conn = self.ldap.initialize(FLAGS.ldap_url) - conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) - self.__local.conn = conn - return conn - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc - @sanitize @__local_cache('uid_user-%s') def get_user(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c71f0f161..c887297f3 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,6 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 -import threading import tempfile import uuid import zipfile @@ -207,7 +206,7 @@ class AuthManager(object): """ _instance = None - __local = threading.local() + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -224,19 +223,12 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: + if AuthManager.mc is None: if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From 5922b5dc166476adf550abbbacc21e4585e53a37 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 31 May 2011 21:23:36 +0000 Subject: Fixing Scheduler Tests --- nova/tests/scheduler/test_scheduler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 54b3f80fb..b0f0e882a 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ 
b/nova/tests/scheduler/test_scheduler.py @@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase): """Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + driver = 'nova.tests.scheduler.test_scheduler.TestDriver' + self.flags(scheduler_driver=driver) def _create_compute_service(self): """Create compute-manager(ComputeNode and Service record).""" -- cgit From b0c43e57ad6a7e5be8a749e70da39b7f7ba547bd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 May 2011 14:49:47 -0700 Subject: switch to using webob exception --- nova/api/ec2/metadatarequesthandler.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 720f264a4..9c8e52270 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -76,12 +76,10 @@ class MetadataRequestHandler(wsgi.Application): meta_data = cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) - resp = webob.Response() - resp.status = 500 - message = _('An unknown error has occurred. ' - 'Please try your request again.') - resp.body = str(utils.utf8(message)) - return resp + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + return exc if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() -- cgit From 81f40ed1ca284bc9a8ee948ae23fdff93d632cb0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 May 2011 15:50:33 -0700 Subject: pep8 --- nova/api/ec2/metadatarequesthandler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 9c8e52270..b70266a20 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -75,7 +75,8 @@ class MetadataRequestHandler(wsgi.Application): try: meta_data = cc.get_metadata(remote_address) except Exception: - LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) + LOG.exception(_('Failed to get metadata for ip: %s'), + remote_address) msg = _('An unknown error has occurred. ' 'Please try your request again.') exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) -- cgit From 16bd0ff62dccda5eba800b2762437d5e86faaafd Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 1 Jun 2011 09:20:15 -0400 Subject: Renamed migration to 020. --- .../migrate_repo/versions/019_rename_image_ids.py | 40 ---------------------- .../migrate_repo/versions/021_rename_image_ids.py | 40 ++++++++++++++++++++++ 2 files changed, 40 insertions(+), 40 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py deleted file mode 100644 index 73a5e8477..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_rename_image_ids.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, Integer, MetaData, String, Table - - -meta = MetaData() - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - image_id_column = instances.c.image_id - image_id_column.alter(name='image_ref') - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - image_ref_column = instances.c.image_ref - image_ref_column.alter(name='image_id') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py new file mode 100644 index 000000000..73a5e8477 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +meta = MetaData() + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') -- cgit From 9fc8e71f1b201adc0a5e49ac3a94e22bf47596fb Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 1 Jun 2011 10:17:00 -0400 Subject: pep8 fixes --- nova/log.py | 2 +- nova/tests/test_notifier.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 960598b14..6909916a1 100644 --- a/nova/log.py +++ b/nova/log.py @@ -272,7 +272,7 @@ class PublishErrorsHandler(logging.Handler): def emit(self, record): nova.notifier.api.notify('nova.error.publisher', 'error_notification', nova.notifier.api.ERROR, dict(error=record.msg)) - + def handle_exception(type, value, tb): extra = {} diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 523f38f24..64b799a2c 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -122,12 +122,13 @@ class NotifierTestCase(test.TestCase): self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) LOG = log.getLogger('nova') LOG.setup_from_flags() - msgs = [] + def mock_cast(context, topic, data): msgs.append(data) - self.stubs.Set(nova.rpc, 'cast', mock_cast) - LOG.error('foo'); + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo') self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_type'], 'error_notification') -- cgit From a26e21040681fd6db5a6ae862ca18ee17689854c Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:32:49 +0400 Subject: Moved memcached driver import to the top of modules. --- nova/auth/ldapdriver.py | 10 ++++++---- nova/auth/manager.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 91f412baa..e26a360af 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -69,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. 
I'm delaying # creating this now because I'm expecting an auth refactor @@ -121,10 +127,6 @@ class LdapDriver(object): LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c887297f3..98c7dd263 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -224,10 +230,6 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) if AuthManager.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', -- cgit From 4d1271821f782d4e11934d69b4ffe3aced6072eb Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:34:54 +0400 Subject: PEP8 fix. --- nova/auth/ldapdriver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e26a360af..95e31ae3b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,8 @@ class LdapDriver(object): self.__cache = None if LdapDriver.conn is None: LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) -- cgit From 8b716bc23ac4e5e5398db9557757621fccb08204 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:37:54 -0400 Subject: fix pep8 issues --- nova/compute/api.py | 3 +-- .../migrate_repo/versions/016_make_quotas_key_and_value.py | 3 +-- nova/scheduler/host_filter.py | 3 +-- nova/tests/api/openstack/test_servers.py | 3 +-- nova/tests/integrated/test_servers.py | 9 +++------ nova/tests/test_host_filter.py | 14 +++++--------- nova/tests/test_zone_aware_scheduler.py | 10 +++------- tools/install_venv.py | 2 +- 8 files changed, 16 insertions(+), 31 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index de774e807..3e991e68a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -270,8 +270,7 @@ class API(base.Base): 'instance_type': instance_type, 'filter': 'nova.scheduler.host_filter.' 
- 'InstanceTypeFilter' - }, + 'InstanceTypeFilter'}, "availability_zone": availability_zone, "injected_files": injected_files, "admin_password": admin_password}}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index a2d8192ca..1a2a6d7ce 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -160,8 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): 'project_id': quota.project_id, 'created_at': quota.created_at, 'updated_at': quota.updated_at, - quota.resource: quota.hard_limit - } + quota.resource: quota.hard_limit} else: quotas[quota.project_id]['created_at'] = earliest( quota.created_at, quotas[quota.project_id]['created_at']) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4260cbf42..8827db4d4 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,8 +226,7 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk] - ] + ['>=', '$compute.disk_available', required_disk]] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..20379e2bd 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -774,8 +774,7 @@ class ServersTest(test.TestCase): def server_update(context, id, params): filtered_dict = dict( - display_name='server_test' - ) + display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index a67fa1bb5..35c6bb34f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,8 +194,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -224,8 +223,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} for i in range(30): @@ -267,8 +265,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah" - } + "name": "blah"} metadata = {} post['rebuild']['metadata'] = metadata diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 07817cc5a..098ebff3d 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,13 +133,11 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], + ['<', '$compute.disk_available', 300]], ['and', ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] + ['>', '$compute.disk_available', 700]]] + cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) @@ -183,13 +181,11 @@ class 
HostFilterTestCase(test.TestCase): self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) + ['not', True, False, True, False]))) try: hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) + 'not', True, False, True, False)) self.fail("Should give KeyError") except KeyError, e: pass diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 37169fb97..90ae427e3 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -39,15 +39,11 @@ class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000} - }, + 'compute': {'ram': 1000}}, 'host2': { - 'compute': {'ram': 2000} - }, + 'compute': {'ram': 2000}}, 'host3': { - 'compute': {'ram': 3000} - } - } + 'compute': {'ram': 3000}}} class FakeEmptyZoneManager(zone_manager.ZoneManager): diff --git a/tools/install_venv.py b/tools/install_venv.py index 812b1dd0f..f4b6583ed 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -36,7 +36,7 @@ PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) def die(message, *args): - print >>sys.stderr, message % args + print >> sys.stderr, message % args sys.exit(1) -- cgit From c80fedead72456c18c3a0e63348e1a4d40c7e7c5 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:58:17 -0400 Subject: updates to keep things looking better --- nova/compute/api.py | 7 +++++-- .../versions/016_make_quotas_key_and_value.py | 3 ++- nova/scheduler/host_filter.py | 3 ++- nova/tests/api/openstack/test_servers.py | 3 +-- nova/tests/integrated/test_servers.py | 9 ++++++--- nova/tests/test_host_filter.py | 7 +++++-- nova/tests/test_zone_aware_scheduler.py | 16 ++++++++++------ 7 files changed, 31 insertions(+), 17 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3e991e68a..263e44bab 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -270,10 +270,13 @@ class API(base.Base): 'instance_type': instance_type, 'filter': 'nova.scheduler.host_filter.' 
- 'InstanceTypeFilter'}, + 'InstanceTypeFilter', + }, "availability_zone": availability_zone, "injected_files": injected_files, - "admin_password": admin_password}}) + "admin_password": admin_password, + }, + }) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 1a2a6d7ce..5d0593f2e 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -160,7 +160,8 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): 'project_id': quota.project_id, 'created_at': quota.created_at, 'updated_at': quota.updated_at, - quota.resource: quota.hard_limit} + quota.resource: quota.hard_limit, + } else: quotas[quota.project_id]['created_at'] = earliest( quota.created_at, quotas[quota.project_id]['created_at']) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 8827db4d4..7d6ee0ee3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,7 +226,8 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] + ['>=', '$compute.disk_available', required_disk], + ] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 20379e2bd..ee27d24eb 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -773,8 +773,7 @@ class ServersTest(test.TestCase): self.body = json.dumps(dict(server=inst_dict)) def server_update(context, id, params): - filtered_dict = dict( - display_name='server_test') + filtered_dict = dict(display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 35c6bb34f..fcb517cf5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -194,7 +194,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) @@ -223,7 +224,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} for i in range(30): @@ -265,7 +267,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): post = {} post['rebuild'] = { "imageRef": "https://localhost/v1.1/32278/images/2", - "name": "blah"} + "name": "blah", + } metadata = {} post['rebuild']['metadata'] = metadata diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 098ebff3d..3361c7b73 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,10 +133,13 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300]], + ['<', '$compute.disk_available', 300], + ], ['and', ['>', 
'$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700]]] + ['>', '$compute.disk_available', 700], + ], + ] cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 90ae427e3..72b74be20 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -38,12 +38,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { - 'host1': { - 'compute': {'ram': 1000}}, - 'host2': { - 'compute': {'ram': 2000}}, - 'host3': { - 'compute': {'ram': 3000}}} + 'host1': { + 'compute': {'ram': 1000}, + }, + 'host2': { + 'compute': {'ram': 2000}, + }, + 'host3': { + 'compute': {'ram': 3000}, + }, + } class FakeEmptyZoneManager(zone_manager.ZoneManager): -- cgit From ad964ef8934a14329a9100946bed26bcf37b1d52 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 1 Jun 2011 14:56:06 -0400 Subject: Updates to the 018_rename_server_management_url to avoid adding and dropping a column. Just simply rename the column. --- .../versions/018_rename_server_management_url.py | 29 ++++------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py index a169afb40..73c76f666 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -14,23 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging +from sqlalchemy import MetaData, Table meta = MetaData() -c_manageent = Column('server_manageent_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -c_management = Column('server_management_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; @@ -40,11 +27,8 @@ def upgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_management) - migrate_engine.execute(tokens.update() - .values(server_management_url=tokens.c.server_manageent_url)) - - tokens.c.server_manageent_url.drop() + c_manageent = tokens.c.server_manageent_url + c_manageent.alter(name='server_management_url') def downgrade(migrate_engine): @@ -53,8 +37,5 @@ def downgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_manageent) - migrate_engine.execute(tokens.update() - .values(server_manageent_url=tokens.c.server_management_url)) - - tokens.c.server_management_url.drop() + c_management = tokens.c.server_management_url + c_management.alter(name='server_manageent_url') -- cgit From d77aa5862762bc6efda46d92940143a1b9cbccf5 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 1 Jun 2011 14:46:05 -0500 Subject: Allow SSL AMQP connections. 
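The rpc Connection already assembles its carrot broker parameters from flags; with this change that same dict simply carries one extra ssl key. The sketch below uses illustrative values only (5671 is the conventional AMQP-over-SSL listener port, not a default set by this change):

    params = dict(hostname=FLAGS.rabbit_host,        # e.g. rabbit.example.com
                  port=FLAGS.rabbit_port,            # e.g. 5671 when SSL is enabled
                  ssl=FLAGS.rabbit_use_ssl,          # new flag, defaults to False
                  userid=FLAGS.rabbit_userid,
                  password=FLAGS.rabbit_password,
                  virtual_host=FLAGS.rabbit_virtual_host)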
--- nova/flags.py | 1 + nova/rpc.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 9eaac5596..d5090edba 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') +DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') diff --git a/nova/rpc.py b/nova/rpc.py index c5277c6a9..2e78a31e7 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection): if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, userid=FLAGS.rabbit_userid, password=FLAGS.rabbit_password, virtual_host=FLAGS.rabbit_virtual_host) -- cgit From ced79009e6555eb75f3862184834a883d37b2062 Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 1 Jun 2011 16:01:41 -0700 Subject: fixed as per peer review to make more consistent --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 51373d282..5de4d9e81 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 +536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance'].hostname + instance = floating_ip['fixed_ip']['instance']['hostname'] print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) -- cgit From ef1f5b3aadde2fedb4b2d197af0f1c0f07375714 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 16:51:26 -0700 Subject: fix novarc to work on mac and zsh --- nova/auth/novarc.template | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 8170fcafe..4a1f41802 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,5 @@ -NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From 8ee41f679bd72af6aab098f9d9735e342b281635 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 18:55:41 -0700 Subject: missed a couple chars --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 4a1f41802..92eed3520 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" -- cgit From 0e419c00ef9a463acc704f034e4c37929f0ef2eb Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 1 Jun 2011 23:37:51 -0400 Subject: image href should be passed through the rebuild pipeline, not the image id. 
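To illustrate the effect, a v1.1 rebuild now hands the compute API the full href instead of an integer parsed out of it. The href value below is borrowed from the integrated tests elsewhere in this series, and the metadata/files_to_inject arguments are placeholders:

    compute_api.rebuild(context, instance_id,
                        "https://localhost/v1.1/32278/images/2",  # image_href, no get_id_from_href()
                        name="blah", metadata={}, files_to_inject=[])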
--- nova/api/openstack/servers.py | 3 +-- nova/compute/api.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 55fed408c..0ef1a83da 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -714,7 +714,6 @@ class ControllerV11(Controller): LOG.debug(msg) return faults.Fault(exc.HTTPBadRequest(explanation=msg)) - image_id = common.get_id_from_href(image_href) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata") name = info["rebuild"].get("name") @@ -724,7 +723,7 @@ class ControllerV11(Controller): self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, name, + self.compute_api.rebuild(context, instance_id, image_href, name, metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id diff --git a/nova/compute/api.py b/nova/compute/api.py index 2b353cebb..6a1c68561 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -529,8 +529,8 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, name=None, metadata=None, - files_to_inject=None): + def rebuild(self, context, instance_id, image_href, name=None, + metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -550,7 +550,7 @@ class API(base.Base): self.db.instance_update(context, instance_id, values) rebuild_params = { - "image_ref": image_ref, + "image_ref": image_href, "injected_files": files_to_inject, } -- cgit From b39b0e66f16d49890189c63fba528734ef476068 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Thu, 2 Jun 2011 10:29:58 +0400 Subject: Refactored after review, fixed merge. --- nova/image/fake.py | 5 +---- nova/tests/test_libvirt.py | 10 +++++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index 4bf25d9af..019d683f4 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -82,12 +82,9 @@ class _FakeImageService(service.BaseImageService): :raises: Duplicate if the image already exist. 
""" - #image_id = int(metadata['id']) - # metadata['id'] may not exists, and since image_id is - # randomly generated in local.py, let us do the same here try: image_id = int(metadata['id']) - except: + except KeyError: image_id = random.randint(0, 2 ** 31 - 1) if self.images.get(image_id): diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d9316ab4f..d008a149e 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -328,14 +328,14 @@ class LibvirtConnTestCase(test.TestCase): # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup - self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') - libvirt_conn.utils.execute = fake_execute + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = fake_execute self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) -- cgit From b380e0de4b7c24607c16734a46b3e11d64947b01 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 11:12:35 -0400 Subject: Remove a rogue comment. --- nova/image/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/image/__init__.py b/nova/image/__init__.py index f42332a29..93d83df24 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -70,7 +70,6 @@ def get_glance_client(image_href): except: raise exception.InvalidImageRef(image_href=image_href) glance_client = GlanceClient(host, port) - #glance_client = client.Client(host, port) return (glance_client, image_id) -- cgit From 052f08256d2be2dda5ed792be48aa4f97cb93a93 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 11:38:20 -0400 Subject: Remove comment about imageRef not being implemented. --- nova/tests/integrated/integrated_helpers.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 5eacc829d..522c7cb0e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -207,10 +207,7 @@ class _IntegratedTestBase(test.TestCase): if 'imageRef' in image: image_href = image['imageRef'] else: - # NOTE(justinsb): The imageRef code hasn't yet landed - LOG.warning("imageRef not yet in images output") image_href = image['id'] - image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId -- cgit From 9034bb2fcd5f03df2b25d6114adc4e7d5f3549fe Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 2 Jun 2011 13:00:17 -0400 Subject: Remove some of the extra image service calls from the OS API images controller. 
--- nova/api/openstack/images.py | 6 ++---- nova/tests/api/openstack/fakes.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 87cbef791..59d9e3082 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -94,8 +94,7 @@ class Controller(object): context = req.environ['nova.context'] try: - (image_service, image_id) = nova.image.get_image_service(id) - image = image_service.show(context, image_id) + image = self._image_service.show(context, id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) @@ -109,8 +108,7 @@ class Controller(object): :param id: Image identifier (integer) """ context = req.environ['nova.context'] - (image_service, image_id) = nova.image.get_image_service(id) - image_service.delete(context, image_id) + self._image_service.delete(context, id) return webob.exc.HTTPNoContent() def create(self, req, body): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 97fc3900d..17d6d591c 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -211,7 +211,7 @@ def stub_out_glance(stubs, initial_fixtures=None): def _find_image(self, image_id): for f in self.fixtures: - if f['id'] == image_id: + if str(f['id']) == str(image_id): return f return None -- cgit From be2f5e986e41f8f8d63c0ef7a5c03916c70ba455 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 10:20:26 -0700 Subject: don't use python if readlink is available --- nova/auth/novarc.template | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 92eed3520..d30bd849c 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,5 +1,6 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) -NOVA_KEY_DIR=$(dirname ${NOVARC}) +NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || + NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") +NOVA_KEY_DIR=${NOVARC%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From 28320ced7afb2c224ab4e1cfb8a607646a2bd2e3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 11:28:41 -0700 Subject: use %% because % is a replacement string character --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index d30bd849c..eba3a8537 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,6 +1,6 @@ NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") -NOVA_KEY_DIR=${NOVARC%/*} +NOVA_KEY_DIR=${NOVARC%%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From ae1842174f4b079c8d84b32ddad4df1b7ff29bec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 12:01:49 -0700 Subject: Tests to assure all exceptions can be raised as well as fixing NotAuthorized --- nova/exception.py | 4 ++-- nova/tests/test_misc.py | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git 
a/nova/exception.py b/nova/exception.py index d3d58f3b2..5b824bba6 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -65,7 +65,7 @@ class BuildInProgress(Error): class DBError(Error): """Wraps an implementation specific exception.""" - def __init__(self, inner_exception): + def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) @@ -122,7 +122,7 @@ class NotAuthorized(NovaException): message = _("Not authorized.") def __init__(self, *args, **kwargs): - super(NotFound, self).__init__(**kwargs) + super(NotAuthorized, self).__init__(**kwargs) class AdminRequired(NotAuthorized): diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index cf8f4c05e..c5875a843 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -21,11 +21,24 @@ import select from eventlet import greenpool from eventlet import greenthread +from nova import exception from nova import test from nova import utils from nova.utils import parse_mailmap, str_dict_replace +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') -- cgit From b2fb1738db489206557abccb631b13991c31fd4e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:23:05 -0700 Subject: make all uses of utcnow use our testable utils.utcnow --- bin/nova-manage | 3 +-- nova/api/ec2/admin.py | 3 +-- nova/api/ec2/cloud.py | 5 ++-- nova/api/openstack/auth.py | 5 ++-- nova/api/openstack/contrib/__init__.py | 2 +- nova/api/openstack/limits.py | 2 +- nova/api/openstack/ratelimiting/__init__.py | 2 +- nova/compute/api.py | 3 +-- nova/compute/manager.py | 3 +-- nova/compute/monitor.py | 2 +- nova/context.py | 1 - nova/db/sqlalchemy/api.py | 29 +++++++++++----------- .../versions/016_make_quotas_key_and_value.py | 10 ++++---- nova/db/sqlalchemy/models.py | 9 +++---- nova/network/manager.py | 2 +- nova/notifier/api.py | 7 +++--- nova/scheduler/driver.py | 3 ++- nova/scheduler/simple.py | 11 ++++---- nova/test.py | 4 +-- nova/tests/api/openstack/fakes.py | 3 +-- nova/tests/api/openstack/test_images.py | 1 - nova/tests/api/openstack/test_servers.py | 8 +++--- nova/tests/test_compute.py | 5 ++-- nova/tests/test_console.py | 2 -- nova/tests/test_middleware.py | 1 - nova/tests/test_scheduler.py | 16 ++++++------ nova/utils.py | 2 +- nova/virt/xenapi/fake.py | 4 +-- nova/volume/api.py | 5 ++-- nova/volume/manager.py | 4 +-- 30 files changed, 69 insertions(+), 88 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..b545c4246 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,7 +53,6 @@ CLI interface for nova management. """ -import datetime import gettext import glob import json @@ -689,7 +688,7 @@ class ServiceCommands(object): """Show a list of all running services. Filter by host & service name. 
args: [host] [service]""" ctxt = context.get_admin_context() - now = datetime.datetime.utcnow() + now = utils.utcnow() services = db.service_get_all(ctxt) if host: services = [s for s in services if s['host'] == host] diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..aeebd86fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. """ import base64 -import datetime from nova import db from nova import exception @@ -305,7 +304,7 @@ class AdminController(object): * Volume Count """ services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] rv = [] for host in [service['host'] for service in services]: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 79cc3b3bf..04675174f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,6 @@ datastore. """ import base64 -import datetime import IPy import os import urllib @@ -235,7 +234,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -595,7 +594,7 @@ class CloudController(object): instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) - now = datetime.datetime.utcnow() + now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c6ee22a2..b49bf449b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -13,9 +13,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import hashlib import time @@ -127,7 +126,7 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.utcnow() - token['created_at'] + delta = utils.utcnow() - token['created_at'] if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py index b42a1d89d..acb5eb280 100644 --- a/nova/api/openstack/contrib/__init__.py +++ b/nova/api/openstack/contrib/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Contrib contains extensions that are shipped with nova. diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 4d46b92df..dc2bc6bbc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. 
""" Module dedicated functions/classes dealing with rate limiting requests. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 88ffc3246..9ede548c2 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Rate limiting of arbitrary actions.""" diff --git a/nova/compute/api.py b/nova/compute/api.py index 7122ebe67..de87ddd88 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -18,7 +18,6 @@ """Handles all requests relating to instances (guest vms).""" -import datetime import eventlet import re import time @@ -405,7 +404,7 @@ class API(base.Base): instance['id'], state_description='terminating', state=0, - terminated_at=datetime.datetime.utcnow()) + terminated_at=utils.utcnow()) host = instance['host'] if host: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3897b3a9e..a57d6e246 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,7 +35,6 @@ terminating it. """ -import datetime import os import socket import sys @@ -159,7 +158,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or datetime.datetime.utcnow()} + data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) def _update_image_id(self, context, instance_id, image_id): diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 3bb54a382..613734bef 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -86,7 +86,7 @@ RRD_VALUES = { ]} -utcnow = datetime.datetime.utcnow +utcnow = utils.utcnow LOG = logging.getLogger('nova.compute.monitor') diff --git a/nova/context.py b/nova/context.py index c113f7ea7..99085ed75 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,7 +18,6 @@ """RequestContext: context for requests that persist through all of nova.""" -import datetime import random from nova import exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c3a971a82..bb7bf5b89 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
""" -import datetime import warnings from nova import db @@ -674,7 +673,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter_by(allocated=0).\ update({'instance_id': None, 'leased': 0, - 'updated_at': datetime.datetime.utcnow()}, + 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -820,17 +819,17 @@ def instance_destroy(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1123,7 +1122,7 @@ def key_pair_destroy_all_by_user(context, user_id): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1655,7 +1654,7 @@ def volume_destroy(context, volume_id): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ @@ -1813,7 +1812,7 @@ def snapshot_destroy(context, snapshot_id): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1968,17 +1967,17 @@ def security_group_destroy(context, security_group_id): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1989,11 +1988,11 @@ def security_group_destroy_all(context, session=None): with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2627,7 +2626,7 @@ def instance_metadata_delete(context, instance_id, key): filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2638,7 +2637,7 @@ def instance_metadata_delete_all(context, instance_id): 
filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index 5d0593f2e..a4fe3e482 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -17,7 +17,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table -import datetime +from nova import utils meta = MetaData() @@ -35,9 +35,9 @@ def old_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -57,9 +57,9 @@ def new_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 22a1a84e8..dbe72efd9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -19,8 +19,6 @@ SQLAlchemy models for nova data. 
""" -import datetime - from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text @@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import utils FLAGS = flags.FLAGS @@ -43,8 +42,8 @@ class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False - created_at = Column(DateTime, default=datetime.datetime.utcnow) - updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -64,7 +63,7 @@ class NovaBase(object): def delete(self, session=None): """Delete this object.""" self.deleted = True - self.deleted_at = datetime.datetime.utcnow() + self.deleted_at = utils.utcnow() self.save(session=session) def __setitem__(self, key, value): diff --git a/nova/network/manager.py b/nova/network/manager.py index 5a6fdde5a..f726c4b26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -235,7 +235,7 @@ class NetworkManager(manager.SchedulerDependentManager): inst_addr = instance_ref['mac_address'] raise exception.Error(_('IP %(address)s leased to bad mac' ' %(inst_addr)s vs %(mac)s') % locals()) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039e..d49517c8b 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 2094e3565..0b257c5d8 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from nova.compute import power_state FLAGS = flags.FLAGS @@ -61,7 +62,7 @@ class Scheduler(object): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
- elapsed = datetime.datetime.utcnow() - last_heartbeat + elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..87cdef11d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/test.py b/nova/test.py index 80b2d0a74..60b599ce4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,7 +23,6 @@ inline callbacks. """ -import datetime import functools import os import shutil @@ -37,6 +36,7 @@ from eventlet import greenthread from nova import fakerabbit from nova import flags from nova import rpc +from nova import utils from nova import service from nova import wsgi from nova.virt import fake @@ -69,7 +69,7 @@ class TestCase(unittest.TestCase): # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. - self.start = datetime.datetime.utcnow() + self.start = utils.utcnow() shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8e0156afa..4fb0613fc 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
import copy -import datetime import json import random import string @@ -253,7 +252,7 @@ class FakeAuthDatabase(object): @staticmethod def auth_token_create(context, token): - fake_token = FakeToken(created_at=datetime.datetime.now(), **token) + fake_token = FakeToken(created_at=utils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 9f1f28611..93b402081 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import copy import json -import datetime import os import shutil import tempfile diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 11dcaaade..50d5fe980 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -16,7 +16,6 @@ # under the License. import base64 -import datetime import json import unittest from xml.dom import minidom @@ -29,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -114,9 +114,9 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "user_data": "", "reservation_id": "", "mac_address": "", - "scheduled_at": datetime.datetime.now(), - "launched_at": datetime.datetime.now(), - "terminated_at": datetime.datetime.now(), + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "server%s" % id, "display_description": "", diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..c726080ee 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867ee..831e7670f 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a6955..40d117c45 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 54b3f80fb..1cf6bbfbf 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -196,7 +196,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..b1638e72c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ 
-307,7 +307,7 @@ def get_my_linklocal(interface): def utcnow(): - """Overridable version of datetime.datetime.utcnow.""" + """Overridable version of utils.utcnow.""" if utcnow.override_time: return utcnow.override_time return datetime.datetime.utcnow() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 76988b172..165888cb2 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,13 +51,13 @@ A fake XenAPI SDK. """ -import datetime import uuid from pprint import pformat from nova import exception from nova import log as logging +from nova import utils _CLASSES = ['host', 'network', 'session', 'SR', 'VBD', @@ -540,7 +540,7 @@ class SessionBase(object): except Failure, exc: task['error_info'] = exc.details task['status'] = 'failed' - task['finished'] = datetime.datetime.now() + task['finished'] = utils.utcnow() return task_ref def _check_session(self, params): diff --git a/nova/volume/api.py b/nova/volume/api.py index 5804955f7..b07f2e94b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -20,14 +20,13 @@ Handles all requests relating to volumes. """ -import datetime -from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import rpc +from nova import utils from nova.db import base FLAGS = flags.FLAGS @@ -78,7 +77,7 @@ class API(base.Base): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) host = volume['host'] diff --git a/nova/volume/manager.py b/nova/volume/manager.py index ff53f0701..798bd379a 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,8 +42,6 @@ intact. """ -import datetime - from nova import context from nova import exception @@ -127,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'error'}) raise - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) -- cgit From 4762aebe4ddc57d8502ed3b5aec56b613d0ec93b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:51:30 -0700 Subject: switch zones to use utcnow --- nova/scheduler/zone_manager.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3ddf6f3c3..3f483adff 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -17,16 +17,17 @@ ZoneManager oversees all communications with child Zones. 
""" +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { : ZoneState } self.service_states = {} # { : { : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) -- cgit From d7d628d58612b94491310a1a03727e1afa9d5ad5 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 20:45:36 -0500 Subject: Added paramiko to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index f1c5b2003..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -33,3 +33,4 @@ suds==0.4 coverage nosexcover GitPython +paramiko -- cgit From a3b8b3467d836463dda806c93756841a52c055d3 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Thu, 2 Jun 2011 21:18:09 -0500 Subject: added nova_adminclient to tools/pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index e81ef944a..035e4347d 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,3 +34,4 @@ coverage nosexcover GitPython paramiko +nova_adminclient -- cgit From 9ee103a91fe3bed03c3f4c6c1a6e89fa474e1aae Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 12:37:58 +0400 Subject: Fixed FakeLdapDriver, made it call LdapDriver.__init__ --- nova/auth/ldapdriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95e31ae3b..183f7a985 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -676,6 +676,7 @@ class LdapDriver(object): class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: 
disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() -- cgit From 72a47784dc09d9b840db146d58ea71f6af30a8ea Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 13:39:22 +0400 Subject: Flush AuthManager's cache before each test. --- nova/tests/test_auth.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f02dd94b7..7d00bddfe 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase): super(_AuthManagerBaseTestCase, self).setUp() self.flags(connection_type='fake') self.manager = manager.AuthManager(new=True) + self.manager.mc.cache = {} def test_create_and_find_user(self): with user_generator(self.manager): -- cgit From 29eec21f6752ef2c03412213a74aa12745286c82 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 3 Jun 2011 05:23:43 -0700 Subject: little tweaks --- doc/source/devref/distributed_scheduler.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index 28ba20af7..eb6a1a03e 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -1,7 +1,3 @@ - - - - .. Copyright 2011 OpenStack LLC All Rights Reserved. @@ -40,7 +36,7 @@ Some Costs are more esoteric. Consider a rule that says we should prefer Hosts t An example of some other costs might include selecting: * a GPU-based host over a standard CPU * a host with fast ethernet over a 10mbps line -* a host than can run Windows instances +* a host that can run Windows instances * a host in the EU vs North America * etc -- cgit From 25c8e9318c1ffbf2f2c88d3ed644df9e81b92b04 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 3 Jun 2011 11:52:20 -0400 Subject: Fixed pip-requires double requirement. --- tools/pip-requires | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/pip-requires b/tools/pip-requires index 035e4347d..e81ef944a 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,4 +34,3 @@ coverage nosexcover GitPython paramiko -nova_adminclient -- cgit From f521426039e8a9cc5dccc2c7e7e1797cfe778d7e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 3 Jun 2011 14:14:28 -0400 Subject: Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes issue where snapshots failed to get uploaded. --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0c00d168b..46031ebe8 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -244,7 +244,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): conn = httplib.HTTPConnection(glance_host, glance_port) # NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request - conn.putrequest('PUT', '/images/%s' % image_id) + conn.putrequest('PUT', '/v1/images/%s' % image_id) # NOTE(sirp): There is some confusion around OVF. 
Here's a summary of # where we currently stand: -- cgit From f6aa513024e14975709ef8facf1db6535eefbc44 Mon Sep 17 00:00:00 2001 From: Justin Shepherd Date: Fri, 3 Jun 2011 13:20:34 -0500 Subject: added 'nova-manage config list' which will list out all of the flags and their values. I also alphabetized the list of available categories --- bin/nova-manage | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e81..fb3810779 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1081,24 +1081,35 @@ class ImageCommands(object): self._convert_images(machine_images) +class ConfigCommands(object): + """Class for exposing the flags defined by flag_file(s).""" + + def __init__(self): + pass + + def list(self): + print FLAGS.FlagsIntoString() + + CATEGORIES = [ - ('user', UserCommands), ('account', AccountCommands), - ('project', ProjectCommands), - ('role', RoleCommands), - ('shell', ShellCommands), - ('vpn', VpnCommands), + ('config', ConfigCommands), + ('db', DbCommands), ('fixed', FixedIpCommands), + ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), + ('instance_type', InstanceTypeCommands), + ('image', ImageCommands), ('network', NetworkCommands), - ('vm', VmCommands), + ('project', ProjectCommands), + ('role', RoleCommands), ('service', ServiceCommands), - ('db', DbCommands), + ('shell', ShellCommands), + ('user', UserCommands), + ('version', VersionCommands), + ('vm', VmCommands), ('volume', VolumeCommands), - ('instance_type', InstanceTypeCommands), - ('image', ImageCommands), - ('flavor', InstanceTypeCommands), - ('version', VersionCommands)] + ('vpn', VpnCommands)] def lazy_match(name, key_value_tuples): -- cgit
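Context for the recurring change in the commits above (this note is not part of the patch series itself): nearly every datetime.datetime.utcnow() call in these diffs is replaced with utils.utcnow(), and the nova/utils.py hunk shows why — utcnow() consults an override_time attribute before falling back to the real clock, so tests and the scheduler heartbeat logic can run against a controllable "now". Below is a minimal standalone sketch of that pattern; the set_time_override/clear_time_override helper names are illustrative assumptions and do not appear in the patches shown here.

import datetime


def utcnow():
    """Overridable version of datetime.datetime.utcnow()."""
    if utcnow.override_time:
        return utcnow.override_time
    return datetime.datetime.utcnow()

# No override by default: utcnow() returns the real current time.
utcnow.override_time = None


def set_time_override(override_time):
    """Freeze the time returned by utcnow() (illustrative helper)."""
    utcnow.override_time = override_time


def clear_time_override():
    """Return to the real clock (illustrative helper)."""
    utcnow.override_time = None


if __name__ == '__main__':
    # Simulate a stale service heartbeat the way the scheduler tests above
    # construct an old 'updated_at' value, then do service_is_up-style math.
    past = datetime.datetime(2011, 6, 3, 12, 0, 0)
    set_time_override(past)
    assert utcnow() == past                     # code under test sees frozen time
    clear_time_override()
    elapsed = utcnow() - past                   # real clock again
    print(elapsed > datetime.timedelta(seconds=0))

Keeping the override on the function object itself is what makes the mechanical swap in these patches low-risk: callers only ever import utils.utcnow, and the production path is a single attribute check before delegating to datetime.datetime.utcnow().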