From 08d60702f9995f9e756a16c733f6a26b9d0f5019 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 22 Jul 2011 16:56:00 -0700 Subject: merge --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 005fa73e7..4706da6ea 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -879,7 +879,6 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): def _setup_network(self, context, network_ref): """Sets up network on this host.""" - network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) if not network_ref['vpn_public_address']: net = {} address = FLAGS.vpn_ip @@ -887,6 +886,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): network_ref = db.network_update(context, network_ref['id'], net) else: address = network_ref['vpn_public_address'] + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref['bridge_interface'], -- cgit From 873aad92944f8840e772d65eda4b3320d65a9ce7 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 1 Aug 2011 18:11:15 -0700 Subject: initial commit of vif-plugging for network-service interfaces --- bin/nova-dhcpbridge | 24 +++--- nova/network/linux_net.py | 205 ++++++++++++++++++++++++++++++++-------------- nova/network/manager.py | 28 ++----- nova/utils.py | 9 ++ nova/virt/libvirt/vif.py | 2 +- 5 files changed, 174 insertions(+), 94 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 325642d52..1727ebf9b 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -48,12 +48,11 @@ flags.DECLARE('auth_driver', 'nova.auth.manager') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') -flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface') LOG = logging.getLogger('nova.dhcpbridge') -def add_lease(mac, ip_address, _hostname, _interface): +def add_lease(mac, ip_address, _hostname): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: LOG.debug(_("leasing ip")) @@ -67,13 +66,13 @@ def add_lease(mac, ip_address, _hostname, _interface): "args": {"address": ip_address}}) -def old_lease(mac, ip_address, hostname, interface): +def old_lease(mac, ip_address, hostname): """Update just as add lease.""" LOG.debug(_("Adopted old lease or got a change of mac/hostname")) - add_lease(mac, ip_address, hostname, interface) + add_lease(mac, ip_address, hostname) -def del_lease(mac, ip_address, _hostname, _interface): +def del_lease(mac, ip_address, _hostname): """Called when a lease expires.""" if FLAGS.fake_rabbit: LOG.debug(_("releasing ip")) @@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _hostname, _interface): "args": {"address": ip_address}}) -def init_leases(interface): - """Get the list of hosts for an interface.""" +def init_leases(network_id): + """Get the list of hosts for a network.""" ctxt = context.get_admin_context() - network_ref = db.network_get_by_bridge(ctxt, interface) + network_ref = db.network_get(ctxt, network_id) return linux_net.get_dhcp_leases(ctxt, network_ref) @@ -101,7 +100,8 @@ def main(): argv = FLAGS(sys.argv) logging.setup() # check ENV first so we don't break any older deploys - interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) + network_id = int(os.environ.get('NETWORK_ID')) + if 
int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags @@ -117,11 +117,11 @@ def main(): ip = argv[3] hostname = argv[4] msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and" - " hostname %(hostname)s on interface %(interface)s") % locals() + " hostname %(hostname)s on network %(network_id)s") % locals() LOG.debug(msg) - globals()[action + '_lease'](mac, ip, hostname, interface) + globals()[action + '_lease'](mac, ip, hostname) else: - print init_leases(interface) + print init_leases(network_id) if __name__ == "__main__": main() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8ace07884..ee0ef0b85 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -63,6 +63,11 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', 'dmz range that should be accepted') flags.DEFINE_string('dnsmasq_config_file', "", 'Override the default dnsmasq settings with this file') +flags.DEFINE_string('linuxnet_interface_driver', + 'nova.network.linux_net.LinuxBridgeInterfaceDriver', + 'Driver used to create ethernet devices.') +flags.DEFINE_string('linuxnet_ovs_integration_bridge', + 'br-int', 'Name of Open vSwitch bridge used with linuxnet') binary_name = os.path.basename(inspect.stack()[-1][1]) @@ -413,7 +418,7 @@ def ensure_metadata_ip(): 'scope', 'link', 'dev', 'lo', check_exit_code=False) -def ensure_vlan_forward(public_ip, port, private_ip): +def ensure_vpn_forward(public_ip, port, private_ip): """Sets up forwarding rules for vlan.""" iptables_manager.ipv4['filter'].add_rule('FORWARD', '-d %s -p udp ' @@ -491,32 +496,11 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute('sudo', 'brctl', 'setfd', bridge, 0) # _execute('sudo brctl setageing %s 10' % bridge) _execute('sudo', 'brctl', 'stp', bridge, 'off') - _execute('sudo', 'ip', 'link', 'set', bridge, 'up') - if net_attrs: - # NOTE(vish): The ip for dnsmasq has to be the first address on the - # bridge for it to respond to reqests properly - suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute('sudo', 'ip', 'addr', 'add', - '%s/%s' % - (net_attrs['dhcp_server'], suffix), - 'brd', - net_attrs['broadcast'], - 'dev', - bridge, - check_exit_code=False) - if err and err != 'RTNETLINK answers: File exists\n': - raise exception.Error('Failed to add ip: %s' % err) - if(FLAGS.use_ipv6): - _execute('sudo', 'ip', '-f', 'inet6', 'addr', - 'change', net_attrs['cidr_v6'], - 'dev', bridge) - # NOTE(vish): If the public interface is the same as the - # bridge, then the bridge has to be in promiscuous - # to forward packets properly. 
- if(FLAGS.public_interface == bridge): - _execute('sudo', 'ip', 'link', 'set', - 'dev', bridge, 'promisc', 'on') + if interface: + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, + check_exit_code=False) + # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge gateway = None @@ -526,9 +510,9 @@ def ensure_bridge(bridge, interface, net_attrs=None): if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] _execute('sudo', 'route', 'del', 'default', 'gw', gateway, - 'dev', interface, check_exit_code=False) + 'dev', interface, check_exit_code=False) out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, - 'scope', 'global') + 'scope', 'global') for line in out.split('\n'): fields = line.split() if fields and fields[0] == 'inet': @@ -537,8 +521,6 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute(*_ip_bridge_cmd('add', params, bridge)) if gateway: _execute('sudo', 'route', 'add', 'default', 'gw', gateway) - out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, - check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): @@ -552,6 +534,36 @@ def ensure_bridge(bridge, interface, net_attrs=None): bridge) +def initialize_gateway_device(dev, network_ref): + if not network_ref: + return + + # NOTE(vish): The ip for dnsmasq has to be the first address on the + # bridge for it to respond to reqests properly + suffix = network_ref['cidr'].rpartition('/')[2] + out, err = _execute('sudo', 'ip', 'addr', 'add', + '%s/%s' % + (network_ref['dhcp_server'], suffix), + 'brd', + network_ref['broadcast'], + 'dev', + dev, + check_exit_code=False) + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) + if(FLAGS.use_ipv6): + _execute('sudo', 'ip', '-f', 'inet6', 'addr', + 'change', network_ref['cidr_v6'], + 'dev', dev) + # NOTE(vish): If the public interface is the same as the + # bridge, then the bridge has to be in promiscuous + # to forward packets properly. + if(FLAGS.public_interface == dev): + _execute('sudo', 'ip', 'link', 'set', + 'dev', dev, 'promisc', 'on') + _execute('sudo', 'ip', 'link', 'set', dev, 'up') + + def get_dhcp_leases(context, network_ref): """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] @@ -580,21 +592,21 @@ def get_dhcp_hosts(context, network_ref): # configuration options (like dchp-range, vlan, ...) # aren't reloaded. @utils.synchronized('dnsmasq_start') -def update_dhcp(context, network_ref): +def update_dhcp(context, dev, network_ref): """(Re)starts a dnsmasq server for a given network. If a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance. 
""" - conffile = _dhcp_file(network_ref['bridge'], 'conf') + conffile = _dhcp_file(dev, 'conf') with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_ref)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _dnsmasq_pid_for(network_ref['bridge']) + pid = _dnsmasq_pid_for(dev) # if dnsmasq is already running, then tell it to reload if pid: @@ -609,16 +621,16 @@ def update_dhcp(context, network_ref): else: LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) - # FLAGFILE and DNSMASQ_INTERFACE in env + # FLAGFILE and NETWORK_ID in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE': network_ref['bridge']} - command = _dnsmasq_cmd(network_ref) + 'NETWORK_ID': str(network_ref['id'])} + command = _dnsmasq_cmd(dev, network_ref) _execute(*command, addl_env=env) @utils.synchronized('radvd_start') -def update_ra(context, network_ref): - conffile = _ra_file(network_ref['bridge'], 'conf') +def update_ra(context, dev, network_ref): + conffile = _ra_file(dev, 'conf') with open(conffile, 'w') as f: conf_str = """ interface %s @@ -632,13 +644,13 @@ interface %s AdvAutonomous on; }; }; -""" % (network_ref['bridge'], network_ref['cidr_v6']) +""" % (dev, network_ref['cidr_v6']) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _ra_pid_for(network_ref['bridge']) + pid = _ra_pid_for(dev) # if radvd is already running, then tell it to reload if pid: @@ -651,7 +663,7 @@ interface %s LOG.debug(_('killing radvd threw %s'), exc) else: LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) - command = _ra_cmd(network_ref) + command = _ra_cmd(dev) _execute(*command) @@ -696,20 +708,20 @@ def _device_exists(device): return not err -def _dnsmasq_cmd(net): +def _dnsmasq_cmd(dev, net): """Builds dnsmasq command.""" cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', - '--interface=%s' % net['bridge'], + '--interface=%s' % dev, '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, - '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), + '--pid-file=%s' % _dhcp_file(dev, 'pid'), '--listen-address=%s' % net['dhcp_server'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), - '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), + '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] if FLAGS.dns_server: @@ -717,18 +729,18 @@ def _dnsmasq_cmd(net): return cmd -def _ra_cmd(net): +def _ra_cmd(dev): """Builds radvd command.""" cmd = ['sudo', '-E', 'radvd', # '-u', 'nobody', - '-C', '%s' % _ra_file(net['bridge'], 'conf'), - '-p', '%s' % _ra_file(net['bridge'], 'pid')] + '-C', '%s' % _ra_file(dev, 'conf'), + '-p', '%s' % _ra_file(dev, 'pid')] return cmd -def _stop_dnsmasq(network): +def _stop_dnsmasq(dev): """Stops the dnsmasq instance for a given network.""" - pid = _dnsmasq_pid_for(network) + pid = _dnsmasq_pid_for(dev) if pid: try: @@ -737,49 +749,49 @@ def _stop_dnsmasq(network): LOG.debug(_('Killing dnsmasq threw %s'), exc) -def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge.""" +def _dhcp_file(dev, kind): + """Return path to a pid, leases or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def 
_ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge.""" +def _ra_file(dev, kind): + """Return path to a pid or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge. +def _dnsmasq_pid_for(dev): + """Returns the pid for prior dnsmasq instance for a bridge/device. Returns None if no pid file exists. If machine has rebooted pid might be incorrect (caller should check). """ - pid_file = _dhcp_file(bridge, 'pid') + pid_file = _dhcp_file(dev, 'pid') if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) -def _ra_pid_for(bridge): - """Returns the pid for prior radvd instance for a bridge. +def _ra_pid_for(dev): + """Returns the pid for prior radvd instance for a bridge/device. Returns None if no pid file exists. If machine has rebooted pid might be incorrect (caller should check). """ - pid_file = _ra_file(bridge, 'pid') + pid_file = _ra_file(dev, 'pid') if os.path.exists(pid_file): with open(pid_file, 'r') as f: @@ -795,3 +807,72 @@ def _ip_bridge_cmd(action, params, device): iptables_manager = IptablesManager() + +# Similar to compute virt layers, the Linux network node +# code uses a flexible driver model to support different ways +# of creating ethernet interfaces and attaching them to the network. +# In the case of a network host, these interfaces +# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces. + + +def plug(network): + return interface_driver.plug(network) + + +def unplug(network): + return interface_driver.unplug(network) + + +class LinuxNetInterfaceDriver(object): + """Abstract class that defines generic network host API""" + """ for for all Linux interface drivers.""" + + def plug(self, network): + """Create Linux device, return device name""" + raise NotImplementedError() + + def unplug(self, network): + """Destory Linux device, return device name""" + raise NotImplementedError() + + +# plugs interfaces using Linux Bridge +class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): + + def plug(self, network): + self.driver.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface'], + network) + return network['bridge'] + + def unplug(self, network): + return network['bridge'] + + +# plugs interfaces using Open vSwitch +class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): + + def plug(self, network): + dev = "gw-" + str(network['id']) + if not _device_exists(dev): + bridge = FLAGS.linuxnet_ovs_integration_bridge + mac_addr = utils.generate_mac_address() + _execute('sudo', 'ovs-vsctl', + '--', '--may-exist', 'add-port', bridge, dev, + '--', 'set', 'Interface', dev, "type=internal", + '--', 'set', 'Interface', dev, + "external-ids:iface-id=nova-%s" % dev, + '--', 'set', 'Interface', dev, + "external-ids:iface-status=active", + '--', 'set', 'Interface', dev, + "external-ids:attached-mac=%s" % mac_addr) + _execute('sudo', 'ip', 'link', 'set', + dev, "address", mac_addr) + return dev + + def unplug(self, network): + dev = "gw-" + str(network['id']) + return dev + +interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver) diff --git a/nova/network/manager.py b/nova/network/manager.py index 4706da6ea..0fc55f441 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -504,7 +504,7 @@ class NetworkManager(manager.SchedulerDependentManager): 
def _allocate_mac_addresses(self, context, instance_id, networks): """Generates mac addresses and creates vif rows in db for them.""" for network in networks: - vif = {'address': self.generate_mac_address(), + vif = {'address': utils.generate_mac_address(), 'instance_id': instance_id, 'network_id': network['id']} # try FLAG times to create a vif record with a unique mac_address @@ -513,20 +513,12 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.virtual_interface_create(context, vif) break except exception.VirtualInterfaceCreateException: - vif['address'] = self.generate_mac_address() + vif['address'] = utils.generate_mac_address() else: self.db.virtual_interface_delete_by_instance(context, instance_id) raise exception.VirtualInterfaceMacAddressException() - def generate_mac_address(self): - """Generate a mac address for a vif on an instance.""" - mac = [0x02, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" networks = [self.db.network_get(context, network_id)] @@ -887,23 +879,21 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): else: address = network_ref['vpn_public_address'] network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + dev = self.driver.plug(network_ref) + self.driver.initialize_gateway_device(dev, network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. if address == FLAGS.vpn_ip and hasattr(self.driver, - "ensure_vlan_forward"): - self.driver.ensure_vlan_forward(FLAGS.vpn_ip, + "ensure_vpn_forward"): + self.driver.ensure_vpn_forward(FLAGS.vpn_ip, network_ref['vpn_public_port'], network_ref['vpn_private_address']) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) diff --git a/nova/utils.py b/nova/utils.py index 8784a227d..d56fa614c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -320,6 +320,15 @@ def get_my_linklocal(interface): " :%(ex)s") % locals()) +def generate_mac_address(): + """Generate an Ethernet MAC address.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index aa566cc72..432a585ad 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -99,8 +99,8 @@ class LibvirtOpenVswitchDriver(VIFDriver): def plug(self, instance, network, mapping): vif_id = str(instance['id']) + "-" + str(network['id']) dev = "tap-%s" % vif_id + iface_id = "nova-" + vif_id if not linux_net._device_exists(dev): - iface_id = "nova-" + vif_id utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') utils.execute('sudo', 'ovs-vsctl', 
'--', '--may-exist', 'add-port', -- cgit From 857f4453efaca98ced3e07d55ee6f0188713e60e Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 1 Aug 2011 18:30:59 -0700 Subject: fix LinuxBridgeInterfaceDriver --- nova/network/linux_net.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ee0ef0b85..ce1f53ab9 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -840,10 +840,10 @@ class LinuxNetInterfaceDriver(object): class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): - self.driver.ensure_vlan_bridge(network['vlan'], - network['bridge'], - network['bridge_interface'], - network) + ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface'], + network) return network['bridge'] def unplug(self, network): -- cgit From dfc9c9c2b5e92e599bdeae4c03d3761215a0deca Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 4 Aug 2011 12:36:11 -0700 Subject: modify _setup_network for flatDHCP as well --- nova/network/linux_net.py | 12 ++++++++---- nova/network/manager.py | 13 +++++++------ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ce1f53ab9..7012cceeb 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -805,9 +805,6 @@ def _ip_bridge_cmd(action, params, device): cmd.extend(['dev', device]) return cmd - -iptables_manager = IptablesManager() - # Similar to compute virt layers, the Linux network node # code uses a flexible driver model to support different ways # of creating ethernet interfaces and attaching them to the network. @@ -840,10 +837,16 @@ class LinuxNetInterfaceDriver(object): class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): - ensure_vlan_bridge(network['vlan'], + if network.get('vlan', None) is not None: + ensure_vlan_bridge(network['vlan'], network['bridge'], network['bridge_interface'], network) + else: + ensure_bridge(network['bridge'], + network['bridge_interface'], + network) + return network['bridge'] def unplug(self, network): @@ -875,4 +878,5 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): dev = "gw-" + str(network['id']) return dev +iptables_manager = IptablesManager() interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver) diff --git a/nova/network/manager.py b/nova/network/manager.py index bedbe2c21..e7c84d478 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -785,14 +785,15 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): def _setup_network(self, context, network_ref): """Sets up network on this host.""" network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_bridge(network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + + dev = self.driver.plug(network_ref) + self.driver.initialize_gateway_device(dev, network_ref) + if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) -- cgit From 18f09f165b5dca5f11253b143045b2ff7327532d Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 4 Aug 2011 16:20:38 -0700 
Subject: move ensure_vlan_bridge,ensure_bridge,ensure_vlan to the bridge/vlan specific vif-plugging driver --- nova/network/linux_net.py | 166 ++++++++++++++++++++++++---------------------- nova/virt/libvirt/vif.py | 6 +- 2 files changed, 90 insertions(+), 82 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7012cceeb..17b63a849 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -456,84 +456,6 @@ def floating_forward_rules(floating_ip, fixed_ip): '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] -def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - interface = ensure_vlan(vlan_num, bridge_interface) - ensure_bridge(bridge, interface, net_attrs) - return interface - - -@utils.synchronized('ensure_vlan', external=True) -def ensure_vlan(vlan_num, bridge_interface): - """Create a vlan unless it already exists.""" - interface = 'vlan%s' % vlan_num - if not _device_exists(interface): - LOG.debug(_('Starting VLAN inteface %s'), interface) - _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') - _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) - _execute('sudo', 'ip', 'link', 'set', interface, 'up') - return interface - - -@utils.synchronized('ensure_bridge', external=True) -def ensure_bridge(bridge, interface, net_attrs=None): - """Create a bridge unless it already exists. - - :param interface: the interface to create the bridge on. - :param net_attrs: dictionary with attributes used to create the bridge. - - If net_attrs is set, it will add the net_attrs['gateway'] to the bridge - using net_attrs['broadcast'] and net_attrs['cidr']. It will also add - the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. - - The code will attempt to move any ips that already exist on the interface - onto the bridge and reset the default gateway if necessary. 
- - """ - if not _device_exists(bridge): - LOG.debug(_('Starting Bridge interface for %s'), interface) - _execute('sudo', 'brctl', 'addbr', bridge) - _execute('sudo', 'brctl', 'setfd', bridge, 0) - # _execute('sudo brctl setageing %s 10' % bridge) - _execute('sudo', 'brctl', 'stp', bridge, 'off') - - if interface: - out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, - check_exit_code=False) - - # NOTE(vish): This will break if there is already an ip on the - # interface, so we move any ips to the bridge - gateway = None - out, err = _execute('sudo', 'route', '-n') - for line in out.split('\n'): - fields = line.split() - if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: - gateway = fields[1] - _execute('sudo', 'route', 'del', 'default', 'gw', gateway, - 'dev', interface, check_exit_code=False) - out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, - 'scope', 'global') - for line in out.split('\n'): - fields = line.split() - if fields and fields[0] == 'inet': - params = fields[1:-1] - _execute(*_ip_bridge_cmd('del', params, fields[-1])) - _execute(*_ip_bridge_cmd('add', params, bridge)) - if gateway: - _execute('sudo', 'route', 'add', 'default', 'gw', gateway) - - if (err and err != "device %s is already a member of a bridge; can't " - "enslave it to bridge %s.\n" % (interface, bridge)): - raise exception.Error('Failed to add interface: %s' % err) - - iptables_manager.ipv4['filter'].add_rule('FORWARD', - '--in-interface %s -j ACCEPT' % \ - bridge) - iptables_manager.ipv4['filter'].add_rule('FORWARD', - '--out-interface %s -j ACCEPT' % \ - bridge) - - def initialize_gateway_device(dev, network_ref): if not network_ref: return @@ -838,12 +760,14 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): if network.get('vlan', None) is not None: - ensure_vlan_bridge(network['vlan'], + LinuxBridgeInterfaceDriver.ensure_vlan_bridge( + network['vlan'], network['bridge'], network['bridge_interface'], network) else: - ensure_bridge(network['bridge'], + LinuxBridgeInterfaceDriver.ensure_bridge( + network['bridge'], network['bridge_interface'], network) @@ -852,6 +776,88 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def unplug(self, network): return network['bridge'] + @classmethod + def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface, + net_attrs=None): + """Create a vlan and bridge unless they already exist.""" + interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num, + bridge_interface) + LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs) + return interface + + @classmethod + @utils.synchronized('ensure_vlan', external=True) + def ensure_vlan(_self, vlan_num, bridge_interface): + """Create a vlan unless it already exists.""" + interface = 'vlan%s' % vlan_num + if not _device_exists(interface): + LOG.debug(_('Starting VLAN inteface %s'), interface) + _execute('sudo', 'vconfig', 'set_name_type', + 'VLAN_PLUS_VID_NO_PAD') + _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) + _execute('sudo', 'ip', 'link', 'set', interface, 'up') + return interface + + @classmethod + @utils.synchronized('ensure_bridge', external=True) + def ensure_bridge(_self, bridge, interface, net_attrs=None): + """Create a bridge unless it already exists. + + :param interface: the interface to create the bridge on. + :param net_attrs: dictionary with attributes used to create bridge. 
+ + If net_attrs is set, it will add the net_attrs['gateway'] to the bridge + using net_attrs['broadcast'] and net_attrs['cidr']. It will also add + the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. + + The code will attempt to move any ips that already exist on the + interface onto the bridge and reset the default gateway if necessary. + + """ + if not _device_exists(bridge): + LOG.debug(_('Starting Bridge interface for %s'), interface) + _execute('sudo', 'brctl', 'addbr', bridge) + _execute('sudo', 'brctl', 'setfd', bridge, 0) + # _execute('sudo brctl setageing %s 10' % bridge) + _execute('sudo', 'brctl', 'stp', bridge, 'off') + + if interface: + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, + check_exit_code=False) + + # NOTE(vish): This will break if there is already an ip on the + # interface, so we move any ips to the bridge + gateway = None + out, err = _execute('sudo', 'route', '-n') + for line in out.split('\n'): + fields = line.split() + if fields and fields[0] == '0.0.0.0' and \ + fields[-1] == interface: + gateway = fields[1] + _execute('sudo', 'route', 'del', 'default', 'gw', gateway, + 'dev', interface, check_exit_code=False) + out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, + 'scope', 'global') + for line in out.split('\n'): + fields = line.split() + if fields and fields[0] == 'inet': + params = fields[1:-1] + _execute(*_ip_bridge_cmd('del', params, fields[-1])) + _execute(*_ip_bridge_cmd('add', params, bridge)) + if gateway: + _execute('sudo', 'route', 'add', 'default', 'gw', gateway) + + if (err and err != "device %s is already a member of a bridge;" + "can't enslave it to bridge %s.\n" % (interface, bridge)): + raise exception.Error('Failed to add interface: %s' % err) + + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--in-interface %s -j ACCEPT' % \ + bridge) + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--out-interface %s -j ACCEPT' % \ + bridge) + # plugs interfaces using Open vSwitch class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index eef582fac..f42eeec98 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -79,12 +79,14 @@ class LibvirtBridgeDriver(VIFDriver): LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), {'vlan': network['vlan'], 'bridge': network['bridge']}) - linux_net.ensure_vlan_bridge(network['vlan'], + linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( + network['vlan'], network['bridge'], network['bridge_interface']) else: LOG.debug(_("Ensuring bridge %s"), network['bridge']) - linux_net.ensure_bridge(network['bridge'], + linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( + network['bridge'], network['bridge_interface']) return self._get_configurations(network, mapping) -- cgit From aee7778549904dc89fbd792ee60924932621a720 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Fri, 5 Aug 2011 15:02:29 +0900 Subject: Added migration to add uuid to virtual interfaces. 
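For context: this migration backfills a uuid onto existing virtual_interfaces rows, and newly created rows are expected to carry one as well. A minimal sketch of what a creation-time record could look like (illustrative only; the helper below is not part of this series, and in-tree callers would use nova.utils.gen_uuid rather than the stdlib module):

import uuid

def build_vif_record(instance_id, network_id, mac_address):
    # Illustrative VIF row including the uuid column added by this
    # migration; the value format matches what upgrade() backfills.
    return {'address': mac_address,
            'instance_id': instance_id,
            'network_id': network_id,
            'uuid': str(uuid.uuid4())}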
Added uuid column to models --- .../versions/037_add_uuid_to_virtual_interfaces.py | 44 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + 2 files changed, 46 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_add_uuid_to_virtual_interfaces.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_uuid_to_virtual_interfaces.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_uuid_to_virtual_interfaces.py new file mode 100644 index 000000000..0f542cbec --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_uuid_to_virtual_interfaces.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + +from nova import utils + + +meta = MetaData() + +virtual_interfaces = Table("virtual_interfaces", meta, + Column("id", Integer(), primary_key=True, + nullable=False)) +uuid_column = Column("uuid", String(36)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + virtual_interfaces.create_column(uuid_column) + + rows = migrate_engine.execute(virtual_interfaces.select()) + for row in rows: + vif_uuid = str(utils.gen_uuid()) + migrate_engine.execute(virtual_interfaces.update()\ + .where(virtual_interfaces.c.id == row[0])\ + .values(uuid=vif_uuid)) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + virtual_interfaces.drop_column(uuid_column) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9f4c7a0aa..3ab0a2b0c 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -565,6 +565,8 @@ class VirtualInterface(BASE, NovaBase): instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) instance = relationship(Instance, backref=backref('virtual_interfaces')) + uuid = Column(String(36)) + @property def fixed_ipv6(self): cidr_v6 = self.network.cidr_v6 -- cgit From 7407a1a86c4039bdc541e9a26cc68c9c93f49bc3 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Fri, 5 Aug 2011 18:29:32 +0900 Subject: Added virtual interfaces REST API extension controller --- nova/api/openstack/contrib/virtual_interfaces.py | 102 +++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 nova/api/openstack/contrib/virtual_interfaces.py diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py new file mode 100644 index 000000000..3466d31c7 --- /dev/null +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -0,0 +1,102 @@ +# Copyright (C) 2011 Midokura KK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The virtual interfaces extension.""" + +from webob import exc +import webob + +from nova import compute +from nova import exception +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults + + +LOG = logging.getLogger("nova.api.virtual_interfaces") + + +def _translate_vif_summary_view(_context, vif): + """Maps keys for attachment summary view.""" + d = {} + d['id'] = vif['uuid'] + d['macAddress'] = vif['address'] + d['serverId'] = vif['instance_id'] + return d + + +class ServerVirtualInterfaceController(object): + """The instance VIF API controller for the Openstack API. + """ + + _serialization_metadata = { + 'application/xml': { + 'attributes': { + 'serverVirtualInterface': ['id', + 'macAddress']}}} + + def __init__(self): + self.compute_api = compute.API() + super(ServerVirtualInterfaceController, self).__init__() + + def _items(self, req, server_id, entity_maker): + """Returns a list of VIFs, transformed through entity_maker.""" + context = req.environ['nova.context'] + + try: + instance = self.compute_api.get(context, server_id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + vifs = instance['virtual_interfaces'] + limited_list = common.limited(vifs, req) + res = [entity_maker(context, vif) for vif in limited_list] + return {'serverVirtualInterfaces': res} + + def index(self, req, server_id): + """Returns the list of VIFs for a given instance.""" + return self._items(req, server_id, + entity_maker=_translate_vif_summary_view) + + +class VirtualInterfaces(extensions.ExtensionDescriptor): + + def get_name(self): + return "VirtualInterfaces" + + def get_alias(self): + return "os-virtual_interfaces" + + def get_description(self): + return "Virtual interface support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/virtual_interfaces/api/v1.1" + + def get_updated(self): + return "2011-08-05T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-virtual_interfaces', + ServerVirtualInterfaceController(), + parent=dict( + member_name='server', + collection_name='servers')) + resources.append(res) + + return resources -- cgit From c81febc28a602989636e77d1b3e9a75741e04352 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 9 Aug 2011 16:00:54 -0400 Subject: Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata. 
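instance_type on an instance is a joined instance_types row rather than a plain string, so the metadata controller has to expose its name attribute instead of the row object itself. A tiny illustration (FakeInstanceType is a stand-in used only for this sketch):

class FakeInstanceType(object):
    # Stands in for the joined instance_types row; only 'name' matters here.
    name = 'm1.small'

instance_ref = {'instance_type': FakeInstanceType()}

# The old code put the row object into the metadata response; the fix
# below exposes just the flavor name, e.g. 'm1.small'.
print instance_ref['instance_type'].name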
--- nova/api/ec2/cloud.py | 2 +- smoketests/test_netadmin.py | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f64a92d12..9538a31e2 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -299,7 +299,7 @@ class CloudController(object): 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, - 'instance-type': instance_ref['instance_type'], + 'instance-type': instance_ref['instance_type'].name, 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py index 8c8fa35b8..ef73e6f4c 100644 --- a/smoketests/test_netadmin.py +++ b/smoketests/test_netadmin.py @@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase): class SecurityGroupTests(base.UserSmokeTestCase): - def __public_instance_is_accessible(self): - id_url = "latest/meta-data/instance-id" + def __get_metadata_item(self, category): + id_url = "latest/meta-data/%s" % category options = "-f -s --max-time 1" command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) status, output = commands.getstatusoutput(command) - instance_id = output.strip() + value = output.strip() if status > 0: return False + return value + + def __public_instance_is_accessible(self): + instance_id = self.__get_metadata_item('instance-id') if not instance_id: return False if instance_id != self.data['instance'].id: @@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase): finally: result = self.conn.disassociate_address(self.data['public_ip']) - def test_005_can_revoke_security_group_ingress(self): + def test_005_validate_metadata(self): + + instance = self.data['instance'] + self.assertTrue(instance.instance_type, + self.__get_metadata_item("instance-type")) + #FIXME(dprince): validate more metadata here + + def test_006_can_revoke_security_group_ingress(self): self.assertTrue(self.conn.revoke_security_group(TEST_GROUP, ip_protocol='tcp', from_port=80, -- cgit From f9cf0d334330f034d0e0fb2ae8c88dda38e62832 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 11:54:35 -0700 Subject: Added search instance by metadata. 
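Usage of the new filter mirrors the tests added below; both a dict and a list of single-entry dicts are accepted, and every pair given must be present in the instance's metadata (sketch assumes an admin context against a populated deployment):

from nova import compute
from nova import context

def search_by_metadata():
    ctxt = context.get_admin_context()
    api = compute.API()
    # dict form: all key/value pairs must match
    both = api.get_all(ctxt, search_opts={'metadata': {'key3': 'value3',
                                                       'key4': 'value4'}})
    # list form: each element is matched the same way
    same = api.get_all(ctxt, search_opts={'metadata': [{'key4': 'value4'},
                                                       {'key3': 'value3'}]})
    return both, same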
get_all_by_filters should filter deleted --- Authors | 1 + nova/db/sqlalchemy/api.py | 20 ++++++++++++++- nova/tests/test_compute.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/Authors b/Authors index e639cbf76..ccd70baaf 100644 --- a/Authors +++ b/Authors @@ -103,6 +103,7 @@ Tushar Patil Vasiliy Shlykov Vishvananda Ishaya Vivek Y S +Vladimir Popovski William Wolf Yoshiaki Tamura Youcef Laribi diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8119cdfb8..65977a8bf 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1175,6 +1175,19 @@ def instance_get_all_by_filters(context, filters): return True return False + def _regexp_filter_by_metadata(instance, meta): + inst_metadata = [{node['key']: node['value']} \ + for node in instance['metadata']] + if isinstance(meta, list): + for node in meta: + if node not in inst_metadata: + return False + elif isinstance(meta, dict): + for k, v in meta.iteritems(): + if {k: v} not in inst_metadata: + return False + return True + def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) @@ -1232,7 +1245,9 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.all() + instances = query_prefix.\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not instances: return [] @@ -1248,6 +1263,9 @@ def instance_get_all_by_filters(context, filters): filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) + elif filter_name == 'metadata': + filter_l = lambda instance: _regexp_filter_by_metadata(instance, + filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..18daa970e 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1323,6 +1323,69 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) + def test_get_all_by_metadata(self): + """Test searching instances by metadata""" + + c = context.get_admin_context() + instance_id0 = self._create_instance() + instance_id1 = self._create_instance({ + 'metadata': {'key1': 'value1'}}) + instance_id2 = self._create_instance({ + 'metadata': {'key2': 'value2'}}) + instance_id3 = self._create_instance({ + 'metadata': {'key3': 'value3'}}) + instance_id4 = self._create_instance({ + 'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + + # get all instances + instances = self.compute_api.get_all(c, + search_opts={'metadata': {}}) + self.assertEqual(len(instances), 5) + + # wrong key/value combination + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key1': 'value3'}}) + self.assertEqual(len(instances), 0) + + # non-existing keys + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key5': 'value1'}}) + self.assertEqual(len(instances), 0) + + # find existing instance + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key2': 'value2'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3'}}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + 
self.assertTrue(instance_id3 in instance_ids) + self.assertTrue(instance_id4 in instance_ids) + + # multiple criterias as a dict + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + # multiple criterias as a list + instances = self.compute_api.get_all(c, + search_opts={'metadata': [{'key4': 'value4'}, + {'key3': 'value3'}]}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + db.instance_destroy(c, instance_id0) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + db.instance_destroy(c, instance_id4) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', -- cgit From ca7bf95e610bdc47f01b8fb7b459269bb8e5df66 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 11 Aug 2011 18:11:59 -0700 Subject: Initial version --- nova/api/__init__.py | 6 ++ nova/api/ec2/__init__.py | 3 - nova/api/openstack/create_instance_helper.py | 4 +- nova/api/openstack/userdatarequesthandler.py | 110 +++++++++++++++++++++ nova/network/linux_net.py | 5 + nova/tests/api/openstack/fakes.py | 2 + .../api/openstack/test_userdatarequesthandler.py | 80 +++++++++++++++ 7 files changed, 206 insertions(+), 4 deletions(-) create mode 100644 nova/api/openstack/userdatarequesthandler.py create mode 100644 nova/tests/api/openstack/test_userdatarequesthandler.py diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 747015af5..6e6b092b3 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -15,3 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from nova import flags + + +flags.DEFINE_boolean('use_forwarded_for', False, + 'Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 8b6e47cfb..e497b499a 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -37,9 +37,6 @@ from nova.auth import manager FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api") -flags.DEFINE_boolean('use_forwarded_for', False, - 'Treat X-Forwarded-For as the canonical remote address. 
' - 'Only enable this if you have a sanitizing proxy.') flags.DEFINE_integer('lockout_attempts', 5, 'Number of failed auths before lockout.') flags.DEFINE_integer('lockout_minutes', 15, diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 1425521a9..144697790 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -122,6 +122,7 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) zone_blob = server_dict.get('blob') + user_data = server_dict.get('user_data') name = server_dict['name'] self._validate_server_name(name) name = name.strip() @@ -161,7 +162,8 @@ class CreateInstanceHelper(object): zone_blob=zone_blob, reservation_id=reservation_id, min_count=min_count, - max_count=max_count)) + max_count=max_count, + user_data=user_data)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: diff --git a/nova/api/openstack/userdatarequesthandler.py b/nova/api/openstack/userdatarequesthandler.py new file mode 100644 index 000000000..5daa37e95 --- /dev/null +++ b/nova/api/openstack/userdatarequesthandler.py @@ -0,0 +1,110 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""User data request handler.""" + +import base64 +import webob.dec +import webob.exc + +from nova import log as logging +from nova import context +from nova import exception +from nova import db +from nova import flags +from nova import wsgi + + +LOG = logging.getLogger('nova.api.openstack.userdata') +FLAGS = flags.FLAGS + + +class Controller(object): + """ The server user-data API controller for the Openstack API """ + + def __init__(self): + super(Controller, self).__init__() + + @staticmethod + def _format_user_data(instance_ref): + return base64.b64decode(instance_ref['user_data']) + + def get_user_data(self, address): + ctxt = context.get_admin_context() + try: + instance_ref = db.instance_get_by_fixed_ip(ctxt, address) + except exception.NotFound: + instance_ref = None + if not instance_ref: + return None + + data = {'user-data': self._format_user_data(instance_ref)} + return data + + +class UserdataRequestHandler(wsgi.Application): + """Serve user-data from the OS API.""" + + def __init__(self): + self.cc = Controller() + + def print_data(self, data): + if isinstance(data, dict): + output = '' + for key in data: + if key == '_name': + continue + output += key + if isinstance(data[key], dict): + if '_name' in data[key]: + output += '=' + str(data[key]['_name']) + else: + output += '/' + output += '\n' + # Cut off last \n + return output[:-1] + elif isinstance(data, list): + return '\n'.join(data) + else: + return str(data) + + def lookup(self, path, data): + items = path.split('/') + for item in items: + if item: + if not isinstance(data, dict): + return data + if not item in data: + return None + data = data[item] + return data + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + remote_address = "10.0.1.6"#req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + + data = self.cc.get_user_data(remote_address) + if data is None: + LOG.error(_('Failed to get user data for ip: %s'), remote_address) + raise webob.exc.HTTPNotFound() + data = self.lookup(req.path_info, data) + if data is None: + raise webob.exc.HTTPNotFound() + return self.print_data(data) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4e1e1f85a..d8fff8a32 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -371,6 +371,11 @@ def metadata_forward(): '-p tcp -m tcp --dport 80 -j DNAT ' '--to-destination %s:%s' % \ (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-s 0.0.0.0/0 -d 169.254.169.253/32 ' + '-p tcp -m tcp --dport 80 -j DNAT ' + '--to-destination %s:%s' % \ + (FLAGS.osapi_host, FLAGS.osapi_port)) iptables_manager.apply() diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index d11fbf788..aa5aeef16 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -36,6 +36,7 @@ from nova.api.openstack import auth from nova.api.openstack import extensions from nova.api.openstack import versions from nova.api.openstack import limits +from nova.api.openstack import userdatarequesthandler from nova.auth.manager import User, Project import nova.image.fake from nova.image import glance @@ -99,6 +100,7 @@ def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True, mapper['/v1.0'] = api10 mapper['/v1.1'] = api11 mapper['/'] = openstack.FaultWrapper(versions.Versions()) + mapper['/latest'] = userdatarequesthandler.UserdataRequestHandler() return mapper diff --git 
a/nova/tests/api/openstack/test_userdatarequesthandler.py b/nova/tests/api/openstack/test_userdatarequesthandler.py new file mode 100644 index 000000000..0c63076b4 --- /dev/null +++ b/nova/tests/api/openstack/test_userdatarequesthandler.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import json +import unittest +import webob + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import log as logging + +from nova.tests.api.openstack import fakes + +LOG = logging.getLogger('nova.api.openstack.userdata') + +USER_DATA_STRING = ("This is an encoded string") +ENCODE_STRING = base64.b64encode(USER_DATA_STRING) + + +def return_server_by_address(context, address): + instance = {"user_data": ENCODE_STRING} + instance["fixed_ips"] = {"address": address, + "floating_ips": []} + return instance + + +def return_non_existing_server_by_address(context, address): + raise exception.NotFound() + + +class TestUserdatarequesthandler(test.TestCase): + + def setUp(self): + super(TestUserdatarequesthandler, self).setUp() + self.stubs.Set(db, 'instance_get_by_fixed_ip', + return_server_by_address) + + def test_user_data(self): + req = webob.Request.blank('/latest/user-data') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.body, USER_DATA_STRING) + + def test_user_data_non_existing_fixed_address(self): + self.stubs.Set(db, 'instance_get_by_fixed_ip', + return_non_existing_server_by_address) + self.flags(use_forwarded_for=False) + req = webob.Request.blank('/latest/user-data') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + + def test_user_data_invalid_url(self): + req = webob.Request.blank('/latest/user-data-invalid') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + + def test_user_data_with_use_forwarded_header(self): + self.flags(use_forwarded_for=True) + req = webob.Request.blank('/latest/user-data') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.body, USER_DATA_STRING) -- cgit From 7507ba23004c989c75962c47efbd2ce5e5178a90 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 11 Aug 2011 18:22:35 -0700 Subject: added userdata entry in the api paste ini --- etc/nova/api-paste.ini | 7 +++++++ nova/api/openstack/userdatarequesthandler.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index abe8c20c4..46a3b0af9 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -69,6 +69,7 @@ use = egg:Paste#urlmap /: osversions /v1.0: openstackapi10 /v1.1: openstackapi11 +/latest: osuserdata [pipeline:openstackapi10] pipeline = faultwrap auth ratelimit osapiapp10 @@ -76,6 +77,9 @@ pipeline = faultwrap auth ratelimit osapiapp10 
[pipeline:openstackapi11] pipeline = faultwrap auth ratelimit extensions osapiapp11 +[pipeline:osuserdata] +pipeline = logrequest osappud + [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory @@ -99,3 +103,6 @@ pipeline = faultwrap osversionapp [app:osversionapp] paste.app_factory = nova.api.openstack.versions:Versions.factory + +[app:osappud] +paste.app_factory = nova.api.openstack.userdatarequesthandler:UserdataRequestHandler.factory diff --git a/nova/api/openstack/userdatarequesthandler.py b/nova/api/openstack/userdatarequesthandler.py index 5daa37e95..f0205419b 100644 --- a/nova/api/openstack/userdatarequesthandler.py +++ b/nova/api/openstack/userdatarequesthandler.py @@ -96,7 +96,7 @@ class UserdataRequestHandler(wsgi.Application): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - remote_address = "10.0.1.6"#req.remote_addr + remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) -- cgit From 9ce9ef1166075e539442c61c65cf21b8d6e90cdd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 11 Aug 2011 21:03:37 -0700 Subject: add keystone middlewares for ec2 api --- etc/nova/api-paste.ini | 32 +++++++++++++- nova/api/auth.py | 91 +++++++++++++++++++++++++++++++++++++++ nova/api/ec2/__init__.py | 55 +++++++++++++++++++++-- nova/tests/api/openstack/fakes.py | 5 ++- nova/tests/test_api.py | 3 +- nova/wsgi.py | 12 ------ 6 files changed, 179 insertions(+), 19 deletions(-) create mode 100644 nova/api/auth.py diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index abe8c20c4..ec3d88caf 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -20,7 +20,8 @@ use = egg:Paste#urlmap [pipeline:ec2cloud] pipeline = logrequest authenticate cloudrequest authorizer ec2executor -#pipeline = logrequest ec2lockout authenticate cloudrequest authorizer ec2executor +# NOTE(vish): use the following pipeline for keystone +# pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor [pipeline:ec2admin] pipeline = logrequest authenticate adminrequest authorizer ec2executor @@ -37,6 +38,9 @@ paste.filter_factory = nova.api.ec2:RequestLogging.factory [filter:ec2lockout] paste.filter_factory = nova.api.ec2:Lockout.factory +[filter:totoken] +paste.filter_factory = nova.api.ec2:ToToken.factory + [filter:authenticate] paste.filter_factory = nova.api.ec2:Authenticate.factory @@ -72,9 +76,13 @@ use = egg:Paste#urlmap [pipeline:openstackapi10] pipeline = faultwrap auth ratelimit osapiapp10 +# NOTE(vish): use the following pipeline for keystone +#pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10 [pipeline:openstackapi11] pipeline = faultwrap auth ratelimit extensions osapiapp11 +# NOTE(vish): use the following pipeline for keystone +# pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11 [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory @@ -99,3 +107,25 @@ pipeline = faultwrap osversionapp [app:osversionapp] paste.app_factory = nova.api.openstack.versions:Versions.factory + +########## +# Shared # +########## + +[filter:admincontext] +paste.filter_factory = nova.api.auth:AdminContext.factory + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:KeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +service_protocol = http +service_host = 127.0.0.1 +service_port = 808 +auth_host = 127.0.0.1 
+auth_port = 5001 +auth_protocol = http +auth_uri = http://127.0.0.1:5000/ +admin_token = 999888777666 + diff --git a/nova/api/auth.py b/nova/api/auth.py new file mode 100644 index 000000000..034057d77 --- /dev/null +++ b/nova/api/auth.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. + +""" + +from nova import context +from nova import flags +from nova import wsgi +import webob.dec +import webob.exc + + +FLAGS = flags.FLAGS +flags.DEFINE_boolean('use_forwarded_for', False, + 'Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.') + + +class InjectContext(wsgi.Middleware): + """Add a 'nova.context' to WSGI environ.""" + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + req.environ['nova.context'] = self.context + return self.application + + +class AdminContext(wsgi.Middleware): + """Return an admin context no matter what""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + # Build a context, including the auth_token... + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext('admin', + 'admin', + is_admin=True, + remote_address=remote_address) + + req.environ['nova.context'] = ctx + return self.application + + +class KeystoneContext(wsgi.Middleware): + """Make a request context from keystone headers""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + user_id = req.headers['X_USER'] + except: + return webob.exc.HTTPUnauthorized() + # get the roles + roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] + project_id = req.headers['X_TENANT'] + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + roles=roles, + auth_token=auth_token, + remote_address=remote_address) + + req.environ['nova.context'] = ctx + return self.application diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 8b6e47cfb..f3e6fa124 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,6 +20,7 @@ Starting point for routing EC2 requests. """ +import httplib2 import webob import webob.dec import webob.exc @@ -37,15 +38,17 @@ from nova.auth import manager FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api") -flags.DEFINE_boolean('use_forwarded_for', False, - 'Treat X-Forwarded-For as the canonical remote address. 
' - 'Only enable this if you have a sanitizing proxy.') flags.DEFINE_integer('lockout_attempts', 5, 'Number of failed auths before lockout.') flags.DEFINE_integer('lockout_minutes', 15, 'Number of minutes to lockout if triggered.') flags.DEFINE_integer('lockout_window', 15, 'Number of minutes for lockout window.') +flags.DEFINE_integer('lockout_window', 15, + 'Number of minutes for lockout window.') +flags.DEFINE_string('keystone_ec2_url', + 'http://localhost:5000/v2.0/ec2tokens', + 'URL to get token from ec2 request.') class RequestLogging(wsgi.Middleware): @@ -138,6 +141,49 @@ class Lockout(wsgi.Middleware): return res +class ToToken(wsgi.Middleware): + """Authenticate an EC2 request with keystone and convert to token.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + # Read request signature and access id. + try: + signature = req.params['Signature'] + access = req.params['AWSAccessKeyId'] + except KeyError, e: + raise webob.exc.HTTPBadRequest() + + # Make a copy of args for authentication and signature verification. + auth_params = dict(req.params) + # Not part of authentication args + auth_params.pop('Signature') + + # Authenticate the request. + client = httplib2.Http() + creds = {'ec2Credentials': {'access': access, + 'signature': signature, + 'host': req.host, + 'verb': req.method, + 'path': req.path, + 'params': auth_params, + }} + headers = {'Content-Type': 'application/json'}, + resp, content = client.request(FLAGS.keystone_ec2_url, + 'POST', + headers=headers, + body=utils.dumps(creds)) + # NOTE(vish): We could save a call to keystone by + # having keystone return token, tenant, + # user, and roles from this call. + result = utils.loads(content) + # TODO(vish): check for errors + token_id = result['auth']['token']['id'] + + # Authenticated! 
+ req.headers['X-Auth-Token'] = token_id + return self.application + + class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'nova.context' to WSGI environ.""" @@ -196,6 +242,7 @@ class Requestify(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): + LOG.audit("in request", context=req.environ['nova.context']) non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] args = dict(req.params) @@ -286,6 +333,8 @@ class Authorizer(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['nova.context'] + LOG.warn(req.environ['nova.context'].__dict__) + LOG.warn(req.environ['ec2.request'].__dict__) controller = req.environ['ec2.request'].controller.__class__.__name__ action = req.environ['ec2.request'].action allowed_roles = self.action_roles[controller].get(action, ['none']) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index d11fbf788..a095dd90a 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -32,6 +32,7 @@ from nova import utils from nova import wsgi import nova.api.openstack.auth from nova.api import openstack +from nova.api import auth as api_auth from nova.api.openstack import auth from nova.api.openstack import extensions from nova.api.openstack import versions @@ -83,9 +84,9 @@ def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True, ctxt = fake_auth_context else: ctxt = context.RequestContext('fake', 'fake') - api10 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, + api10 = openstack.FaultWrapper(api_auth.InjectContext(ctxt, limits.RateLimitingMiddleware(inner_app10))) - api11 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, + api11 = openstack.FaultWrapper(api_auth.InjectContext(ctxt, limits.RateLimitingMiddleware( extensions.ExtensionMiddleware(inner_app11)))) else: diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 2011ae756..526d1c490 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -32,6 +32,7 @@ from nova import context from nova import exception from nova import test from nova import wsgi +from nova.api import auth from nova.api import ec2 from nova.api.ec2 import apirequest from nova.api.ec2 import cloud @@ -199,7 +200,7 @@ class ApiEc2TestCase(test.TestCase): # NOTE(vish): skipping the Authorizer roles = ['sysadmin', 'netadmin'] ctxt = context.RequestContext('fake', 'fake', roles=roles) - self.app = wsgi.InjectContext(ctxt, + self.app = auth.InjectContext(ctxt, ec2.Requestify(ec2.Authorizer(ec2.Executor()), 'nova.api.ec2.cloud.CloudController')) diff --git a/nova/wsgi.py b/nova/wsgi.py index c8ddb97d7..eae3afcb4 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -274,18 +274,6 @@ class Middleware(Application): return self.process_response(response) -class InjectContext(Middleware): - """Add a 'nova.context' to WSGI environ.""" - def __init__(self, context, *args, **kwargs): - self.context = context - super(InjectContext, self).__init__(*args, **kwargs) - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - req.environ['nova.context'] = self.context - return self.application - - class Debug(Middleware): """Helper class for debugging a WSGI application. 
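For orientation, the round trip the new ToToken middleware performs against keystone can be sketched as follows. The request body mirrors the creds dict built above; the response shape is assumed only to the extent that the code reads result['auth']['token']['id'], and every value shown is illustrative.

    # Body POSTed to FLAGS.keystone_ec2_url (illustrative values only):
    creds = {'ec2Credentials': {'access': 'AKIAEXAMPLE',
                                'signature': 'c2lnbmF0dXJl',
                                'host': 'api.example.com:8773',
                                'verb': 'GET',
                                'path': '/services/Cloud',
                                'params': {'Action': 'DescribeInstances',
                                           'Version': '2010-08-31'}}}

    # Minimal response the parsing above relies on; only the
    # ['auth']['token']['id'] path is actually read:
    content = {'auth': {'token': {'id': 'aaa-bbb-ccc'}}}

    # The resulting token id is forwarded downstream, where the authtoken
    # and keystonecontext filters in the pipeline consume it:
    #   req.headers['X-Auth-Token'] = content['auth']['token']['id']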
-- cgit From e294303750f032f22dadaba7eb0c743effa8c3f5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 11 Aug 2011 21:30:07 -0700 Subject: remove accidentally duplicated flag --- nova/api/ec2/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index f3e6fa124..a93285dba 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -44,8 +44,6 @@ flags.DEFINE_integer('lockout_minutes', 15, 'Number of minutes to lockout if triggered.') flags.DEFINE_integer('lockout_window', 15, 'Number of minutes for lockout window.') -flags.DEFINE_integer('lockout_window', 15, - 'Number of minutes for lockout window.') flags.DEFINE_string('keystone_ec2_url', 'http://localhost:5000/v2.0/ec2tokens', 'URL to get token from ec2 request.') -- cgit From 7295b93192d2b151c108d7631c3b404ef65fdedf Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 01:21:47 -0700 Subject: remove extra log statements --- nova/api/ec2/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a93285dba..1ae9a126a 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -240,7 +240,6 @@ class Requestify(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - LOG.audit("in request", context=req.environ['nova.context']) non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] args = dict(req.params) @@ -331,8 +330,6 @@ class Authorizer(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['nova.context'] - LOG.warn(req.environ['nova.context'].__dict__) - LOG.warn(req.environ['ec2.request'].__dict__) controller = req.environ['ec2.request'].controller.__class__.__name__ action = req.environ['ec2.request'].action allowed_roles = self.action_roles[controller].get(action, ['none']) -- cgit From 0bc781425bea1162cd81bdc95f49d50068857057 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 10:01:04 -0500 Subject: start of day --- nova/scheduler/abstract_scheduler.py | 180 +++++----------- nova/scheduler/base_scheduler.py | 403 +++++++++++++++++++++++++++++++++++ 2 files changed, 459 insertions(+), 124 deletions(-) create mode 100644 nova/scheduler/base_scheduler.py diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index eb924732a..a6457cc50 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -14,10 +14,10 @@ # under the License. """ -The AbsractScheduler is a base class Scheduler for creating instances -across zones. There are two expansion points to this class for: -1. Assigning Weights to hosts for requested instances -2. Filtering Hosts based on required instance capabilities +The AbsractScheduler is an abstract class Scheduler for creating instances +locally or across zones. Two methods should be overridden in order to +customize the behavior: filter_hosts() and weigh_hosts(). The default +behavior is to simply select all hosts and weight them the same. 
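A minimal override of the two hooks, assuming host capability dicts expose host_memory_free as the default filter below does, might look like this (the subclass name and its policy are illustrative only):

    class PreferEmptiestHostScheduler(AbstractScheduler):
        '''Keep hosts with enough free RAM; prefer the emptiest one.
        Lower weights sort first, so free memory is negated.'''

        def filter_hosts(self, topic, request_spec, host_list):
            needed = request_spec['instance_type']['memory_mb'] * 1024 * 1024
            return [(host, caps) for host, caps in host_list
                    if caps.get('host_memory_free', 0) >= needed]

        def weigh_hosts(self, topic, request_spec, hosts):
            return [dict(weight=-caps.get('host_memory_free', 0),
                         hostname=host, capabilities=caps)
                    for host, caps in hosts]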
""" import operator @@ -185,13 +185,11 @@ class AbstractScheduler(driver.Scheduler): for zone_id, result in child_results: if not result: continue - assert isinstance(zone_id, int) for zone_rec in zones: if zone_rec['id'] != zone_id: continue - for item in result: try: offset = zone_rec['weight_offset'] @@ -202,10 +200,10 @@ class AbstractScheduler(driver.Scheduler): item['raw_weight'] = raw_weight except KeyError: LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) + "for Zone: %(zone_id)s") % locals()) def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): + *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: @@ -214,13 +212,11 @@ class AbstractScheduler(driver.Scheduler): to simply create the instance (either in this zone or a child zone). """ - # TODO(sandy): We'll have to look for richer specs at some point. - blob = request_spec.get('blob') if blob: self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) + request_spec, kwargs) return None num_instances = request_spec.get('num_instances', 1) @@ -238,7 +234,7 @@ class AbstractScheduler(driver.Scheduler): build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -251,58 +247,49 @@ class AbstractScheduler(driver.Scheduler): anything about the children. """ return self._schedule(context, "compute", request_spec, - *args, **kwargs) + *args, **kwargs) - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ - raise driver.NoValidHost(_('No hosts were available')) + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + msg = _("No host selection for %s defined." % topic) + raise driver.NoValidHost(msg) def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] 
- weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... + msg = _("Scheduler only understands Compute nodes (for now)") + raise NotImplementedError(msg) + + # Get all available hosts. + all_hosts = self.zone_manager.service_states.iteritems() + print "-"*88 + ss = self.zone_manager.service_states + print ss + print "KEYS", ss.keys() + print "-"*88 + + unfiltered_hosts = [(host, services[host]) + for host, services in all_hosts + if topic in services[host]] + + # Filter local hosts based on requirements ... + filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + if not filtered_hosts: + LOG.warn(_("No hosts available")) + return [] + + # weigh the selected hosts. + # weighted_hosts = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts) + # Next, tack on the host weights from the child zones json_spec = json.dumps(request_spec) all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", @@ -314,14 +301,13 @@ class AbstractScheduler(driver.Scheduler): # it later if needed. This implicitly builds a zone # path structure. host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted_hosts.append(host_dict) + weighted_hosts.sort(key=operator.itemgetter('weight')) + return weighted_hosts - def compute_filter(self, hostname, capabilities, request_spec): + def basic_ram_filter(self, hostname, capabilities, request_spec): """Return whether or not we can schedule to this compute node. Derived classes should override this and return True if the host is acceptable for scheduling. @@ -330,74 +316,20 @@ class AbstractScheduler(driver.Scheduler): requested_mem = instance_type['memory_mb'] * 1024 * 1024 return capabilities['host_memory_free'] >= requested_mem - def hold_filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. Since this task is just to refactor the classes, - # I'm not fixing the logic now - that will be the next task. - # So for now this method is just renamed; afterwards this will - # become the filter_hosts() method, and the one below will - # be removed. - filter_name = request_spec.get('filter', None) - # Make sure that the requested filter is legitimate. - selected_filter = host_filter.choose_host_filter(filter_name) - - # TODO(sandy): We're only using InstanceType-based specs - # currently. Later we'll need to snoop for more detailed - # host filter requests. - instance_type = request_spec['instance_type'] - name, query = selected_filter.instance_type_to_filter(instance_type) - return selected_filter.filter_hosts(self.zone_manager, query) - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '_filter' function more appropriate. 
+ """Filter the full host list returned from the ZoneManager. By default, + this method only applies the basic_ram_filter(), meaning all hosts + with at least enough RAM for the requested instance are returned. + + Override in subclasses to provide greater selectivity. """ - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no _filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts + return [(host, services) for host, services in host_list + if basic_ram_filter(host, services, request_spec)] def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes may override this to provide more sophisticated - scheduling objectives + """This version assigns a weight of 1 to all hosts, making selection + of any host basically a random event. Override this method in your + subclass to add logic to prefer one potential host over another. """ - # NOTE(sirp): The default logic is the same as the NoopCostFunction return [dict(weight=1, hostname=hostname, capabilities=capabilities) for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py new file mode 100644 index 000000000..43a6ab2b1 --- /dev/null +++ b/nova/scheduler/base_scheduler.py @@ -0,0 +1,403 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The BaseScheduler is the base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. 
Filtering Hosts based on required instance capabilities +""" + +import operator +import json + +import M2Crypto + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions + +from nova import crypto +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import rpc + +from nova.compute import api as compute_api +from nova.scheduler import api +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.scheduler.abstract_scheduler') + + +class InvalidBlob(exception.NovaException): + message = _("Ill-formed or incorrectly routed 'blob' data sent " + "to instance create request.") + + +class AbstractScheduler(driver.Scheduler): + """Base class for creating Schedulers that can work across any nova + deployment, from simple designs to multiply-nested zones. + """ + + def _call_zone_method(self, context, method, specs, zones): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs, zones=zones) + + def _provision_resource_locally(self, context, build_plan_item, + request_spec, kwargs): + """Create the requested resource in this Zone.""" + host = build_plan_item['hostname'] + base_options = request_spec['instance_properties'] + image = request_spec['image'] + + # TODO(sandy): I guess someone needs to add block_device_mapping + # support at some point? Also, OS API has no concept of security + # groups. + instance = compute_api.API().create_db_entry_for_new_instance(context, + image, base_options, None, []) + + instance_id = instance['id'] + kwargs['instance_id'] = instance_id + + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Provisioning locally via compute node %(host)s") + % locals()) + + def _decrypt_blob(self, blob): + """Returns the decrypted blob or None if invalid. Broken out + for testing.""" + decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) + try: + json_entry = decryptor(blob) + return json.dumps(json_entry) + except M2Crypto.EVP.EVPError: + pass + return None + + def _ask_child_zone_to_create_instance(self, context, zone_info, + request_spec, kwargs): + """Once we have determined that the request should go to one + of our children, we need to fabricate a new POST /servers/ + call with the same parameters that were passed into us. + + Note that we have to reverse engineer from our args to get back the + image, flavor, ipgroup, etc. since the original call could have + come in from EC2 (which doesn't use these things).""" + + instance_type = request_spec['instance_type'] + instance_properties = request_spec['instance_properties'] + + name = instance_properties['display_name'] + image_ref = instance_properties['image_ref'] + meta = instance_properties['metadata'] + flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] + + files = kwargs['injected_files'] + ipgroup = None # Not supported in OS API ... yet + + child_zone = zone_info['child_zone'] + child_blob = zone_info['child_blob'] + zone = db.zone_get(context, child_zone) + url = zone.api_url + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". 
ReservationID=%(reservation_id)s") + % locals()) + nova = None + try: + nova = novaclient.Client(zone.username, zone.password, None, url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + raise exception.NotAuthorized(_("Bad credentials attempting " + "to talk to zone at %(url)s.") % locals()) + + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) + + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): + """Create the requested resource locally or in a child zone + based on what is stored in the zone blob info. + + Attempt to decrypt the blob to see if this request is: + 1. valid, and + 2. intended for this zone or a child zone. + + Note: If we have "blob" that means the request was passed + into us from a parent zone. If we have "child_blob" that + means we gathered the info from one of our children. + It's possible that, when we decrypt the 'blob' field, it + contains "child_blob" data. In which case we forward the + request.""" + + host_info = None + if "blob" in build_plan_item: + # Request was passed in from above. Is it for us? + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: + # Our immediate child zone provided this info ... + host_info = build_plan_item + + if not host_info: + raise InvalidBlob() + + # Valid data ... is it for us? + if 'child_zone' in host_info and 'child_blob' in host_info: + self._ask_child_zone_to_create_instance(context, host_info, + request_spec, kwargs) + else: + self._provision_resource_locally(context, host_info, request_spec, + kwargs) + + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) + return + + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. + """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) + + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone). + """ + + # TODO(sandy): We'll have to look for richer specs at some point. 
+ + blob = request_spec.get('blob') + if blob: + self._provision_resource(context, request_spec, instance_id, + request_spec, kwargs) + return None + + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) + + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def select(self, context, request_spec, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) + + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + raise driver.NoValidHost(_('No hosts were available')) + + def _schedule(self, context, topic, request_spec, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + if topic != "compute": + raise NotImplementedError(_("Scheduler only understands" + " Compute nodes (for now)")) + + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] + + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) + + # Next, tack on the best weights from the child zones ... + json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) + child_results = self._call_zone_method(context, "select", + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. 
+ host_dict = {"weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. + """ + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def hold_filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + # NOTE(dabo): The logic used by the current _schedule() method + # is incorrect. Since this task is just to refactor the classes, + # I'm not fixing the logic now - that will be the next task. + # So for now this method is just renamed; afterwards this will + # become the filter_hosts() method, and the one below will + # be removed. + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = request_spec['instance_type'] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '_filter' function more appropriate. + """ + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no _filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True + + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 
'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) -- cgit From ee8ef9ab1de284ec77d33bb27741f010f9a63961 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 11:55:38 -0500 Subject: use subnet iteration from netaddr for subnet calculation --- nova/network/manager.py | 55 ++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 8fc6a295f..93f571c21 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -45,6 +45,7 @@ topologies. All of the network commands are issued to a subclass of """ import datetime +import itertools import math import netaddr import socket @@ -618,61 +619,63 @@ class NetworkManager(manager.SchedulerDependentManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" + # NOTE(jkoelker): these are dummy values to make sure iter works + fixed_net_v4 = netaddr.IPNetwork('0/32') + fixed_net_v6 = netaddr.IPNetwork('::0/128') + subnets_v4 = [] + subnets_v6 = [] + + subnet_bits = int(math.ceil(math.log(network_size, 2)) + if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) - significant_bits_v6 = 64 - network_size_v6 = 1 << 64 + prefixlen_v6 = 128 - subnet_bits + subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks) if cidr: fixed_net = netaddr.IPNetwork(cidr) - significant_bits = 32 - int(math.log(network_size, 2)) + prefixlen_v4 = 32 - subnet_bits + subnets_v4 = fixed_net_v4.subnet(prefixlen_v4, count=num_networks) - for index in range(num_networks): + subnets = itertools.izip_longest(subnets_v4, subnets_v6) + for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface + net['multi_host'] = multi_host + net['dns1'] = dns1 net['dns2'] = dns2 - if cidr: - start = index * network_size - project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], - significant_bits)) - net['cidr'] = str(project_net) - net['multi_host'] = multi_host - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(project_net[2]) - if num_networks > 1: net['label'] = '%s_%d' % (label, index) else: net['label'] = label - if cidr_v6: - start_v6 = index * network_size_v6 - cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], - significant_bits_v6) - net['cidr_v6'] = cidr_v6 - - project_net_v6 = netaddr.IPNetwork(cidr_v6) + if cidr: + net['cidr'] = str(subnet_v4) + net['netmask'] = str(subnet_v4.netmask) + net['gateway'] = str(subnet_v4[1]) + net['broadcast'] = str(subnet_v4.broadcast) + net['dhcp_start'] = str(subnet_v4[2]) + if cidr_v6: + net['cidr_v6'] = str(subnet_v6) if gateway_v6: # use a pre-defined gateway if one is provided net['gateway_v6'] = str(gateway_v6) else: - net['gateway_v6'] = str(project_net_v6[1]) + net['gateway_v6'] = str(subnet_v6[1]) - net['netmask_v6'] = str(project_net_v6._prefixlen) + net['netmask_v6'] = str(subnet_v6._prefixlen) if kwargs.get('vpn', False): # this bit here is for vlan-manager del net['dns1'] del net['dns2'] vlan = kwargs['vlan_start'] + index - net['vpn_private_address'] = str(project_net[2]) - net['dhcp_start'] = str(project_net[3]) + net['vpn_private_address'] = str(subnet_v4[2]) + net['dhcp_start'] = str(subnet_v4[3]) 
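    # Address layout within each carved subnet, matching the assignments
    # above (shown for an illustrative 10.0.0.0/24 slice):
    #   subnet_v4[0] -> 10.0.0.0  network address
    #   subnet_v4[1] -> 10.0.0.1  gateway
    #   subnet_v4[2] -> 10.0.0.2  dhcp_start, or vpn_private_address when
    #                             a VPN is requested
    #   subnet_v4[3] -> 10.0.0.3  dhcp_start in the VPN case
    # The slices themselves come from netaddr: e.g. with network_size=256,
    # subnet_bits is 8, prefixlen_v4 is 24, and
    # netaddr.IPNetwork('10.0.0.0/16').subnet(24, count=num_networks)
    # yields 10.0.0.0/24, 10.0.1.0/24, ... (values illustrative).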
net['vlan'] = vlan net['bridge'] = 'br%s' % vlan -- cgit From 8f5f2c651846f8a3ff66821451216552d71c8fe6 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:00:23 -0500 Subject: forgot the closing paren --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 93f571c21..889e1f9cd 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -625,7 +625,7 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v4 = [] subnets_v6 = [] - subnet_bits = int(math.ceil(math.log(network_size, 2)) + subnet_bits = int(math.ceil(math.log(network_size, 2))) if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) -- cgit From 21707674ce862f4e12a8ee9db665829f09d29467 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:03:02 -0500 Subject: don't require ipv4 --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 077a89d6f..1b7a2bf0a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -719,7 +719,7 @@ class NetworkCommands(object): # sanitize other input using FLAGS if necessary if not num_networks: num_networks = FLAGS.num_networks - if not network_size: + if not network_size and fixed_range_v4: fixnet = netaddr.IPNetwork(fixed_range_v4) each_subnet_size = fixnet.size / int(num_networks) if each_subnet_size > FLAGS.network_size: -- cgit From 0beef1b24ee63f554f5478d54ee32f86fe5f4f2c Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:06:57 -0500 Subject: make sure network_size gets set --- bin/nova-manage | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index 1b7a2bf0a..25655125d 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -741,6 +741,9 @@ class NetworkCommands(object): if not dns1 and FLAGS.flat_network_dns: dns1 = FLAGS.flat_network_dns + if not network_size: + network_size = FLAGS.network_size + # create the network net_manager = utils.import_object(FLAGS.network_manager) net_manager.create_networks(context.get_admin_context(), -- cgit From 07aae460e848af51667537d56ec8b89d0c79f048 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:11:13 -0500 Subject: only run if the subnet and cidr exist --- nova/network/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 889e1f9cd..f26263f2e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -652,14 +652,14 @@ class NetworkManager(manager.SchedulerDependentManager): else: net['label'] = label - if cidr: + if cidr and subnet_v4: net['cidr'] = str(subnet_v4) net['netmask'] = str(subnet_v4.netmask) net['gateway'] = str(subnet_v4[1]) net['broadcast'] = str(subnet_v4.broadcast) net['dhcp_start'] = str(subnet_v4[2]) - if cidr_v6: + if cidr_v6 and subnet_v6: net['cidr_v6'] = str(subnet_v6) if gateway_v6: # use a pre-defined gateway if one is provided -- cgit From 411ee0c1a1901de7c9b7ceae1d41b7742fce609e Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:13:14 -0500 Subject: only run if the subnet and cidr exist --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index f26263f2e..ccc899d99 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -689,7 +689,7 @@ class NetworkManager(manager.SchedulerDependentManager): if not network: raise 
ValueError(_('Network already exists!')) - if network and cidr: + if network and cidr and subnet_v4: self._create_fixed_ips(context, network['id']) @property -- cgit From dbaa1c2299d3b97273698050b372b9714324706a Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:23:50 -0500 Subject: need to actually assign the v4 network --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index ccc899d99..32e236073 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -633,7 +633,7 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks) if cidr: - fixed_net = netaddr.IPNetwork(cidr) + fixed_net_v4 = netaddr.IPNetwork(cidr) prefixlen_v4 = 32 - subnet_bits subnets_v4 = fixed_net_v4.subnet(prefixlen_v4, count=num_networks) -- cgit From 9ab61aaa194a787b41b1d634c1b56c98574dcbc9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 11:28:47 -0700 Subject: updates from review --- nova/api/auth.py | 26 +++++--------------------- nova/api/ec2/__init__.py | 4 ++-- 2 files changed, 7 insertions(+), 23 deletions(-) diff --git a/nova/api/auth.py b/nova/api/auth.py index 034057d77..cd3e3e8a0 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -18,11 +18,12 @@ Common Auth Middleware. """ +import webob.dec +import webob.exc + from nova import context from nova import flags from nova import wsgi -import webob.dec -import webob.exc FLAGS = flags.FLAGS @@ -33,6 +34,7 @@ flags.DEFINE_boolean('use_forwarded_for', False, class InjectContext(wsgi.Middleware): """Add a 'nova.context' to WSGI environ.""" + def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @@ -43,24 +45,6 @@ class InjectContext(wsgi.Middleware): return self.application -class AdminContext(wsgi.Middleware): - """Return an admin context no matter what""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - # Build a context, including the auth_token... - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext('admin', - 'admin', - is_admin=True, - remote_address=remote_address) - - req.environ['nova.context'] = ctx - return self.application - - class KeystoneContext(wsgi.Middleware): """Make a request context from keystone headers""" @@ -68,7 +52,7 @@ class KeystoneContext(wsgi.Middleware): def __call__(self, req): try: user_id = req.headers['X_USER'] - except: + except KeyError: return webob.exc.HTTPUnauthorized() # get the roles roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 1ae9a126a..2ae370f88 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -148,7 +148,7 @@ class ToToken(wsgi.Middleware): try: signature = req.params['Signature'] access = req.params['AWSAccessKeyId'] - except KeyError, e: + except KeyError: raise webob.exc.HTTPBadRequest() # Make a copy of args for authentication and signature verification. @@ -191,7 +191,7 @@ class Authenticate(wsgi.Middleware): try: signature = req.params['Signature'] access = req.params['AWSAccessKeyId'] - except KeyError, e: + except KeyError: raise webob.exc.HTTPBadRequest() # Make a copy of args for authentication and signature verification. 
-- cgit From 93207c19c72aff5eb2c99b0b42649a75def35cf0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 11:29:25 -0700 Subject: removed admincontext middleware --- etc/nova/api-paste.ini | 3 --- 1 file changed, 3 deletions(-) diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index ec3d88caf..b540509a2 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -112,9 +112,6 @@ paste.app_factory = nova.api.openstack.versions:Versions.factory # Shared # ########## -[filter:admincontext] -paste.filter_factory = nova.api.auth:AdminContext.factory - [filter:keystonecontext] paste.filter_factory = nova.api.auth:KeystoneContext.factory -- cgit From 90c6641d47e9c1012b9fb3e53fe0da21ae3d42b7 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 13:58:26 -0500 Subject: Created the filters directory in nova/scheduler --- nova/scheduler/__init__.py | 2 + nova/scheduler/abstract_scheduler.py | 30 +-- nova/scheduler/base_scheduler.py | 312 +-------------------- nova/scheduler/filters/__init__.py | 18 ++ nova/scheduler/filters/abstract_filter.py | 87 ++++++ nova/scheduler/filters/all_hosts_filter.py | 31 +++ nova/scheduler/filters/instance_type_filter.py | 86 ++++++ nova/scheduler/filters/json_filter.py | 141 ++++++++++ nova/scheduler/host_filter.py | 314 ---------------------- nova/tests/scheduler/test_abstract_scheduler.py | 3 + nova/tests/scheduler/test_host_filter.py | 4 +- nova/tests/scheduler/test_least_cost_scheduler.py | 7 +- 12 files changed, 391 insertions(+), 644 deletions(-) create mode 100644 nova/scheduler/filters/__init__.py create mode 100644 nova/scheduler/filters/abstract_filter.py create mode 100644 nova/scheduler/filters/all_hosts_filter.py create mode 100644 nova/scheduler/filters/instance_type_filter.py create mode 100644 nova/scheduler/filters/json_filter.py delete mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/__init__.py b/nova/scheduler/__init__.py index 8359a7aeb..25078f015 100644 --- a/nova/scheduler/__init__.py +++ b/nova/scheduler/__init__.py @@ -21,5 +21,7 @@ .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe .. moduleauthor:: Chris Behrens """ diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a6457cc50..a0734f322 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -269,18 +269,13 @@ class AbstractScheduler(driver.Scheduler): # Get all available hosts. all_hosts = self.zone_manager.service_states.iteritems() - print "-"*88 - ss = self.zone_manager.service_states - print ss - print "KEYS", ss.keys() - print "-"*88 - - unfiltered_hosts = [(host, services[host]) + unfiltered_hosts = [(host, services[topic]) for host, services in all_hosts - if topic in services[host]] + if topic in services] # Filter local hosts based on requirements ... - filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + filtered_hosts = self.filter_hosts(topic, request_spec, + unfiltered_hosts) if not filtered_hosts: LOG.warn(_("No hosts available")) return [] @@ -307,22 +302,19 @@ class AbstractScheduler(driver.Scheduler): weighted_hosts.sort(key=operator.itemgetter('weight')) return weighted_hosts - def basic_ram_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. 
- Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): + def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. By default, this method only applies the basic_ram_filter(), meaning all hosts with at least enough RAM for the requested instance are returned. Override in subclasses to provide greater selectivity. """ + def basic_ram_filter(hostname, capabilities, request_spec): + """Only return hosts with sufficient available RAM.""" + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + return [(host, services) for host, services in host_list if basic_ram_filter(host, services, request_spec)] diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index 43a6ab2b1..e14ee349e 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -20,324 +20,22 @@ across zones. There are two expansion points to this class for: 2. Filtering Hosts based on required instance capabilities """ -import operator -import json - -import M2Crypto - -from novaclient import v1_1 as novaclient -from novaclient import exceptions as novaclient_exceptions - -from nova import crypto -from nova import db -from nova import exception from nova import flags from nova import log as logging -from nova import rpc -from nova.compute import api as compute_api -from nova.scheduler import api -from nova.scheduler import driver +from nova.scheduler import abstract_scheduler +from nova.scheduler import host_filter FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.abstract_scheduler') - +LOG = logging.getLogger('nova.scheduler.base_scheduler') -class InvalidBlob(exception.NovaException): - message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") - -class AbstractScheduler(driver.Scheduler): +class BaseScheduler(abstract_scheduler.AbstractScheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - - def _call_zone_method(self, context, method, specs, zones): - """Call novaclient zone method. Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs, zones=zones) - - def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): - """Create the requested resource in this Zone.""" - host = build_plan_item['hostname'] - base_options = request_spec['instance_properties'] - image = request_spec['image'] - - # TODO(sandy): I guess someone needs to add block_device_mapping - # support at some point? Also, OS API has no concept of security - # groups. - instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) - - instance_id = instance['id'] - kwargs['instance_id'] = instance_id - - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) - LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) - - def _decrypt_blob(self, blob): - """Returns the decrypted blob or None if invalid. 
Broken out - for testing.""" - decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) - try: - json_entry = decryptor(blob) - return json.dumps(json_entry) - except M2Crypto.EVP.EVPError: - pass - return None - - def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): - """Once we have determined that the request should go to one - of our children, we need to fabricate a new POST /servers/ - call with the same parameters that were passed into us. - - Note that we have to reverse engineer from our args to get back the - image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - - instance_type = request_spec['instance_type'] - instance_properties = request_spec['instance_properties'] - - name = instance_properties['display_name'] - image_ref = instance_properties['image_ref'] - meta = instance_properties['metadata'] - flavor_id = instance_type['flavorid'] - reservation_id = instance_properties['reservation_id'] - - files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... yet - - child_zone = zone_info['child_zone'] - child_blob = zone_info['child_blob'] - zone = db.zone_get(context, child_zone) - url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) - nova = None - try: - nova = novaclient.Client(zone.username, zone.password, None, url) - nova.authenticate() - except novaclient_exceptions.BadRequest, e: - raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - - nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) - - def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): - """Create the requested resource locally or in a child zone - based on what is stored in the zone blob info. - - Attempt to decrypt the blob to see if this request is: - 1. valid, and - 2. intended for this zone or a child zone. - - Note: If we have "blob" that means the request was passed - into us from a parent zone. If we have "child_blob" that - means we gathered the info from one of our children. - It's possible that, when we decrypt the 'blob' field, it - contains "child_blob" data. In which case we forward the - request.""" - - host_info = None - if "blob" in build_plan_item: - # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(build_plan_item['blob']) - elif "child_blob" in build_plan_item: - # Our immediate child zone provided this info ... - host_info = build_plan_item - - if not host_info: - raise InvalidBlob() - - # Valid data ... is it for us? 
- if 'child_zone' in host_info and 'child_blob' in host_info: - self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) - else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) - - def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): - """Create the requested resource in this Zone or a child zone.""" - if "hostname" in build_plan_item: - self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) - return - - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) - - def _adjust_child_weights(self, child_results, zones): - """Apply the Scale and Offset values from the Zone definition - to adjust the weights returned from the child zones. Alters - child_results in place. - """ - for zone_id, result in child_results: - if not result: - continue - - assert isinstance(zone_id, int) - - for zone_rec in zones: - if zone_rec['id'] != zone_id: - continue - - for item in result: - try: - offset = zone_rec['weight_offset'] - scale = zone_rec['weight_scale'] - raw_weight = item['weight'] - cooked_weight = offset + scale * raw_weight - item['weight'] = cooked_weight - item['raw_weight'] = raw_weight - except KeyError: - LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) - - def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): - """This method is called from nova.compute.api to provision - an instance. However we need to look at the parameters being - passed in to see if this is a request to: - 1. Create a Build Plan and then provision, or - 2. Use the Build Plan information in the request parameters - to simply create the instance (either in this zone or - a child zone). - """ - - # TODO(sandy): We'll have to look for richer specs at some point. - - blob = request_spec.get('blob') - if blob: - self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) - return None - - num_instances = request_spec.get('num_instances', 1) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - - # Create build plan and provision ... - build_plan = self.select(context, request_spec) - if not build_plan: - raise driver.NoValidHost(_('No hosts were available')) - - for num in xrange(num_instances): - if not build_plan: - break - - build_plan_item = build_plan.pop(0) - self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) - - # Returning None short-circuits the routing to Compute (since - # we've already done it here) - return None - - def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information - corresponding to the best hosts to service the request. Any - child zone information has been encrypted so as not to reveal - anything about the children. - """ - return self._schedule(context, "compute", request_spec, - *args, **kwargs) - - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. - def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one - best-suited host for this request. 
- """ - raise driver.NoValidHost(_('No hosts were available')) - - def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, - ordered by their fitness. - """ - - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... - json_spec = json.dumps(request_spec) - all_zones = db.zone_get_all(context) - child_results = self._call_zone_method(context, "select", - specs=json_spec, zones=all_zones) - self._adjust_child_weights(child_results, all_zones) - for child_zone, result in child_results: - for weighting in result: - # Remember the child_zone so we can get back to - # it later if needed. This implicitly builds a zone - # path structure. - host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def hold_filter_hosts(self, topic, request_spec, hosts=None): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. Since this task is just to refactor the classes, - # I'm not fixing the logic now - that will be the next task. - # So for now this method is just renamed; afterwards this will - # become the filter_hosts() method, and the one below will - # be removed. filter_name = request_spec.get('filter', None) # Make sure that the requested filter is legitimate. selected_filter = host_filter.choose_host_filter(filter_name) diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py new file mode 100644 index 000000000..27160ca0a --- /dev/null +++ b/nova/scheduler/filters/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from all_hosts_filter import AllHostsFilter +from instance_type_filter import InstanceTypeFilter +from json_filter import JsonFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py new file mode 100644 index 000000000..05982820f --- /dev/null +++ b/nova/scheduler/filters/abstract_filter.py @@ -0,0 +1,87 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other process for ordering. + +Three filters are included: AllHosts, Flavor & JSON. AllHosts just +returns the full, unfiltered list of hosts. Flavor is a hard coded +matching mechanism based on flavor criteria and JSON is an ad-hoc +filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently Flavors and/or InstanceTypes are used for +specifing the type of instance desired. Specific Nova users have +noted a need for a more expressive way of specifying instances. +Since we don't want to get into building full DSL this is a simple +form as an example of how this could be done. In reality, most +consumers will use the more rigid filters such as FlavorFilter. 
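[Editor's note] To make the two-step contract described above concrete, here is a rough usage sketch: a filter first encodes the instance type as an opaque query via instance_type_to_filter(), and that query is later replayed against the ZoneManager's service_states via filter_hosts(). The stand-in zone manager and flavor below are invented for illustration; any of the concrete filters in this package could be passed in.

# Hypothetical caller-side sketch (not part of the patch).

class StubZoneManager(object):
    """Stand-in for zone_manager.ZoneManager; only service_states is needed."""
    def __init__(self):
        self.service_states = {
            'host1': {'compute': {'host_memory_free': 4096,
                                  'disk_available': 1000}},
            'host2': {'compute': {'host_memory_free': 16,
                                  'disk_available': 1000}},
        }

def run_host_filter(host_filter, instance_type, zone_manager):
    # Step 1: the filter chooses its own query encoding ...
    name, query = host_filter.instance_type_to_filter(instance_type)
    # Step 2: ... and later evaluates it against the known hosts.
    return host_filter.filter_hosts(zone_manager, query)

# e.g. run_host_filter(SomeFilter(), {'memory_mb': 50, 'local_gb': 500},
#                      StubZoneManager()) -> [(hostname, capabilities), ...]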
+""" + +import json + +from nova import exception +from nova import flags +from nova import log as logging + +import nova.scheduler + + +LOG = logging.getLogger('nova.scheduler.host_filter') +FLAGS = flags.FLAGS +flags.DEFINE_string('default_host_filter', + 'nova.scheduler.host_filter.AllHostsFilter', + 'Which filter to use for filtering hosts') + + +class AbstractHostFilter(object): + """Base class for host filters.""" + def instance_type_to_filter(self, instance_type): + """Convert instance_type into a filter for most common use-case.""" + raise NotImplementedError() + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that fulfill the filter.""" + raise NotImplementedError() + + def _full_name(self): + """module.classname of the filter.""" + return "%s.%s" % (self.__module__, self.__class__.__name__) + + +def _get_filters(): + from nova.scheduler import filters + return [itm for itm in dir(filters) + if issubclass(itm, AbstractHostFilter)] + + +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. + """ + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in _get_filters(): + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py new file mode 100644 index 000000000..bc4acfd1a --- /dev/null +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -0,0 +1,31 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler + + +class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): + """NOP host filter. Returns all hosts in ZoneManager.""" + def instance_type_to_filter(self, instance_type): + """Return anything to prevent base-class from raising + exception. + """ + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts from ZoneManager list.""" + return [(host, services) + for host, services in zone_manager.service_states.iteritems()] diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py new file mode 100644 index 000000000..03ffc46c6 --- /dev/null +++ b/nova/scheduler/filters/instance_type_filter.py @@ -0,0 +1,86 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from nova.scheduler import host_filter + + +class InstanceTypeFilter(host_filter.AbstractHostFilter): + """HostFilter hard-coded to work with InstanceType records.""" + def instance_type_to_filter(self, instance_type): + """Use instance_type to filter hosts.""" + return (self._full_name(), instance_type) + + def _satisfies_extra_specs(self, capabilities, instance_type): + """Check that the capabilities provided by the compute service + satisfy the extra specs associated with the instance type""" + if 'extra_specs' not in instance_type: + return True + # NOTE(lorinh): For now, we are just checking exact matching on the + # values. Later on, we want to handle numerical + # values so we can represent things like number of GPU cards + try: + for key, value in instance_type['extra_specs'].iteritems(): + if capabilities[key] != value: + return False + except KeyError: + return False + return True + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can create instance_type.""" + instance_type = query + selected_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + capabilities = services.get('compute', {}) + if not capabilities: + continue + host_ram_mb = capabilities['host_memory_free'] + disk_bytes = capabilities['disk_available'] + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + extra_specs = instance_type['extra_specs'] + + if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and + self._satisfies_extra_specs(capabilities, instance_type)): + selected_hosts.append((host, capabilities)) + return selected_hosts + + +# host entries (currently) are like: +# {'host_name-description': 'Default install of XenServer', +# 'host_hostname': 'xs-mini', +# 'host_memory_total': 8244539392, +# 'host_memory_overhead': 184225792, +# 'host_memory_free': 3868327936, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, +# 'host_ip_address': '192.168.1.109', +# 'host_cpu_info': {}, +# 'disk_available': 32954957824, +# 'disk_total': 50394562560, +# 'disk_used': 17439604736, +# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', +# 'host_name_label': 'xs-mini'} + +# instance_type table has: +# name = Column(String(255), unique=True) +# memory_mb = Column(Integer) +# vcpus = Column(Integer) +# local_gb = Column(Integer) +# flavorid = Column(Integer, unique=True) +# swap = Column(Integer, nullable=False, default=0) +# rxtx_quota = Column(Integer, nullable=False, default=0) +# rxtx_cap = Column(Integer, nullable=False, default=0) diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py new file mode 100644 index 000000000..358abdc4d --- /dev/null +++ b/nova/scheduler/filters/json_filter.py @@ -0,0 +1,141 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import operator + +from nova.scheduler import host_filter + + +class JsonFilter(host_filter.AbstractHostFilter): + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ + def _op_comp(self, args, op): + """Returns True if the specified operator can successfully + compare the first item in the args with all the rest. Will + return False if only one item is in the list. + """ + if len(args) < 2: + return False + bad = [arg for arg in args[1:] + if not op(args[0], arg)] + return not bool(bad) + + def _equals(self, args): + """First term is == all the other terms.""" + return self._op_comp(args, operator.eq) + + def _less_than(self, args): + """First term is < all the other terms.""" + return self._op_comp(args, operator.lt) + + def _greater_than(self, args): + """First term is > all the other terms.""" + return self._op_comp(args, operator.gt) + + def _in(self, args): + """First term is in set of remaining terms""" + return self._op_comp(args, operator.contains) + + def _less_than_equal(self, args): + """First term is <= all the other terms.""" + return self._op_comp(args, operator.le) + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + return self._op_comp(args, operator.ge) + + def _not(self, args): + """Flip each of the arguments.""" + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return any(args) + + def _and(self, args): + """True if all args are True.""" + return all(args) + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def instance_type_to_filter(self, instance_type): + """Convert instance_type into JSON filter object.""" + required_ram = instance_type['memory_mb'] + required_disk = instance_type['local_gb'] + query = ['and', + ['>=', '$compute.host_memory_free', required_ram], + ['>=', '$compute.disk_available', required_disk]] + return (self._full_name(), json.dumps(query)) + + def _parse_string(self, string, host, services): + """Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]'. + """ + if not string: + return None + if not string.startswith("$"): + return string + + path = string[1:].split(".") + for item in path: + services = services.get(item, None) + if not services: + return None + return services + + def _process_filter(self, zone_manager, query, host, services): + """Recursively parse the query structure.""" + if not query: + return True + cmd = query[0] + method = self.commands[cmd] + cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(zone_manager, arg, host, services) + elif isinstance(arg, basestring): + arg = self._parse_string(arg, host, services) + if arg is not None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can fulfill the requirements + specified in the query. 
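[Editor's note] For readers new to the grammar consumed by JsonFilter above: queries are nested lists whose first element is an operator, and strings starting with '$' are capability lookups of the form '$service.capability'. A small standalone sketch of that lookup, plus a query in the same shape as the one built by instance_type_to_filter(); the host data is invented.

import json

# Same shape as the query built by instance_type_to_filter() above.
query = json.dumps(['and',
                    ['>=', '$compute.host_memory_free', 1024],
                    ['>=', '$compute.disk_available', 200]])

def lookup(dotted, services):
    """Resolve a '$service.capability[.subcap]' string against one host."""
    if not dotted.startswith('$'):
        return dotted
    value = services
    for part in dotted[1:].split('.'):
        value = value.get(part) if isinstance(value, dict) else None
        if value is None:
            return None
    return value

services = {'compute': {'host_memory_free': 4096, 'disk_available': 500}}
print(lookup('$compute.host_memory_free', services))   # 4096
print(json.loads(query)[1][0])                          # >=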
+ """ + expanded = json.loads(query) + filtered_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + result = self._process_filter(zone_manager, expanded, host, + services) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + filtered_hosts.append((host, services)) + return filtered_hosts diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py deleted file mode 100644 index 45a8f40d8..000000000 --- a/nova/scheduler/host_filter.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright (c) 2011 Openstack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils - -LOG = logging.getLogger('nova.scheduler.host_filter') - -FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', - 'Which filter to use for filtering hosts.') - - -class HostFilter(object): - """Base class for host filters.""" - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into a filter for most common use-case.""" - raise NotImplementedError() - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that fulfill the filter.""" - raise NotImplementedError() - - def _full_name(self): - """module.classname of the filter.""" - return "%s.%s" % (self.__module__, self.__class__.__name__) - - -class AllHostsFilter(HostFilter): - """ NOP host filter. Returns all hosts in ZoneManager. - This essentially does what the old Scheduler+Chance used - to give us. 
- """ - - def instance_type_to_filter(self, instance_type): - """Return anything to prevent base-class from raising - exception.""" - return (self._full_name(), instance_type) - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts from ZoneManager list.""" - return [(host, services) - for host, services in zone_manager.service_states.iteritems()] - - -class InstanceTypeFilter(HostFilter): - """HostFilter hard-coded to work with InstanceType records.""" - - def instance_type_to_filter(self, instance_type): - """Use instance_type to filter hosts.""" - return (self._full_name(), instance_type) - - def _satisfies_extra_specs(self, capabilities, instance_type): - """Check that the capabilities provided by the compute service - satisfy the extra specs associated with the instance type""" - - if 'extra_specs' not in instance_type: - return True - - # Note(lorinh): For now, we are just checking exact matching on the - # values. Later on, we want to handle numerical - # values so we can represent things like number of GPU cards - - try: - for key, value in instance_type['extra_specs'].iteritems(): - if capabilities[key] != value: - return False - except KeyError: - return False - - return True - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can create instance_type.""" - instance_type = query - selected_hosts = [] - for host, services in zone_manager.service_states.iteritems(): - capabilities = services.get('compute', {}) - host_ram_mb = capabilities['host_memory_free'] - disk_bytes = capabilities['disk_available'] - spec_ram = instance_type['memory_mb'] - spec_disk = instance_type['local_gb'] - extra_specs = instance_type['extra_specs'] - - if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and - self._satisfies_extra_specs(capabilities, instance_type)): - selected_hosts.append((host, capabilities)) - return selected_hosts - -#host entries (currently) are like: -# {'host_name-description': 'Default install of XenServer', -# 'host_hostname': 'xs-mini', -# 'host_memory_total': 8244539392, -# 'host_memory_overhead': 184225792, -# 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776, -# 'host_other_config': {}, -# 'host_ip_address': '192.168.1.109', -# 'host_cpu_info': {}, -# 'disk_available': 32954957824, -# 'disk_total': 50394562560, -# 'disk_used': 17439604736, -# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name_label': 'xs-mini'} - -# instance_type table has: -#name = Column(String(255), unique=True) -#memory_mb = Column(Integer) -#vcpus = Column(Integer) -#local_gb = Column(Integer) -#flavorid = Column(Integer, unique=True) -#swap = Column(Integer, nullable=False, default=0) -#rxtx_quota = Column(Integer, nullable=False, default=0) -#rxtx_cap = Column(Integer, nullable=False, default=0) - - -class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts. 
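[Editor's note] The extra_specs handling in InstanceTypeFilter (in both the new module above and this older copy) is a plain exact-value comparison against the capabilities a compute service reports. A minimal sketch of that check in isolation; the capability names and values are made up.

def satisfies_extra_specs(capabilities, instance_type):
    """Exact-match check; numeric comparisons are deliberately out of scope."""
    specs = instance_type.get('extra_specs') or {}
    try:
        return all(capabilities[key] == value for key, value in specs.items())
    except KeyError:
        # A capability the flavor asks about is not reported at all.
        return False

caps = {'xpu_arch': 'fermi', 'xpus': '2'}              # made-up capabilities
print(satisfies_extra_specs(caps, {'extra_specs': {'xpu_arch': 'fermi'}}))  # True
print(satisfies_extra_specs(caps, {'extra_specs': {'xpu_arch': 'tesla'}}))  # False
print(satisfies_extra_specs(caps, {'extra_specs': {'cpu_arch': 'x86'}}))    # False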
- """ - - def _equals(self, args): - """First term is == all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs != rhs: - return False - return True - - def _less_than(self, args): - """First term is < all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs >= rhs: - return False - return True - - def _greater_than(self, args): - """First term is > all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs <= rhs: - return False - return True - - def _in(self, args): - """First term is in set of remaining terms""" - if len(args) < 2: - return False - return args[0] in args[1:] - - def _less_than_equal(self, args): - """First term is <= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs > rhs: - return False - return True - - def _greater_than_equal(self, args): - """First term is >= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs < rhs: - return False - return True - - def _not(self, args): - """Flip each of the arguments.""" - if len(args) == 0: - return False - return [not arg for arg in args] - - def _or(self, args): - """True if any arg is True.""" - return True in args - - def _and(self, args): - """True if all args are True.""" - return False not in args - - commands = { - '=': _equals, - '<': _less_than, - '>': _greater_than, - 'in': _in, - '<=': _less_than_equal, - '>=': _greater_than_equal, - 'not': _not, - 'or': _or, - 'and': _and, - } - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into JSON filter object.""" - required_ram = instance_type['memory_mb'] - required_disk = instance_type['local_gb'] - query = ['and', - ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] - return (self._full_name(), json.dumps(query)) - - def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]' - """ - if not string: - return None - if string[0] != '$': - return string - - path = string[1:].split('.') - for item in path: - services = services.get(item, None) - if not services: - return None - return services - - def _process_filter(self, zone_manager, query, host, services): - """Recursively parse the query structure.""" - if len(query) == 0: - return True - cmd = query[0] - method = self.commands[cmd] # Let exception fly. - cooked_args = [] - for arg in query[1:]: - if isinstance(arg, list): - arg = self._process_filter(zone_manager, arg, host, services) - elif isinstance(arg, basestring): - arg = self._parse_string(arg, host, services) - if arg != None: - cooked_args.append(arg) - result = method(self, cooked_args) - return result - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can fulfill filter.""" - expanded = json.loads(query) - hosts = [] - for host, services in zone_manager.service_states.iteritems(): - r = self._process_filter(zone_manager, expanded, host, services) - if isinstance(r, list): - r = True in r - if r: - hosts.append((host, services)) - return hosts - - -FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. 
This - function checks the filter name against a predefined set - of acceptable filters. - """ - if not filter_name: - filter_name = FLAGS.default_host_filter - for filter_class in FILTERS: - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: - return filter_class() - raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index f4f5cc233..aa97e2344 100644 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager): 'host3': { 'compute': {'host_memory_free': 3221225472}, }, + 'host4': { + 'compute': {'host_memory_free': 999999999}, + }, } diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index 7e664d3f9..818be2f45 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -20,7 +20,7 @@ import json from nova import exception from nova import test -from nova.scheduler import host_filter +from nova.scheduler import filters class FakeZoneManager: @@ -55,7 +55,7 @@ class HostFilterTestCase(test.TestCase): def setUp(self): super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' + default_host_filter = 'nova.scheduler.filteris.AllHostsFilter' self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index de7581d0a..16ec4420b 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -122,11 +122,14 @@ class LeastCostSchedulerTestCase(test.TestCase): self.flags(least_cost_scheduler_cost_functions=[ 'nova.scheduler.least_cost.compute_fill_first_cost_fn'], compute_fill_first_cost_fn_weight=1) - num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - hosts = self.sched.filter_hosts('compute', request_spec, None) + all_hosts = self.sched.zone_manager.service_states.iteritems() + all_hosts = [(host, services["compute"]) + for host, services in all_hosts + if "compute" in services] + hosts = self.sched.filter_hosts('compute', request_spec, host_list) expected = [] for idx, (hostname, caps) in enumerate(hosts): -- cgit From d940fa4619584dac967176d045407f0919da0a74 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 16:19:46 -0500 Subject: end of day --- nova/scheduler/filters/__init__.py | 1 + nova/scheduler/filters/abstract_filter.py | 54 +------------------ nova/scheduler/filters/all_hosts_filter.py | 3 +- nova/scheduler/filters/instance_type_filter.py | 5 +- nova/scheduler/filters/json_filter.py | 39 ++++++++++---- nova/scheduler/host_filter.py | 75 ++++++++++++++++++++++++++ nova/tests/scheduler/test_host_filter.py | 34 ++++++------ 7 files changed, 129 insertions(+), 82 deletions(-) create mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py index 27160ca0a..4c9187c5a 100644 --- a/nova/scheduler/filters/__init__.py +++ b/nova/scheduler/filters/__init__.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
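[Editor's note] The filters package re-exports its classes from __init__.py precisely so that the host_filter helpers elsewhere in this series can discover them by scanning the package namespace and keeping subclasses of AbstractHostFilter. A rough standalone sketch of that discovery idea, run against a throwaway module object instead of the real package; all names below are invented.

import types

class AbstractHostFilter(object):        # stand-in for the real base class
    pass

class AllHostsFilter(AbstractHostFilter):
    pass

class JsonFilter(AbstractHostFilter):
    pass

# Throwaway namespace standing in for the nova.scheduler.filters package.
fake_filters = types.ModuleType('fake_filters')
fake_filters.AllHostsFilter = AllHostsFilter
fake_filters.JsonFilter = JsonFilter
fake_filters.not_a_filter = 42

def discover(module, base):
    """Keep only the classes in the module that derive from the base class."""
    found = []
    for name in dir(module):
        obj = getattr(module, name)
        if isinstance(obj, type) and issubclass(obj, base) and obj is not base:
            found.append(obj)
    return found

def choose(filter_name, module, base):
    """Match a requested filter by its short class name."""
    for cls in discover(module, base):
        if cls.__name__ == filter_name:
            return cls()
    raise LookupError(filter_name)

print(type(choose('JsonFilter', fake_filters, AbstractHostFilter)).__name__)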
+from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter from json_filter import JsonFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index 05982820f..fe5610923 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -13,44 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging import nova.scheduler +from nova import flags - -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', + 'nova.scheduler.filters.AllHostsFilter', 'Which filter to use for filtering hosts') - class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): @@ -64,24 +35,3 @@ class AbstractHostFilter(object): def _full_name(self): """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) - - -def _get_filters(): - from nova.scheduler import filters - return [itm for itm in dir(filters) - if issubclass(itm, AbstractHostFilter)] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters. - """ - if not filter_name: - filter_name = FLAGS.default_host_filter - for filter_class in _get_filters(): - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: - return filter_class() - raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py index bc4acfd1a..e80d829ca 100644 --- a/nova/scheduler/filters/all_hosts_filter.py +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -15,9 +15,10 @@ import nova.scheduler +from nova.scheduler.filters import abstract_filter -class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): +class AllHostsFilter(abstract_filter.AbstractHostFilter): """NOP host filter. 
Returns all hosts in ZoneManager.""" def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py index 03ffc46c6..62b9ee414 100644 --- a/nova/scheduler/filters/instance_type_filter.py +++ b/nova/scheduler/filters/instance_type_filter.py @@ -14,10 +14,11 @@ # under the License. -from nova.scheduler import host_filter +import nova.scheduler +from nova.scheduler.filters import abstract_filter -class InstanceTypeFilter(host_filter.AbstractHostFilter): +class InstanceTypeFilter(abstract_filter.AbstractHostFilter): """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 358abdc4d..889b96915 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -14,49 +14,64 @@ # under the License. +import json import operator -from nova.scheduler import host_filter +import nova.scheduler +from nova.scheduler.filters import abstract_filter +def debug(*args): + with file("/tmp/debug", "a") as dbg: + msg = " ".join([str(arg) for arg in args]) + dbg.write("%s\n" % msg) -class JsonFilter(host_filter.AbstractHostFilter): + +class JsonFilter(abstract_filter.AbstractHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts. """ - def _op_comp(self, args, op): + def _op_compare(self, args, op): """Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. """ if len(args) < 2: return False - bad = [arg for arg in args[1:] - if not op(args[0], arg)] + if op is operator.contains: + debug("ARGS", type(args), args) + debug("op", op) + debug("REVERSED!!!") + # operator.contains reverses the param order. + bad = [arg for arg in args[1:] + if not op(args, args[0])] + else: + bad = [arg for arg in args[1:] + if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" - return self._op_comp(args, operator.eq) + return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" - return self._op_comp(args, operator.lt) + return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" - return self._op_comp(args, operator.gt) + return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms""" - return self._op_comp(args, operator.contains) + return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" - return self._op_comp(args, operator.le) + return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" - return self._op_comp(args, operator.ge) + return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" @@ -129,6 +144,8 @@ class JsonFilter(host_filter.AbstractHostFilter): specified in the query. 
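[Editor's note] The special-casing of operator.contains above exists because, unlike eq/lt/gt and friends, contains takes the container as its first argument and the candidate as its second, so the usual op(first, other) pattern has to be flipped for the 'in' operator. A tiny illustration:

import operator

args = [20, 20, 40, 60]        # query meaning: "is 20 one of 20, 40, 60?"
first, rest = args[0], args[1:]

print(operator.lt(first, 40))            # True  -- same order as "first < 40"
print(operator.contains(rest, first))    # True  -- container first, then item
print(first in rest)                     # True  -- the plain-Python spelling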
""" expanded = json.loads(query) + + debug("expanded", type(expanded), expanded) filtered_hosts = [] for host, services in zone_manager.service_states.iteritems(): result = self._process_filter(zone_manager, expanded, host, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py new file mode 100644 index 000000000..f5191f5c9 --- /dev/null +++ b/nova/scheduler/host_filter.py @@ -0,0 +1,75 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other process for ordering. + +Three filters are included: AllHosts, Flavor & JSON. AllHosts just +returns the full, unfiltered list of hosts. Flavor is a hard coded +matching mechanism based on flavor criteria and JSON is an ad-hoc +filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently Flavors and/or InstanceTypes are used for +specifing the type of instance desired. Specific Nova users have +noted a need for a more expressive way of specifying instances. +Since we don't want to get into building full DSL this is a simple +form as an example of how this could be done. In reality, most +consumers will use the more rigid filters such as FlavorFilter. +""" + +import json +import types + +from nova import exception +from nova import flags +from nova import log as logging + +import nova.scheduler + + +LOG = logging.getLogger('nova.scheduler.host_filter') +FLAGS = flags.FLAGS + + +def _get_filters(): + from nova.scheduler import filters + def get_itm(nm): + return getattr(filters, nm) + + return [get_itm(itm) for itm in dir(filters) + if (type(get_itm(itm)) is types.TypeType) + and issubclass(get_itm(itm), filters.AbstractHostFilter)] + + +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. 
+ """ + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in _get_filters(): + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if (host_match.startswith("nova.scheduler.filters") and + (host_match.split(".")[-1] == filter_name)): + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index 818be2f45..a64b25138 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -20,6 +20,7 @@ import json from nova import exception from nova import test +from nova.scheduler import host_filter from nova.scheduler import filters @@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase): def setUp(self): super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.filteris.AllHostsFilter' + default_host_filter = 'AllHostsFilter' self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, @@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase): def test_choose_filter(self): # Test default filter ... hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter') # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') + hf = host_filter.choose_host_filter('InstanceTypeFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter') # Test invalid filter ... try: host_filter.choose_host_filter('does not exist') @@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase): pass def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() + hf = filters.AllHostsFilter() cooked = hf.instance_type_to_filter(self.instance_type) hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) @@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase): self.assertTrue(host.startswith('host')) def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase): self.assertEquals('host10', just_hosts[5]) def test_instance_type_filter_extra_specs(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.gpu_instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(1, len(hosts)) just_hosts = [host for host, caps in hosts] self.assertEquals('host07', just_hosts[0]) def test_json_filter(self): - hf = host_filter.JsonFilter() + hf = filters.JsonFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = 
hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + self.assertEquals(name.split(".")[-1], 'JsonFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -191,6 +187,12 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) + def debug(*args): + with file("/tmp/debug", "a") as dbg: + msg = " ".join([str(arg) for arg in args]) + dbg.write("%s\n" % msg) + + debug("cooked", cooked, type(cooked)) hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) -- cgit From 19a4ddaf157ebb388cce37ddc142dfad304b8cf0 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Fri, 12 Aug 2011 16:48:13 -0700 Subject: Added add securitygroup to instance and remove securitygroup from instance functionality --- nova/api/openstack/contrib/security_groups.py | 199 ++++++++++++-- nova/api/openstack/create_instance_helper.py | 30 ++- nova/db/api.py | 6 + nova/db/sqlalchemy/api.py | 15 ++ .../api/openstack/contrib/test_security_groups.py | 299 +++++++++++++++++++++ 5 files changed, 530 insertions(+), 19 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index 6c57fbb51..a104a42e4 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -25,10 +25,11 @@ from nova import db from nova import exception from nova import flags from nova import log as logging +from nova import rpc from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi - +from nova.compute import power_state from xml.dom import minidom @@ -73,33 +74,28 @@ class SecurityGroupController(object): context, rule)] return security_group - def show(self, req, id): - """Return data about the given security group.""" - context = req.environ['nova.context'] + def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: - msg = _("Security group id is not integer") - return exc.HTTPBadRequest(explanation=msg) + msg = _("Security group id should be integer") + raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: - return exc.HTTPNotFound(explanation=unicode(exp)) + raise exc.HTTPNotFound(explanation=unicode(exp)) + return security_group + def show(self, req, id): + """Return data about the given security group.""" + context = req.environ['nova.context'] + security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] - try: - id = int(id) - security_group = db.security_group_get(context, id) - except ValueError: - msg = _("Security group id is not integer") - return exc.HTTPBadRequest(explanation=msg) - except exception.SecurityGroupNotFound as exp: - return exc.HTTPNotFound(explanation=unicode(exp)) - + security_group = self._get_security_group(context, id) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) @@ -172,6 +168,135 @@ class SecurityGroupController(object): "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) + def associate(self, req, id, body): + context = req.environ['nova.context'] + + if not 
body: + raise exc.HTTPUnprocessableEntity() + + if not 'security_group_associate' in body: + raise exc.HTTPUnprocessableEntity() + + security_group = self._get_security_group(context, id) + + servers = body['security_group_associate'].get('servers') + + if not servers: + msg = _("No servers found") + return exc.HTTPBadRequest(explanation=msg) + + hosts = set() + for server in servers: + if server['id']: + try: + # check if the server exists + inst = db.instance_get(context, server['id']) + #check if the security group is assigned to the server + if self._is_security_group_associated_to_server( + security_group, inst['id']): + msg = _("Security group %s is already associated with" + " the instance %s") % (security_group['id'], + server['id']) + raise exc.HTTPBadRequest(explanation=msg) + + #check if the instance is in running state + if inst['state'] != power_state.RUNNING: + msg = _("Server %s is not in the running state")\ + % server['id'] + raise exc.HTTPBadRequest(explanation=msg) + + hosts.add(inst['host']) + except exception.InstanceNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + + # Associate security group with the server in the db + for server in servers: + if server['id']: + db.instance_add_security_group(context.elevated(), + server['id'], + security_group['id']) + + for host in hosts: + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group['id']}}) + + return exc.HTTPAccepted() + + def _is_security_group_associated_to_server(self, security_group, + instance_id): + if not security_group: + return False + + instances = security_group.get('instances') + if not instances: + return False + + inst_id = None + for inst_id in (instance['id'] for instance in instances \ + if instance_id == instance['id']): + return True + + return False + + def disassociate(self, req, id, body): + context = req.environ['nova.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + if not 'security_group_disassociate' in body: + raise exc.HTTPUnprocessableEntity() + + security_group = self._get_security_group(context, id) + + servers = body['security_group_disassociate'].get('servers') + + if not servers: + msg = _("No servers found") + return exc.HTTPBadRequest(explanation=msg) + + hosts = set() + for server in servers: + if server['id']: + try: + # check if the instance exists + inst = db.instance_get(context, server['id']) + # Check if the security group is not associated + # with the instance + if not self._is_security_group_associated_to_server( + security_group, inst['id']): + msg = _("Security group %s is not associated with the" + "instance %s") % (security_group['id'], + server['id']) + raise exc.HTTPBadRequest(explanation=msg) + + #check if the instance is in running state + if inst['state'] != power_state.RUNNING: + msg = _("Server %s is not in the running state")\ + % server['id'] + raise exp.HTTPBadRequest(explanation=msg) + + hosts.add(inst['host']) + except exception.InstanceNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + + # Disassociate security group from the server + for server in servers: + if server['id']: + db.instance_remove_security_group(context.elevated(), + server['id'], + security_group['id']) + + for host in hosts: + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group['id']}}) + + return 
exc.HTTPAccepted() + class SecurityGroupRulesController(SecurityGroupController): @@ -226,9 +351,9 @@ class SecurityGroupRulesController(SecurityGroupController): security_group_rule = db.security_group_rule_create(context, values) self.compute_api.trigger_security_group_rules_refresh(context, - security_group_id=security_group['id']) + security_group_id=security_group['id']) - return {'security_group_rule': self._format_security_group_rule( + return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} @@ -368,6 +493,10 @@ class Security_groups(extensions.ExtensionDescriptor): res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController(), + member_actions={ + 'associate': 'POST', + 'disassociate': 'POST' + }, deserializer=deserializer, serializer=serializer) @@ -405,6 +534,40 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} + def _get_servers(self, node): + servers_dict = {'servers': []} + if node is not None: + servers_node = self.find_first_child_named(node, + 'servers') + if servers_node is not None: + for server_node in self.find_children_named(servers_node, + "server"): + servers_dict['servers'].append( + {"id": self.extract_text(server_node)}) + return servers_dict + + def associate(self, string): + """Deserialize an xml-formatted security group associate request""" + dom = minidom.parseString(string) + node = self.find_first_child_named(dom, + 'security_group_associate') + result = {'body': {}} + if node: + result['body']['security_group_associate'] = \ + self._get_servers(node) + return result + + def disassociate(self, string): + """Deserialize an xml-formatted security group disassociate request""" + dom = minidom.parseString(string) + node = self.find_first_child_named(dom, + 'security_group_disassociate') + result = {'body': {}} + if node: + result['body']['security_group_disassociate'] = \ + self._get_servers(node) + return result + class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 1425521a9..4ceb972c0 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -111,6 +111,16 @@ class CreateInstanceHelper(object): if personality: injected_files = self._get_injected_files(personality) + sg_names = [] + security_groups = server_dict.get('security_groups') + if security_groups: + sg_names = [sg['name'] for sg in security_groups if sg.get('name')] + if not sg_names: + sg_names.append('default') + + sg_names = list(set(sg_names)) + LOG.debug(sg_names) + try: flavor_id = self.controller._flavor_id_from_req_data(body) except ValueError as error: @@ -161,7 +171,8 @@ class CreateInstanceHelper(object): zone_blob=zone_blob, reservation_id=reservation_id, min_count=min_count, - max_count=max_count)) + max_count=max_count, + security_group=sg_names)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: @@ -170,6 +181,8 @@ class CreateInstanceHelper(object): except exception.FlavorNotFound as error: msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) + except exception.SecurityGroupNotFound as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) # Let the caller deal with unhandled exceptions. 
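[Editor's note] To make the security-group handling in the create path above concrete: the helper reads the optional security_groups element from the server body, keeps only entries that carry a name, falls back to the 'default' group, and collapses duplicates before handing the list to the compute API. A small sketch of that normalization with an invented request body:

def extract_sg_names(server_dict):
    """Normalize the optional security_groups element of a server create body."""
    groups = server_dict.get('security_groups') or []
    names = [sg['name'] for sg in groups if sg.get('name')]
    if not names:
        names = ['default']          # same fallback as the helper above
    return list(set(names))          # duplicates collapse to a single entry

body = {'server': {'name': 'test-server',
                   'security_groups': [{'name': 'web'},
                                       {'name': 'web'},
                                       {'name': 'ssh'}]}}
print(sorted(extract_sg_names(body['server'])))   # ['ssh', 'web']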
def _handle_quota_error(self, error): @@ -454,6 +467,8 @@ class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer): if personality is not None: server["personality"] = personality + server["security_groups"] = self._extract_security_groups(server_node) + return server def _extract_personality(self, server_node): @@ -470,3 +485,16 @@ class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer): return personality else: return None + + def _extract_security_groups(self, server_node): + """Marshal the security_groups attribute of a parsed request""" + node = self.find_first_child_named(server_node, "security_groups") + security_groups = [] + if node is not None: + for sg_node in self.find_children_named(node, "security_group"): + item = {} + name_node = self.find_first_child_named(sg_node, "name") + if name_node: + item["name"] = self.extract_text(name_node) + security_groups.append(item) + return security_groups diff --git a/nova/db/api.py b/nova/db/api.py index 0f2218752..cf814d43e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -570,6 +570,12 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) +def instance_remove_security_group(context, instance_id, security_group_id): + """Disassociate the given security group from the given instance.""" + return IMPL.instance_remove_security_group(context, instance_id, + security_group_id) + + def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): """Get instances.vcpus by host and project.""" return IMPL.instance_get_vcpu_sum_by_host_and_project(context, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e5d35a20b..ba16f9109 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1482,6 +1482,19 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +@require_context +def instance_remove_security_group(context, instance_id, security_group_id): + """Disassociate the given security group from the given instance""" + session = get_session() + + session.query(models.SecurityGroupInstanceAssociation).\ + filter_by(instance_id=instance_id).\ + filter_by(security_group_id=security_group_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + @require_context def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): session = get_session() @@ -2456,6 +2469,7 @@ def security_group_get(context, security_group_id, session=None): filter_by(deleted=can_read_deleted(context),).\ filter_by(id=security_group_id).\ options(joinedload_all('rules')).\ + options(joinedload_all('instances')).\ first() else: result = session.query(models.SecurityGroup).\ @@ -2463,6 +2477,7 @@ def security_group_get(context, security_group_id, session=None): filter_by(id=security_group_id).\ filter_by(project_id=context.project_id).\ options(joinedload_all('rules')).\ + options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFound( diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index 4317880ca..894b0c591 100644 --- a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -15,10 +15,13 @@ # under the License. 
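[Editor's note] The tests that follow exercise the new associate/disassociate member actions with JSON bodies of a fixed shape. For reference, a sketch of how a client might build such a request; the endpoint, group id and server ids are placeholders:

import json

def build_body(server_ids, action='associate'):
    """Body for POST /v1.1/os-security-groups/{group_id}/associate (or disassociate)."""
    key = 'security_group_%s' % action
    return json.dumps({key: {'servers': [{'id': sid} for sid in server_ids]}})

# e.g. attach security group 1 to server 42 (placeholder ids):
#   POST /v1.1/os-security-groups/1/associate
#   Content-Type: application/json
print(build_body([42]))
# -> {"security_group_associate": {"servers": [{"id": 42}]}}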
import json +import mox +import nova import unittest import webob from xml.dom import minidom +from nova import exception from nova import test from nova.api.openstack.contrib import security_groups from nova.tests.api.openstack import fakes @@ -51,6 +54,28 @@ def _create_security_group_request_dict(security_group): return {'security_group': sg} +def return_server(context, server_id): + return {'id': server_id, 'state': 0x01, 'host': "localhost"} + + +def return_non_running_server(context, server_id): + return {'id': server_id, 'state': 0x02, + 'host': "localhost"} + + +def return_security_group(context, group_id): + return {'id': group_id, "instances": [ + {'id': 1}]} + + +def return_security_group_without_instances(context, group_id): + return {'id': group_id} + + +def return_server_nonexistant(context, server_id): + raise exception.InstanceNotFound() + + class TestSecurityGroups(test.TestCase): def setUp(self): super(TestSecurityGroups, self).setUp() @@ -325,6 +350,280 @@ class TestSecurityGroups(test.TestCase): response = self._delete_security_group(11111111) self.assertEquals(response.status_int, 404) + def test_associate_by_non_existing_security_group_id(self): + req = webob.Request.blank('/v1.1/os-security-groups/111111/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + {"id": '2'} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 404) + + def test_associate_by_invalid_security_group_id(self): + req = webob.Request.blank('/v1.1/os-security-groups/invalid/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + {"id": "2"} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_associate_without_body(self): + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + req.body = json.dumps(None) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_associate_no_security_group_element(self): + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate_invalid": { + "servers": [ + {"id": "2"} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_associate_no_instances(self): + #self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_associate_non_existing_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + 
body_dict = {"security_group_associate": { + "servers": [ + {'id': 2} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 404) + + def test_associate_non_running_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db, 'security_group_get', + return_security_group_without_instances) + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + {'id': 1} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_associate_already_associated_security_group_to_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + {'id': 1} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_associate(self): + self.stubs.Set(nova.db, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') + nova.db.instance_add_security_group(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + self.stubs.Set(nova.db, 'security_group_get', + return_security_group_without_instances) + self.mox.ReplayAll() + + req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_associate": { + "servers": [ + {'id': 1} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 202) + + def test_disassociate_by_non_existing_security_group_id(self): + req = webob.Request.blank('/v1.1/os-security-groups/1111/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {"id": "2"} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 404) + + def test_disassociate_by_invalid_security_group_id(self): + req = webob.Request.blank('/v1.1/os-security-groups/id/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {"id": "2"} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_disassociate_without_body(self): + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + req.body = json.dumps(None) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_disassociate_no_security_group_element(self): + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate_invalid": { + "servers": [ + {"id": "2"} + ] + } + } + req.body = 
json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_disassociate_no_instances(self): + #self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_disassociate_non_existing_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {'id': 2} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 404) + + def test_disassociate_non_running_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db, 'security_group_get', + return_security_group_without_instances) + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {'id': 1} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_disassociate_not_associated_security_group_to_instance(self): + self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db, 'security_group_get', return_security_group) + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {'id': 2} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_disassociate(self): + self.stubs.Set(nova.db, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') + nova.db.instance_remove_security_group(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + self.stubs.Set(nova.db, 'security_group_get', + return_security_group) + self.mox.ReplayAll() + + req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + body_dict = {"security_group_disassociate": { + "servers": [ + {'id': 1} + ] + } + } + req.body = json.dumps(body_dict) + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 202) + class TestSecurityGroupRules(test.TestCase): def setUp(self): -- cgit From 1f3eb69ec547737447e91116881a8cb85157d65c Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 12 Aug 2011 18:16:07 -0700 Subject: fix issue introduced in merge --- nova/network/linux_net.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index be4269392..904014716 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -463,7 
+463,7 @@ def initialize_gateway_device(dev, network_ref): # NOTE(vish): The ip for dnsmasq has to be the first address on the # bridge for it to respond to reqests properly - suffix = net_attrs['cidr'].rpartition('/')[2] + suffix = network_ref['cidr'].rpartition('/')[2] out, err = _execute('ip', 'addr', 'add', '%s/%s' % (network_ref['dhcp_server'], suffix), @@ -477,7 +477,7 @@ def initialize_gateway_device(dev, network_ref): raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): _execute('ip', '-f', 'inet6', 'addr', - 'change', net_attrs['cidr_v6'], + 'change', network_ref['cidr_v6'], 'dev', dev, run_as_root=True) # NOTE(vish): If the public interface is the same as the # bridge, then the bridge has to be in promiscuous -- cgit From e0e49dd7340bbb26c82f18a94a6582a5684925fa Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sat, 13 Aug 2011 18:58:29 -0700 Subject: have NetworkManager generate MAC address and pass it to the driver for plugging. Sets the stage for being able to do duplicate checks on those MACs as well. --- nova/network/linux_net.py | 68 ++++++++++++++++++++++++++++------------------- nova/network/manager.py | 18 ++++++++++--- nova/utils.py | 9 ------- 3 files changed, 54 insertions(+), 41 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 904014716..248f1ce5a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -458,13 +458,13 @@ def floating_forward_rules(floating_ip, fixed_ip): def initialize_gateway_device(dev, network_ref): - if not network_ref: - return + if not network_ref: + return - # NOTE(vish): The ip for dnsmasq has to be the first address on the - # bridge for it to respond to reqests properly - suffix = network_ref['cidr'].rpartition('/')[2] - out, err = _execute('ip', 'addr', 'add', + # NOTE(vish): The ip for dnsmasq has to be the first address on the + # bridge for it to respond to reqests properly + suffix = network_ref['cidr'].rpartition('/')[2] + out, err = _execute('ip', 'addr', 'add', '%s/%s' % (network_ref['dhcp_server'], suffix), 'brd', @@ -473,19 +473,18 @@ def initialize_gateway_device(dev, network_ref): dev, run_as_root=True, check_exit_code=False) - if err and err != 'RTNETLINK answers: File exists\n': - raise exception.Error('Failed to add ip: %s' % err) - if(FLAGS.use_ipv6): - _execute('ip', '-f', 'inet6', 'addr', + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) + if(FLAGS.use_ipv6): + _execute('ip', '-f', 'inet6', 'addr', 'change', network_ref['cidr_v6'], 'dev', dev, run_as_root=True) - # NOTE(vish): If the public interface is the same as the - # bridge, then the bridge has to be in promiscuous - # to forward packets properly. - if(FLAGS.public_interface == dev): - _execute('ip', 'link', 'set', + # NOTE(vish): If the public interface is the same as the + # bridge, then the bridge has to be in promiscuous + # to forward packets properly. + if(FLAGS.public_interface == dev): + _execute('ip', 'link', 'set', 'dev', dev, 'promisc', 'on', run_as_root=True) - _execute('ip', 'link', 'set', dev, 'up', run_as_root=True) def get_dhcp_leases(context, network_ref): @@ -718,6 +717,7 @@ def _ip_bridge_cmd(action, params, device): cmd.extend(['dev', device]) return cmd + # Similar to compute virt layers, the Linux network node # code uses a flexible driver model to support different ways # of creating ethernet interfaces and attaching them to the network. 
@@ -725,8 +725,8 @@ def _ip_bridge_cmd(action, params, device): # act as gateway/dhcp/vpn/etc. endpoints not VM interfaces. -def plug(network): - return interface_driver.plug(network) +def plug(network, mac_address): + return interface_driver.plug(network, mac_address) def unplug(network): @@ -737,7 +737,7 @@ class LinuxNetInterfaceDriver(object): """Abstract class that defines generic network host API""" """ for for all Linux interface drivers.""" - def plug(self, network): + def plug(self, network, mac_address): """Create Linux device, return device name""" raise NotImplementedError() @@ -749,13 +749,14 @@ class LinuxNetInterfaceDriver(object): # plugs interfaces using Linux Bridge class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): - def plug(self, network): + def plug(self, network, mac_address): if network.get('vlan', None) is not None: LinuxBridgeInterfaceDriver.ensure_vlan_bridge( network['vlan'], network['bridge'], network['bridge_interface'], - network) + network, + mac_address) else: LinuxBridgeInterfaceDriver.ensure_bridge( network['bridge'], @@ -769,16 +770,16 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): @classmethod def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface, - net_attrs=None): + net_attrs=None, mac_address=None): """Create a vlan and bridge unless they already exist.""" interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num, - bridge_interface) + bridge_interface, mac_address) LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs) return interface @classmethod @utils.synchronized('ensure_vlan', external=True) - def ensure_vlan(_self, vlan_num, bridge_interface): + def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None): """Create a vlan unless it already exists.""" interface = 'vlan%s' % vlan_num if not _device_exists(interface): @@ -787,6 +788,11 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): 'VLAN_PLUS_VID_NO_PAD', run_as_root=True) _execute('vconfig', 'add', bridge_interface, vlan_num, run_as_root=True) + # (danwent) the bridge will inherit this address, so we want to + # make sure it is the value set from the NetworkManager + if mac_address: + _execute('ip', 'link', 'set', interface, "address", + mac_address, run_as_root=True) _execute('ip', 'link', 'set', interface, 'up', run_as_root=True) return interface @@ -812,6 +818,11 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): _execute('brctl', 'setfd', bridge, 0, run_as_root=True) # _execute('brctl setageing %s 10' % bridge, run_as_root=True) _execute('brctl', 'stp', bridge, 'off', run_as_root=True) + # (danwent) bridge device MAC address can't be set directly. + # instead it inherits the MAC address of the first device on the + # bridge, which will either be the vlan interface, or a + # physical NIC. 
+ _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True) if interface: out, err = _execute('brctl', 'addif', bridge, interface, @@ -856,11 +867,10 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): # plugs interfaces using Open vSwitch class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): - def plug(self, network): + def plug(self, network, mac_address): dev = "gw-" + str(network['id']) if not _device_exists(dev): bridge = FLAGS.linuxnet_ovs_integration_bridge - mac_addr = utils.generate_mac_address() _execute('ovs-vsctl', '--', '--may-exist', 'add-port', bridge, dev, '--', 'set', 'Interface', dev, "type=internal", @@ -869,10 +879,12 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): '--', 'set', 'Interface', dev, "external-ids:iface-status=active", '--', 'set', 'Interface', dev, - "external-ids:attached-mac=%s" % mac_addr, + "external-ids:attached-mac=%s" % mac_address, run_as_root=True) - _execute('ip', 'link', 'set', dev, "address", mac_addr, + _execute('ip', 'link', 'set', dev, "address", mac_address, run_as_root=True) + _execute('ip', 'link', 'set', dev, 'up', run_as_root=True) + return dev def unplug(self, network): diff --git a/nova/network/manager.py b/nova/network/manager.py index 653d116fb..1ad48fbc1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -510,7 +510,7 @@ class NetworkManager(manager.SchedulerDependentManager): def _allocate_mac_addresses(self, context, instance_id, networks): """Generates mac addresses and creates vif rows in db for them.""" for network in networks: - vif = {'address': utils.generate_mac_address(), + vif = {'address': self.generate_mac_address(), 'instance_id': instance_id, 'network_id': network['id']} # try FLAG times to create a vif record with a unique mac_address @@ -519,12 +519,20 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.virtual_interface_create(context, vif) break except exception.VirtualInterfaceCreateException: - vif['address'] = utils.generate_mac_address() + vif['address'] = self.generate_mac_address() else: self.db.virtual_interface_delete_by_instance(context, instance_id) raise exception.VirtualInterfaceMacAddressException() + def generate_mac_address(self): + """Generate an Ethernet MAC address.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" networks = [self.db.network_get(context, network_id)] @@ -796,7 +804,8 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): """Sets up network on this host.""" network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - dev = self.driver.plug(network_ref) + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) self.driver.initialize_gateway_device(dev, network_ref) if not FLAGS.fake_network: @@ -897,7 +906,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): address = network_ref['vpn_public_address'] network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - dev = self.driver.plug(network_ref) + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) self.driver.initialize_gateway_device(dev, network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set diff --git a/nova/utils.py b/nova/utils.py index 
46d60c735..7276b6bd5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -338,15 +338,6 @@ def get_my_linklocal(interface): " :%(ex)s") % locals()) -def generate_mac_address(): - """Generate an Ethernet MAC address.""" - mac = [0x02, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - - def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: -- cgit From 53ca062830639de242d2cadda4f6bf473d4b6b62 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sun, 14 Aug 2011 20:05:18 -0700 Subject: fix missing 'run_as_root' from bad merge --- nova/network/linux_net.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 248f1ce5a..57c1d0c28 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -846,8 +846,10 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): fields = line.split() if fields and fields[0] == 'inet': params = fields[1:-1] - _execute(*_ip_bridge_cmd('del', params, fields[-1])) - _execute(*_ip_bridge_cmd('add', params, bridge)) + _execute(*_ip_bridge_cmd('del', params, fields[-1]), + run_as_root=True) + _execute(*_ip_bridge_cmd('add', params, bridge), + run_as_root=True) if gateway: _execute('route', 'add', 'default', 'gw', gateway, run_as_root=True) -- cgit From 8666aca320ce95840a378231bfe81bc4e759df6e Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Mon, 15 Aug 2011 11:50:54 -0700 Subject: Fixed merging issue --- nova/api/openstack/create_instance_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 7e9d48c02..c8798536e 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -164,7 +164,7 @@ class CreateInstanceHelper(object): reservation_id=reservation_id, min_count=min_count, max_count=max_count, - user_data=user_data)) + user_data=user_data, availability_zone=availability_zone)) except quota.QuotaError as error: self._handle_quota_error(error) -- cgit From 066b675e3ce5c2bd67dde124cbe01b68bd1eded8 Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 15 Aug 2011 13:22:14 -0700 Subject: fix bug which DescribeInstances in EC2 api was returning deleted instances --- nova/db/sqlalchemy/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e5d35a20b..e7b71d494 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1209,7 +1209,8 @@ def instance_get_all_by_filters(context, filters): options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ - options(joinedload('instance_type')) + options(joinedload('instance_type')).\ + filter_by(deleted=can_read_deleted(context)) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. 
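A minimal standalone sketch (not part of the patch series) of the row gating that the DescribeInstances fix above relies on: filter_by(deleted=can_read_deleted(context)) keeps only rows whose 'deleted' flag matches what the calling context is allowed to see. The Context class, helper, and in-memory rows below are invented for illustration and are not Nova code.

    class Context(object):
        """Hypothetical stand-in for a request context."""
        def __init__(self, read_deleted=False):
            self.read_deleted = read_deleted


    def can_read_deleted(context):
        # Mirrors the idea of the helper used in the query above: only a
        # context that explicitly asks for deleted rows is shown them.
        return context.read_deleted


    def get_visible_instances(instances, context):
        # Plays the role of .filter_by(deleted=can_read_deleted(context)):
        # a row's 'deleted' flag must equal the caller's read_deleted mode,
        # so a normal context never sees destroyed instances.
        return [inst for inst in instances
                if inst['deleted'] == can_read_deleted(context)]


    rows = [{'id': 1, 'deleted': False}, {'id': 2, 'deleted': True}]
    assert [i['id'] for i in get_visible_instances(rows, Context())] == [1]
    assert [i['id'] for i in get_visible_instances(rows, Context(True))] == [2]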
-- cgit From 54ba0d6d25f60b5acb363d141aeba63e4c727c72 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:28:28 -0500 Subject: fix typo where I forgot a comma --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 1513dd473..31bb29f81 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -671,7 +671,7 @@ class NetworkManager(manager.SchedulerDependentManager): if used_subnet in subnet: msg = _('requested cidr (%{cidr}) conflicts with ' 'existing smaller cidr (%{smaller})') - raise ValueError(msg % {'cidr': subnet + raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) -- cgit From d3e2be67d116dcdbe0484a10708ae00d040d2d9f Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:29:55 -0500 Subject: i hate these exceptions where it should just return an empty list --- nova/network/manager.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 31bb29f81..ddb60ecfa 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -656,7 +656,10 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4, count=num_networks)) - nets = self.db.network_get_all(context) + try: + nets = self.db.network_get_all(context) + except exception.NoNetworksFound: + nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] for subnet in subnets_v4: -- cgit From 5276b4981dc6e8c4f3d4b9c733939290df3c6a72 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:45:14 -0500 Subject: pep8 fix --- nova/network/manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index ddb60ecfa..6ba73300e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -677,7 +677,6 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) - subnets = itertools.izip_longest(subnets_v4, subnets_v6) for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} -- cgit From 945a874a77c63710f57fa31988ba7f9ba65a5ad0 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:47:11 -0500 Subject: return the created networks --- nova/network/manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 6ba73300e..7748479e9 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -677,6 +677,7 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) + networks = [] subnets = itertools.izip_longest(subnets_v4, subnets_v6) for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} @@ -728,9 +729,12 @@ class NetworkManager(manager.SchedulerDependentManager): if not network: raise ValueError(_('Network already exists!')) + else: + networks.append(network) if network and cidr and subnet_v4: self._create_fixed_ips(context, network['id']) + return networks @property def _bottom_reserved_ips(self): # pylint: disable=R0201 -- cgit From f06f80591a41f5d1b373677937bbbcddcfb0bb7c Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 15 Aug 2011 13:48:09 -0700 Subject: added cloud unit test for describe_instances to ensure doesn't return deleted instances --- nova/tests/test_cloud.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index b2afc53c9..07a35c447 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -487,6 +487,16 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, comp1['id']) db.service_destroy(self.context, comp2['id']) + def test_describe_instances_deleted(self): + args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args) + inst2 = db.instance_create(self.context, args) + db.instance_destroy(self.context, inst1.id) + result = self.cloud.describe_instances(self.context) + result = result['reservationSet'][0]['instancesSet'] + print result + self.assertEqual(1, len(result)) + def _block_device_mapping_create(self, instance_id, mappings): volumes = [] for bdm in mappings: -- cgit From 3e561f148fcba627f8fbd4ab1089f426fbc2e61b Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 15 Aug 2011 13:58:44 -0700 Subject: adding sqlalchemi api tests for test_instance_get_all_by_filter to ensure doesn't return deleted instances --- nova/tests/test_cloud.py | 1 - nova/tests/test_db_api.py | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 07a35c447..39358eeff 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -494,7 +494,6 @@ class CloudTestCase(test.TestCase): db.instance_destroy(self.context, inst1.id) result = self.cloud.describe_instances(self.context) result = result['reservationSet'][0]['instancesSet'] - print result self.assertEqual(1, len(result)) def _block_device_mapping_create(self, instance_id, mappings): diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 0c07cbb7c..ed363d1be 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -76,3 +76,18 @@ class DbApiTestCase(test.TestCase): self.assertEqual(instance['id'], result['id']) self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address, '1.2.1.2') + + def test_instance_get_all_by_filters(self): + args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args) + inst2 = db.instance_create(self.context, args) + result = db.instance_get_all_by_filters(self.context, {}) + self.assertTrue(2, len(result)) + + def test_instance_get_all_by_filters_deleted(self): + args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args) + inst2 = db.instance_create(self.context, args) + db.instance_destroy(self.context, inst1.id) + result = db.instance_get_all_by_filters(self.context, {}) + self.assertTrue(1, len(result)) -- cgit From a20e18c5ae2c77ed005e5dc9cec7b92d67e50a0b Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:30:51 -0500 Subject: allow for finding a network that fits the size, also format string correctly --- nova/network/manager.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 7748479e9..7e9a0ecf6 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -662,18 +662,26 @@ class NetworkManager(manager.SchedulerDependentManager): nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] - for subnet in subnets_v4: + for subnet in list(subnets_v4): if subnet in used_subnets: - raise ValueError(_('cidr already in use')) + next_subnet = subnet.next() + while next_subnet in subnets_v4: + next_subnet = next_subnet.next() + 
if next_subnet in fixed_net_v4: + subnets_v4.remove(subnet) + subnets_v4.append(next_subnet) + subnet = next_subnet + else: + raise ValueError(_('cidr already in use')) for used_subnet in used_subnets: if subnet in used_subnet: - msg = _('requested cidr (%{cidr}) conflicts with ' - 'existing supernet (%{super})') + msg = _('requested cidr (%(cidr)s) conflicts with ' + 'existing supernet (%(super)s)') raise ValueError(msg % {'cidr': subnet, 'super': used_subnet}) if used_subnet in subnet: - msg = _('requested cidr (%{cidr}) conflicts with ' - 'existing smaller cidr (%{smaller})') + msg = _('requested cidr (%(cidr)s) conflicts with ' + 'existing smaller cidr (%(smaller)s)') raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) -- cgit From a34943e89e9aee0a26bd4fd03a2b12fc954029fd Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:41:11 -0500 Subject: have the tests call create_networks directly --- nova/network/manager.py | 28 ++++++++++++------ nova/tests/test_network.py | 72 +++++++++++++++++++++++++++++----------------- 2 files changed, 65 insertions(+), 35 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 7e9a0ecf6..4954dc7e9 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -662,12 +662,17 @@ class NetworkManager(manager.SchedulerDependentManager): nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] + def find_next(subnet): + next_subnet = subnet.next() + while next_subnet in subnets_v4: + next_subnet = next_subnet.next() + if next_subnet in fixed_net_v4: + return next_subnet + for subnet in list(subnets_v4): if subnet in used_subnets: - next_subnet = subnet.next() - while next_subnet in subnets_v4: - next_subnet = next_subnet.next() - if next_subnet in fixed_net_v4: + next_subnet = find_next(subnet) + if next_subnet: subnets_v4.remove(subnet) subnets_v4.append(next_subnet) subnet = next_subnet @@ -680,10 +685,17 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'super': used_subnet}) if used_subnet in subnet: - msg = _('requested cidr (%(cidr)s) conflicts with ' - 'existing smaller cidr (%(smaller)s)') - raise ValueError(msg % {'cidr': subnet, - 'smaller': used_subnet}) + next_subnet = find_next(subnet) + if next_subnet: + subnets_v4.remove(subnet) + subnets_v4.append(next_subnet) + subnet = next_subnet + else: + msg = _('requested cidr (%(cidr)s) conflicts ' + 'with existing smaller cidr ' + '(%(smaller)s)') + raise ValueError(msg % {'cidr': subnet, + 'smaller': used_subnet}) networks = [] subnets = itertools.izip_longest(subnets_v4, subnets_v6) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index c673f5d06..0ead680ee 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -255,7 +255,7 @@ class CommonNetworkTestCase(test.TestCase): raise exception.NetworkNotFoundForCidr() def network_create_safe(self, context, net): - fakenet = {} + fakenet = dict(net) fakenet['id'] = 999 return fakenet @@ -269,6 +269,9 @@ class CommonNetworkTestCase(test.TestCase): def deallocate_fixed_ip(self, context, address): self.deallocate_called = address + def _create_fixed_ips(self, context, network_id): + pass + def fake_create_fixed_ips(self, context, network_id): return None @@ -286,16 +289,20 @@ class CommonNetworkTestCase(test.TestCase): def test_validate_cidrs(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) + nets = manager.create_networks(None, 
'fake', '192.168.0.0/24', + False, 1, 256, None, None, None, + None) self.assertEqual(1, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in cidrs) def test_validate_cidrs_split_exact_in_half(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) + nets = manager.create_networks(None, 'fake', '192.168.0.0/24', + False, 2, 128, None, None, None, + None) self.assertEqual(2, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/25' in cidrs) self.assertTrue('192.168.0.128/25' in cidrs) @@ -306,9 +313,11 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, + None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -324,8 +333,9 @@ class CommonNetworkTestCase(test.TestCase): self.mox.ReplayAll() # ValueError: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr - args = [None, '192.168.2.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_smaller_cidr_in_use(self): manager = self.FakeNetworkManager() @@ -334,9 +344,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -350,9 +361,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.9/29'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) + nets = manager.create_networks(None, 'fake', '192.168.2.0/24', + False, 3, 32, None, None, None, None) self.assertEqual(3, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) @@ -367,17 +379,19 @@ class CommonNetworkTestCase(test.TestCase): {'id': 3, 'cidr': '192.168.2.128/26'}] manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() - args = [None, '192.168.2.0/24', 3, 64] + args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() - args 
= [None, '192.168.0.0/24', 2, 256] + args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, + None, None) # ValueError: network_size * num_networks exceeds cidr size - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_already_used(self): manager = self.FakeNetworkManager() @@ -387,20 +401,23 @@ class CommonNetworkTestCase(test.TestCase): 'cidr': '192.168.0.0/24'}]) self.mox.ReplayAll() # ValueError: cidr already in use - args = [None, '192.168.0.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = self.FakeNetworkManager() - args = [None, '192.168.0.0/24', 200, 256] + args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) - returned_cidrs = [str(net) for net in nets] + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 2, 256, None, None, None, None) + returned_cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) @@ -411,10 +428,11 @@ class CommonNetworkTestCase(test.TestCase): fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() - args = [None, '192.168.0.0/24', 1, 256] + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) # ValueError: requested cidr (192.168.0.0/24) conflicts # with existing supernet - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' @@ -424,7 +442,7 @@ class CommonNetworkTestCase(test.TestCase): args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None] result = manager.create_networks(*args) - self.assertEqual(manager.create_networks(*args), None) + self.assertTrue(manager.create_networks(*args)) def test_create_networks_cidr_already_used(self): manager = self.FakeNetworkManager() @@ -444,4 +462,4 @@ class CommonNetworkTestCase(test.TestCase): self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None] - self.assertEqual(manager.create_networks(*args), None) + self.assertTrue(manager.create_networks(*args)) -- cgit From cef63a6c2e8f3e1baa126ba41703aac81c2fc6ae Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:46:30 -0500 Subject: add note --- nova/network/manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 4954dc7e9..a52dd1953 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -656,6 +656,8 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4, count=num_networks)) + # NOTE(jkoelker): This replaces the _validate_cidrs call and + # prevents looping multiple times try: nets = self.db.network_get_all(context) except 
exception.NoNetworksFound: -- cgit From 8b4805b24bb51adca501a38b4b7dbf730cc826d2 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:47:48 -0500 Subject: pep8 --- nova/compute/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 16b8e14b4..4e84cb936 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1315,7 +1315,8 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + self.driver.ensure_filtering_rules_for_instance(instance_ref, + network_info) # Preparation for block migration if block_migration: -- cgit From e57e9e9bbc37fbe87052ccc66bf7b97501e1e759 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:48:28 -0500 Subject: pep8 --- nova/virt/libvirt/connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2b17e244a..6ad4328b1 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1539,7 +1539,8 @@ class LibvirtConnection(driver.ComputeDriver): # basic-filtering must be set here. self.firewall_driver.setup_basic_filtering(instance_ref, network_info) # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 55dd18f30eee4f4a75c825c33d4a78b2ef94be4a Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:09:39 -0500 Subject: got tests passing with logic changes --- nova/scheduler/abstract_scheduler.py | 53 +++--- nova/scheduler/base_scheduler.py | 50 +----- nova/scheduler/filters/__init__.py | 17 ++ nova/scheduler/filters/abstract_filter.py | 2 +- nova/scheduler/filters/json_filter.py | 14 +- nova/scheduler/host_filter.py | 25 +-- nova/scheduler/least_cost.py | 134 +++++++-------- nova/tests/scheduler/test_host_filter.py | 2 - nova/tests/scheduler/test_least_cost_scheduler.py | 16 +- nova/tests/test_host_filter.py | 200 ---------------------- 10 files changed, 125 insertions(+), 388 deletions(-) delete mode 100644 nova/tests/test_host_filter.py diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a0734f322..2f1ede0a4 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -45,20 +45,19 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler') class InvalidBlob(exception.NovaException): message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") + "to instance create request.") class AbstractScheduler(driver.Scheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. 
Broken out for testing.""" return api.call_zone_method(context, method, specs=specs, zones=zones) def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone.""" host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] @@ -68,21 +67,21 @@ class AbstractScheduler(driver.Scheduler): # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) + queue = db.queue_get_for(context, "compute", host) + params = {"method": "run_instance", "args": kwargs} + rpc.cast(context, queue, params) LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) + % locals()) def _decrypt_blob(self, blob): """Returns the decrypted blob or None if invalid. Broken out - for testing.""" + for testing. + """ decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) try: json_entry = decryptor(blob) @@ -92,15 +91,15 @@ class AbstractScheduler(driver.Scheduler): return None def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): + request_spec, kwargs): """Once we have determined that the request should go to one of our children, we need to fabricate a new POST /servers/ call with the same parameters that were passed into us. Note that we have to reverse engineer from our args to get back the image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - + come in from EC2 (which doesn't use these things). + """ instance_type = request_spec['instance_type'] instance_properties = request_spec['instance_properties'] @@ -109,30 +108,26 @@ class AbstractScheduler(driver.Scheduler): meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] - files = kwargs['injected_files'] ipgroup = None # Not supported in OS API ... yet - child_zone = zone_info['child_zone'] child_blob = zone_info['child_blob'] zone = db.zone_get(context, child_zone) url = zone.api_url LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) + ". ReservationID=%(reservation_id)s") % locals()) nova = None try: nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - + "to talk to zone at %(url)s.") % locals()) nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) + child_blob, reservation_id=reservation_id) def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -145,8 +140,8 @@ class AbstractScheduler(driver.Scheduler): means we gathered the info from one of our children. It's possible that, when we decrypt the 'blob' field, it contains "child_blob" data. In which case we forward the - request.""" - + request. 
+ """ host_info = None if "blob" in build_plan_item: # Request was passed in from above. Is it for us? @@ -161,21 +156,20 @@ class AbstractScheduler(driver.Scheduler): # Valid data ... is it for us? if 'child_zone' in host_info and 'child_blob' in host_info: self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) + request_spec, kwargs) else: self._provision_resource_locally(context, host_info, request_spec, - kwargs) + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" if "hostname" in build_plan_item: self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) + request_spec, kwargs) return - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) + instance_id, request_spec, kwargs) def _adjust_child_weights(self, child_results, zones): """Apply the Scale and Offset values from the Zone definition @@ -231,7 +225,6 @@ class AbstractScheduler(driver.Scheduler): for num in xrange(num_instances): if not build_plan: break - build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, request_spec, kwargs) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index e14ee349e..35e5af035 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -43,40 +43,13 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler): # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. - instance_type = request_spec['instance_type'] + instance_type = request_spec.get("instance_type", None) + if instance_type is None: + # No way to select; return the specified hosts + return hosts or [] name, query = selected_filter.instance_type_to_filter(instance_type) return selected_filter.filter_hosts(self.zone_manager, query) - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '_filter' function more appropriate. 
- """ - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no _filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts - def weigh_hosts(self, topic, request_spec, hosts): """Derived classes may override this to provide more sophisticated scheduling objectives @@ -84,18 +57,3 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler): # NOTE(sirp): The default logic is the same as the NoopCostFunction return [dict(weight=1, hostname=hostname, capabilities=capabilities) for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py index 4c9187c5a..b86fb795f 100644 --- a/nova/scheduler/filters/__init__.py +++ b/nova/scheduler/filters/__init__.py @@ -13,6 +13,23 @@ # License for the specific language governing permissions and limitations # under the License. +""" +There are three filters included: AllHosts, InstanceType & JSON. + +AllHosts just returns the full, unfiltered list of hosts. +InstanceType is a hard coded matching mechanism based on flavor criteria. +JSON is an ad-hoc filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently InstanceTypes are used for specifing the type of instance desired. +Specific Nova users have noted a need for a more expressive way of specifying +instance requirements. Since we don't want to get into building full DSL, +this filter is a simple form as an example of how this could be done. +In reality, most consumers will use the more rigid filters such as the +InstanceType filter. 
+""" + from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index fe5610923..d9d272130 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -19,7 +19,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.filters.AllHostsFilter', + 'AllHostsFilter', 'Which filter to use for filtering hosts') class AbstractHostFilter(object): diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 889b96915..caf22f5d5 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -20,11 +20,6 @@ import operator import nova.scheduler from nova.scheduler.filters import abstract_filter -def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - class JsonFilter(abstract_filter.AbstractHostFilter): """Host Filter to allow simple JSON-based grammar for @@ -38,12 +33,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter): if len(args) < 2: return False if op is operator.contains: - debug("ARGS", type(args), args) - debug("op", op) - debug("REVERSED!!!") - # operator.contains reverses the param order. - bad = [arg for arg in args[1:] - if not op(args, args[0])] + bad = not args[0] in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] @@ -144,8 +134,6 @@ class JsonFilter(abstract_filter.AbstractHostFilter): specified in the query. """ expanded = json.loads(query) - - debug("expanded", type(expanded), expanded) filtered_hosts = [] for host, services in zone_manager.service_states.iteritems(): result = self._process_filter(zone_manager, expanded, host, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index f5191f5c9..be618f3f3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -20,43 +20,32 @@ either incompatible or insufficient to accept a newly-requested instance are removed by Host Filter classes from consideration. Those that pass the filter are then passed on for weighting or other process for ordering. -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. +Filters are in the 'filters' directory that is off the 'scheduler' +directory of nova. Additional filters can be created and added to that +directory; be sure to add them to the filters/__init__.py file so that +they are part of the nova.schedulers.filters namespace. 
""" -import json import types from nova import exception from nova import flags -from nova import log as logging - import nova.scheduler -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS def _get_filters(): + # Imported here to avoid circular imports from nova.scheduler import filters def get_itm(nm): return getattr(filters, nm) return [get_itm(itm) for itm in dir(filters) if (type(get_itm(itm)) is types.TypeType) - and issubclass(get_itm(itm), filters.AbstractHostFilter)] + and issubclass(get_itm(itm), filters.AbstractHostFilter) + and get_itm(itm) is not filters.AbstractHostFilter] def choose_host_filter(filter_name=None): diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index a58b11289..903d786cd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ -# TODO(dabo): This class will be removed in the next merge prop; it remains now -# because much of the code will be refactored into different classes. import collections from nova import flags from nova import log as logging -from nova.scheduler import abstract_scheduler +from nova.scheduler import base_scheduler from nova import utils from nova import exception @@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost') FLAGS = flags.FLAGS flags.DEFINE_list('least_cost_scheduler_cost_functions', - ['nova.scheduler.least_cost.noop_cost_fn'], - 'Which cost functions the LeastCostScheduler should use.') + ['nova.scheduler.least_cost.noop_cost_fn'], + 'Which cost functions the LeastCostScheduler should use.') # TODO(sirp): Once we have enough of these rules, we can break them out into a # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, - 'How much weight to give the noop cost function') + 'How much weight to give the noop cost function') +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') def noop_cost_fn(host): @@ -52,19 +52,64 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, - 'How much weight to give the fill-first cost function') - - def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude - hosts that don't have enough ram""" - hostname, caps = host - free_mem = caps['host_memory_free'] + hosts that don't have enough ram. + """ + hostname, service = host + caps = service.get("compute", {}) + free_mem = caps.get("host_memory_free", 0) return free_mem -class LeastCostScheduler(abstract_scheduler.AbstractScheduler): +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + + +def weighted_sum(domain, weighted_fns, normalize=True): + """Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list of scores. To pair with hosts do: + zip(scores, hosts) + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... 
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + domain_scores.append(elem_score) + return domain_scores + + +class LeastCostScheduler(base_scheduler.BaseScheduler): def __init__(self, *args, **kwargs): self.cost_fns_cache = {} super(LeastCostScheduler, self).__init__(*args, **kwargs) @@ -73,10 +118,8 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ - if topic in self.cost_fns_cache: return self.cost_fns_cache[topic] - cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: if '.' in cost_fn_str: @@ -85,7 +128,6 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): short_name = cost_fn_str cost_fn_str = "%s.%s.%s" % ( __name__, self.__class__.__name__, short_name) - if not (short_name.startswith('%s_' % topic) or short_name.startswith('noop')): continue @@ -96,15 +138,14 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): cost_fn = utils.import_class(cost_fn_str) except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( - cost_fn_str=cost_fn_str) + cost_fn_str=cost_fn_str) try: flag_name = "%s_weight" % cost_fn.__name__ weight = getattr(FLAGS, flag_name) except AttributeError: raise exception.SchedulerWeightFlagNotFound( - flag_name=flag_name) - + flag_name=flag_name) cost_fns.append((weight, cost_fn)) self.cost_fns_cache[topic] = cost_fns @@ -114,13 +155,13 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname, capabilities: capabs} ] """ - cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, (hostname, caps) in zip(costs, hosts): + for cost, (hostname, service) in zip(costs, hosts): + caps = service[topic] weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname, capabilities=caps) @@ -128,52 +169,3 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): LOG.debug(_("Weighted Costs => %s") % weight_log) return weighted - - -def normalize_list(L): - """Normalize an array of numbers such that each element satisfies: - 0 <= e <= 1""" - if not L: - return L - max_ = max(L) - if max_ > 0: - return [(float(e) / max_) for e in L] - return L - - -def weighted_sum(domain, weighted_fns, normalize=True): - """Use the weighted-sum method to compute a score for an array of objects. - Normalize the results of the objective-functions so that the weights are - meaningful regardless of objective-function's range. - - domain - input to be scored - weighted_fns - list of weights and functions like: - [(weight, objective-functions)] - - Returns an unsorted list of scores. To pair with hosts do: - zip(scores, hosts) - """ - # Table of form: - # { domain1: [score1, score2, ..., scoreM] - # ... 
- # domainN: [score1, score2, ..., scoreM] } - score_table = collections.defaultdict(list) - for weight, fn in weighted_fns: - scores = [fn(elem) for elem in domain] - - if normalize: - norm_scores = normalize_list(scores) - else: - norm_scores = scores - - for idx, score in enumerate(norm_scores): - weighted_score = score * weight - score_table[idx].append(weighted_score) - - # Sum rows in table to compute score for each element in domain - domain_scores = [] - for idx in sorted(score_table): - elem_score = sum(score_table[idx]) - domain_scores.append(elem_score) - - return domain_scores diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a64b25138..a961b1b06 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -192,9 +192,7 @@ class HostFilterTestCase(test.TestCase): msg = " ".join([str(arg) for arg in args]) dbg.write("%s\n" % msg) - debug("cooked", cooked, type(cooked)) hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 16ec4420b..d6eaaa223 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -15,6 +15,7 @@ """ Tests For Least Cost Scheduler """ +import copy from nova import test from nova.scheduler import least_cost @@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase): super(LeastCostSchedulerTestCase, self).tearDown() def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) + weighted = self.sched.weigh_hosts("compute", request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): @@ -125,19 +126,20 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - all_hosts = self.sched.zone_manager.service_states.iteritems() + svc_states = self.sched.zone_manager.service_states.iteritems() all_hosts = [(host, services["compute"]) - for host, services in all_hosts + for host, services in svc_states if "compute" in services] - hosts = self.sched.filter_hosts('compute', request_spec, host_list) + hosts = self.sched.filter_hosts('compute', request_spec, all_hosts) expected = [] - for idx, (hostname, caps) in enumerate(hosts): + for idx, (hostname, services) in enumerate(hosts): + caps = copy.deepcopy(services["compute"]) # Costs are normalized so over 10 hosts, each host with increasing # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) + wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 3a1389a49..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import json - -from nova import exception -from nova import test -from nova.scheduler import host_filter - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filters.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' - self.flags(default_host_filter=default_host_filter) - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200, - extra_specs={}) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def test_choose_filter(self): - # Test default filter ... - hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid filter ... 
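As a stand-alone illustration of the weighted-sum scoring added to least_cost.py earlier in this series: the helpers below re-state normalize_list() and weighted_sum() so the snippet runs on its own, and the two hosts and the fill_first() function are made up for the example (they are not part of the patch).

import collections

def normalize_list(L):
    """Scale a list of numbers into [0, 1] by dividing by the maximum."""
    if not L:
        return L
    max_ = max(L)
    if max_ > 0:
        return [float(e) / max_ for e in L]
    return L

def weighted_sum(domain, weighted_fns, normalize=True):
    """Score each element of domain by summing weighted, normalized results."""
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        scores = [fn(elem) for elem in domain]
        norm_scores = normalize_list(scores) if normalize else scores
        for idx, score in enumerate(norm_scores):
            score_table[idx].append(score * weight)
    return [sum(score_table[idx]) for idx in sorted(score_table)]

# Hosts are (hostname, service dict) pairs, mirroring the shape used above.
hosts = [('host01', {'compute': {'host_memory_free': 1024}}),
         ('host02', {'compute': {'host_memory_free': 4096}})]

def fill_first(host):
    _hostname, service = host
    return service.get('compute', {}).get('host_memory_free', 0)

costs = weighted_sum(hosts, [(1, fill_first)])
for cost, (hostname, _service) in zip(costs, hosts):
    # Lower cost wins in the least-cost scheduler, so host01 (less free RAM)
    # would be chosen here, which is exactly the fill-first behavior.
    print('%s -> %.2f' % (hostname, cost))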
- try: - host_filter.choose_host_filter('does not exist') - self.fail("Should not find host filter.") - except exception.SchedulerHostFilterNotFound: - pass - - def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() - cooked = hf.instance_type_to_filter(self.instance_type) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_filter(self): - hf = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300], - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700], - ], - ] - - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
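Before the bogus-input cases below, a purely illustrative sketch of how the prefix-style queries above can be evaluated against a host capability dict. This is not the JsonFilter implementation being exercised here; the operator table, the resolve() helper, and the error handling are assumptions made only so the example is runnable.

import operator

OPS = {'=': operator.eq, '<': operator.lt, '<=': operator.le,
       '>': operator.gt, '>=': operator.ge}

def resolve(token, capabilities):
    # '$compute.host_memory_free' -> capabilities['compute']['host_memory_free']
    if isinstance(token, str) and token.startswith('$'):
        value = capabilities
        for part in token[1:].split('.'):
            value = value[part]
        return value
    return token

def evaluate(expr, capabilities):
    op, args = expr[0], expr[1:]
    vals = [evaluate(a, capabilities) if isinstance(a, list)
            else resolve(a, capabilities) for a in args]
    if op == 'and':
        return all(vals)
    if op == 'or':
        return any(vals)
    if op == 'not':
        return not any(vals)
    if op == 'in':
        return vals[0] in vals[1:]
    return OPS[op](vals[0], vals[1])

caps = {'compute': {'host_memory_free': 90, 'disk_available': 900}}
query = ['and', ['>', '$compute.host_memory_free', 70],
                ['>', '$compute.disk_available', 700]]
print(evaluate(query, caps))   # True: this host matches the "big host" branch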
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - hf.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False]))) - - try: - hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False)) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$foo', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$.....', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', {}, ['>', '$missing....foo']]))) -- cgit From 9a4b1deb5f9abdc88809ff80bccdfb503e66dccd Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Mon, 15 Aug 2011 15:09:42 -0700 Subject: Removed newly added userdatarequesthandler for OS API, there is no need to add this handler since the existing Ec2 API metadatarequesthandler does the same job --- etc/nova/api-paste.ini | 7 -- nova/api/__init__.py | 6 -- nova/api/ec2/__init__.py | 3 + nova/api/openstack/userdatarequesthandler.py | 110 --------------------- nova/network/linux_net.py | 5 - nova/tests/api/openstack/fakes.py | 2 - .../api/openstack/test_userdatarequesthandler.py | 80 --------------- 7 files changed, 3 insertions(+), 210 deletions(-) delete mode 100644 nova/api/openstack/userdatarequesthandler.py delete mode 100644 nova/tests/api/openstack/test_userdatarequesthandler.py diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index 46a3b0af9..abe8c20c4 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -69,7 +69,6 @@ use = egg:Paste#urlmap /: osversions /v1.0: openstackapi10 /v1.1: openstackapi11 -/latest: osuserdata [pipeline:openstackapi10] pipeline = faultwrap auth ratelimit osapiapp10 @@ -77,9 +76,6 @@ pipeline = faultwrap auth ratelimit osapiapp10 [pipeline:openstackapi11] pipeline = faultwrap auth ratelimit extensions osapiapp11 -[pipeline:osuserdata] -pipeline = logrequest osappud - [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory @@ -103,6 +99,3 @@ pipeline = faultwrap osversionapp [app:osversionapp] paste.app_factory = nova.api.openstack.versions:Versions.factory - -[app:osappud] -paste.app_factory = nova.api.openstack.userdatarequesthandler:UserdataRequestHandler.factory diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 6e6b092b3..747015af5 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -15,9 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from nova import flags - - -flags.DEFINE_boolean('use_forwarded_for', False, - 'Treat X-Forwarded-For as the canonical remote address. 
' - 'Only enable this if you have a sanitizing proxy.') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 2e9278b52..96df97393 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -37,6 +37,9 @@ from nova.auth import manager FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api") +flags.DEFINE_boolean('use_forwarded_for', False, + 'Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.') flags.DEFINE_integer('lockout_attempts', 5, 'Number of failed auths before lockout.') flags.DEFINE_integer('lockout_minutes', 15, diff --git a/nova/api/openstack/userdatarequesthandler.py b/nova/api/openstack/userdatarequesthandler.py deleted file mode 100644 index f0205419b..000000000 --- a/nova/api/openstack/userdatarequesthandler.py +++ /dev/null @@ -1,110 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""User data request handler.""" - -import base64 -import webob.dec -import webob.exc - -from nova import log as logging -from nova import context -from nova import exception -from nova import db -from nova import flags -from nova import wsgi - - -LOG = logging.getLogger('nova.api.openstack.userdata') -FLAGS = flags.FLAGS - - -class Controller(object): - """ The server user-data API controller for the Openstack API """ - - def __init__(self): - super(Controller, self).__init__() - - @staticmethod - def _format_user_data(instance_ref): - return base64.b64decode(instance_ref['user_data']) - - def get_user_data(self, address): - ctxt = context.get_admin_context() - try: - instance_ref = db.instance_get_by_fixed_ip(ctxt, address) - except exception.NotFound: - instance_ref = None - if not instance_ref: - return None - - data = {'user-data': self._format_user_data(instance_ref)} - return data - - -class UserdataRequestHandler(wsgi.Application): - """Serve user-data from the OS API.""" - - def __init__(self): - self.cc = Controller() - - def print_data(self, data): - if isinstance(data, dict): - output = '' - for key in data: - if key == '_name': - continue - output += key - if isinstance(data[key], dict): - if '_name' in data[key]: - output += '=' + str(data[key]['_name']) - else: - output += '/' - output += '\n' - # Cut off last \n - return output[:-1] - elif isinstance(data, list): - return '\n'.join(data) - else: - return str(data) - - def lookup(self, path, data): - items = path.split('/') - for item in items: - if item: - if not isinstance(data, dict): - return data - if not item in data: - return None - data = data[item] - return data - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - - data = self.cc.get_user_data(remote_address) - if data 
is None: - LOG.error(_('Failed to get user data for ip: %s'), remote_address) - raise webob.exc.HTTPNotFound() - data = self.lookup(req.path_info, data) - if data is None: - raise webob.exc.HTTPNotFound() - return self.print_data(data) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index d8fff8a32..4e1e1f85a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -371,11 +371,6 @@ def metadata_forward(): '-p tcp -m tcp --dport 80 -j DNAT ' '--to-destination %s:%s' % \ (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) - iptables_manager.ipv4['nat'].add_rule('PREROUTING', - '-s 0.0.0.0/0 -d 169.254.169.253/32 ' - '-p tcp -m tcp --dport 80 -j DNAT ' - '--to-destination %s:%s' % \ - (FLAGS.osapi_host, FLAGS.osapi_port)) iptables_manager.apply() diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index aa5aeef16..d11fbf788 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -36,7 +36,6 @@ from nova.api.openstack import auth from nova.api.openstack import extensions from nova.api.openstack import versions from nova.api.openstack import limits -from nova.api.openstack import userdatarequesthandler from nova.auth.manager import User, Project import nova.image.fake from nova.image import glance @@ -100,7 +99,6 @@ def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True, mapper['/v1.0'] = api10 mapper['/v1.1'] = api11 mapper['/'] = openstack.FaultWrapper(versions.Versions()) - mapper['/latest'] = userdatarequesthandler.UserdataRequestHandler() return mapper diff --git a/nova/tests/api/openstack/test_userdatarequesthandler.py b/nova/tests/api/openstack/test_userdatarequesthandler.py deleted file mode 100644 index 0c63076b4..000000000 --- a/nova/tests/api/openstack/test_userdatarequesthandler.py +++ /dev/null @@ -1,80 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010-2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
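An aside on the use_forwarded_for flag referenced above: the handler being deleted (and the EC2 metadata handler the commit message says does the same job) picks the client address with the rule sketched below. This is a stand-alone re-statement for illustration, not Nova code; the function name and the dict-based headers are assumptions of the example.

def pick_remote_address(remote_addr, headers, use_forwarded_for=False):
    """Return the client address, trusting X-Forwarded-For only when asked to."""
    if use_forwarded_for:
        # Only safe behind a sanitizing proxy; clients can forge this header.
        return headers.get('X-Forwarded-For', remote_addr)
    return remote_addr

print(pick_remote_address('10.0.0.5', {'X-Forwarded-For': '203.0.113.7'}))        # 10.0.0.5
print(pick_remote_address('10.0.0.5', {'X-Forwarded-For': '203.0.113.7'}, True))  # 203.0.113.7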
- -import base64 -import json -import unittest -import webob - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import log as logging - -from nova.tests.api.openstack import fakes - -LOG = logging.getLogger('nova.api.openstack.userdata') - -USER_DATA_STRING = ("This is an encoded string") -ENCODE_STRING = base64.b64encode(USER_DATA_STRING) - - -def return_server_by_address(context, address): - instance = {"user_data": ENCODE_STRING} - instance["fixed_ips"] = {"address": address, - "floating_ips": []} - return instance - - -def return_non_existing_server_by_address(context, address): - raise exception.NotFound() - - -class TestUserdatarequesthandler(test.TestCase): - - def setUp(self): - super(TestUserdatarequesthandler, self).setUp() - self.stubs.Set(db, 'instance_get_by_fixed_ip', - return_server_by_address) - - def test_user_data(self): - req = webob.Request.blank('/latest/user-data') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - self.assertEqual(res.body, USER_DATA_STRING) - - def test_user_data_non_existing_fixed_address(self): - self.stubs.Set(db, 'instance_get_by_fixed_ip', - return_non_existing_server_by_address) - self.flags(use_forwarded_for=False) - req = webob.Request.blank('/latest/user-data') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 404) - - def test_user_data_invalid_url(self): - req = webob.Request.blank('/latest/user-data-invalid') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 404) - - def test_user_data_with_use_forwarded_header(self): - self.flags(use_forwarded_for=True) - req = webob.Request.blank('/latest/user-data') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - self.assertEqual(res.body, USER_DATA_STRING) -- cgit From d8a156f9ed0729c4c5553fe3b28f6c3afb93d54f Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:31:24 -0500 Subject: pep8 cleanup --- nova/compute/manager.py | 3 ++- nova/scheduler/abstract_scheduler.py | 6 +++--- nova/scheduler/filters/abstract_filter.py | 4 ++-- nova/scheduler/host_filter.py | 1 + nova/tests/scheduler/test_host_filter.py | 5 ----- nova/tests/scheduler/test_least_cost_scheduler.py | 3 ++- nova/virt/libvirt/connection.py | 3 ++- 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 16b8e14b4..52fcf5c49 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1315,7 +1315,8 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + self.driver.ensure_filtering_rules_for_instance(instance_ref, + network_info) # Preparation for block migration if block_migration: diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 2f1ede0a4..77db67773 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -15,7 +15,7 @@ """ The AbsractScheduler is an abstract class Scheduler for creating instances -locally or across zones. Two methods should be overridden in order to +locally or across zones. Two methods should be overridden in order to customize the behavior: filter_hosts() and weigh_hosts(). 
The default behavior is to simply select all hosts and weight them the same. """ @@ -298,8 +298,8 @@ class AbstractScheduler(driver.Scheduler): def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. By default, this method only applies the basic_ram_filter(), meaning all hosts - with at least enough RAM for the requested instance are returned. - + with at least enough RAM for the requested instance are returned. + Override in subclasses to provide greater selectivity. """ def basic_ram_filter(hostname, capabilities, request_spec): diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index d9d272130..a1d00d562 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -18,10 +18,10 @@ import nova.scheduler from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'AllHostsFilter', +flags.DEFINE_string('default_host_filter', 'AllHostsFilter', 'Which filter to use for filtering hosts') + class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index be618f3f3..4bc5158cc 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -39,6 +39,7 @@ FLAGS = flags.FLAGS def _get_filters(): # Imported here to avoid circular imports from nova.scheduler import filters + def get_itm(nm): return getattr(filters, nm) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a961b1b06..17431fc7e 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -187,11 +187,6 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index d6eaaa223..af58de527 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -139,7 +139,8 @@ class LeastCostSchedulerTestCase(test.TestCase): # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + wtd_dict = dict(hostname=hostname, weight=weight, + capabilities=caps) expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2b17e244a..c009641ef 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1539,7 +1539,8 @@ class LibvirtConnection(driver.ComputeDriver): # basic-filtering must be set here. self.firewall_driver.setup_basic_filtering(instance_ref, network_info) # setting up n)ova-instance-instance-xx mainly. 
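Returning to the two override points named in the abstract_scheduler.py docstring above, filter_hosts() and weigh_hosts(): the sketch below shows the shape of a subclass that keeps only hosts with enough free RAM and weighs the survivors equally. The class name, the host tuples, and the request_spec layout are mocked for illustration; it does not inherit from the real scheduler driver.

class TinyScheduler(object):
    """Mock of the override contract: RAM filter plus equal weights."""

    def filter_hosts(self, topic, request_spec, host_list):
        needed = request_spec['instance_type']['memory_mb']
        return [(name, services) for name, services in host_list
                if services.get(topic, {}).get('host_memory_free', 0) >= needed]

    def weigh_hosts(self, topic, request_spec, hosts):
        # Equal weight for every surviving host, mirroring the stated default.
        return [dict(weight=1, hostname=name,
                     capabilities=services.get(topic, {}))
                for name, services in hosts]

sched = TinyScheduler()
spec = {'instance_type': {'memory_mb': 2048}}
hosts = [('host01', {'compute': {'host_memory_free': 1024}}),
         ('host02', {'compute': {'host_memory_free': 4096}})]
print(sched.weigh_hosts('compute', spec,
                        sched.filter_hosts('compute', spec, hosts)))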
- self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 8d83ceb9f8baef3c768c4fc087afb89188250c26 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 16:29:52 -0700 Subject: fix error logging in s3.py --- nova/image/s3.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index ccbfa89cd..e5008d856 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -193,6 +193,8 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" + log_vars = {'image_location': image_location, + 'image_path': image_path} metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) @@ -214,7 +216,7 @@ class S3ImageService(service.BaseImageService): except Exception: LOG.error(_("Failed to download %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -238,7 +240,7 @@ class S3ImageService(service.BaseImageService): dec_filename) except Exception: LOG.error(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -250,7 +252,7 @@ class S3ImageService(service.BaseImageService): unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: LOG.error(_("Failed to untar %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -263,7 +265,7 @@ class S3ImageService(service.BaseImageService): metadata, image_file) except Exception: LOG.error(_("Failed to upload %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From c4f6df55fa8a9c0746074c814b510e4a4cd4e512 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 17:12:03 -0700 Subject: log the full exception so we don't lose traceback through eventlet --- nova/image/s3.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index e5008d856..abf01a942 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -215,11 +215,11 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: - LOG.error(_("Failed to download %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to download %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'decrypting' self.service.update(context, image_id, metadata) @@ -239,11 +239,11 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: - LOG.error(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to decrypt 
%(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) @@ -251,11 +251,11 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.error(_("Failed to untar %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to untar %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'uploading' self.service.update(context, image_id, metadata) @@ -264,11 +264,11 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: - LOG.error(_("Failed to upload %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to upload %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'available' metadata['status'] = 'active' -- cgit From 83b45a371665fd069fc7e372628f82874258fd08 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 16 Aug 2011 00:31:54 -0700 Subject: redux of floating ip api --- nova/api/openstack/contrib/floating_ips.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 44b35c385..722320534 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -78,11 +78,14 @@ class FloatingIPController(object): def index(self, req): context = req.environ['nova.context'] - floating_ips = self.network_api.list_floating_ips(context) + try: + floating_ips = self.network_api.list_floating_ips(context) + except exception.FloatingIpNotFoundForProject: + floating_ips = [] return _translate_floating_ips_view(floating_ips) - def create(self, req): + def create(self, req, body): context = req.environ['nova.context'] try: @@ -95,9 +98,7 @@ class FloatingIPController(object): else: raise - return {'allocated': { - "id": ip['id'], - "floating_ip": ip['address']}} + return _translate_floating_ip_view(ip) def delete(self, req, id): context = req.environ['nova.context'] @@ -125,26 +126,22 @@ class FloatingIPController(object): except rpc.RemoteError: raise - return {'associated': - { - "floating_ip_id": id, - "floating_ip": floating_ip, - "fixed_ip": fixed_ip}} + floating_ip = self.network_api.get_floating_ip(context, id) + return _translate_floating_ip_view(floating_ip) def disassociate(self, req, id, body=None): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) address = floating_ip['address'] - fixed_ip = floating_ip['fixed_ip']['address'] try: self.network_api.disassociate_floating_ip(context, address) except rpc.RemoteError: raise - return {'disassociated': {'floating_ip': address, - 'fixed_ip': fixed_ip}} + floating_ip = self.network_api.get_floating_ip(context, id) + return _translate_floating_ip_view(floating_ip) def _get_ip_by_id(self, context, value): """Checks that value is id and then returns its 
address.""" -- cgit From 92c6ee9dc7eeaa44bf6162387b5815fc0cdb1c71 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Tue, 16 Aug 2011 17:51:45 +0900 Subject: Fixed the naming of the extension --- nova/api/openstack/contrib/virtual_interfaces.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index 3466d31c7..38246aeb5 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -72,13 +72,13 @@ class ServerVirtualInterfaceController(object): entity_maker=_translate_vif_summary_view) -class VirtualInterfaces(extensions.ExtensionDescriptor): +class Virtual_interfaces(extensions.ExtensionDescriptor): def get_name(self): - return "VirtualInterfaces" + return "Virtual_interfaces" def get_alias(self): - return "os-virtual_interfaces" + return "os-virtual-interfaces" def get_description(self): return "Virtual interface support" @@ -92,7 +92,7 @@ class VirtualInterfaces(extensions.ExtensionDescriptor): def get_resources(self): resources = [] - res = extensions.ResourceExtension('os-virtual_interfaces', + res = extensions.ResourceExtension('os-virtual-interfaces', ServerVirtualInterfaceController(), parent=dict( member_name='server', -- cgit From ee06de65b674a7a91597bc9121b3bd3bd11e658b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Tue, 16 Aug 2011 18:37:50 +0900 Subject: Added uuid to allocate_mac_address --- nova/network/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index b1b3f8ba2..f115c66f1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -523,7 +523,8 @@ class NetworkManager(manager.SchedulerDependentManager): for network in networks: vif = {'address': self.generate_mac_address(), 'instance_id': instance_id, - 'network_id': network['id']} + 'network_id': network['id'], + 'uuid': utils.gen_uuid()} # try FLAG times to create a vif record with a unique mac_address for i in range(FLAGS.create_unique_mac_address_attempts): try: -- cgit From c3c164455f9b5d4ea994a4453342ccb00d987766 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Tue, 16 Aug 2011 18:52:29 +0900 Subject: Include vif UUID in the network info dictionary --- nova/network/manager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index f115c66f1..f32f8e837 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -499,6 +499,7 @@ class NetworkManager(manager.SchedulerDependentManager): 'dhcp_server': dhcp_server, 'broadcast': network['broadcast'], 'mac': vif['address'], + 'vif_uuid': vif['uuid'], 'rxtx_cap': flavor['rxtx_cap'], 'dns': [], 'ips': [ip_dict(ip) for ip in network_IPs], -- cgit From 0801dee7b05463b40bf66ee5911c92ac5e4aabc8 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 16 Aug 2011 10:49:26 -0400 Subject: Fix test_metadata tests. 
--- nova/api/ec2/cloud.py | 2 +- nova/tests/test_metadata.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 152cd6887..9aebf92e3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -305,7 +305,7 @@ class CloudController(object): 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, - 'instance-type': instance_ref['instance_type'].name, + 'instance-type': instance_ref['instance_type']['name'], 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index ad678714e..bfc7a6d44 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase): 'key_name': None, 'host': 'test', 'launch_index': 1, - 'instance_type': 'm1.tiny', + 'instance_type': {'name': 'm1.tiny'}, 'reservation_id': 'r-xxxxxxxx', 'user_data': '', 'image_ref': 7, -- cgit From 44a278bc5a456c8eda74c61aaa68cfd74ee0d6e8 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 16 Aug 2011 11:31:29 -0400 Subject: Small bug fix...don't cast DB objects to dicts. --- nova/api/openstack/views/servers.py | 4 ++-- nova/tests/integrated/test_servers.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 8222f6766..60fdf54be 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -146,7 +146,7 @@ class ViewBuilderV11(ViewBuilder): return response def _build_image(self, response, inst): - if 'image_ref' in dict(inst): + if inst.get("image_ref", None): image_href = inst['image_ref'] image_id = str(common.get_id_from_href(image_href)) _bookmark = self.image_builder.generate_bookmark(image_id) @@ -161,7 +161,7 @@ class ViewBuilderV11(ViewBuilder): } def _build_flavor(self, response, inst): - if "instance_type" in dict(inst): + if inst.get("instance_type", None): flavor_id = inst["instance_type"]['flavorid'] flavor_ref = self.flavor_builder.generate_href(flavor_id) flavor_bookmark = self.flavor_builder.generate_bookmark(flavor_id) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 150279a95..725f6d529 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -27,6 +27,7 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -103,6 +104,10 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # It should be available... # TODO(justinsb): Mock doesn't yet do this... #self.assertEqual('available', found_server['status']) + servers = self.api.get_servers(detail=True) + for server in servers: + self.assertTrue("image" in server) + self.assertTrue("flavor" in server) self._delete_server(created_server_id) -- cgit From 0385ef219b47fca0e98130d1c4c54c1673519f48 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 16 Aug 2011 12:02:39 -0400 Subject: Cleanup the '_base' directory in libvirt tests. 
--- nova/tests/test_libvirt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 688518bb8..6a213b4f0 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -836,6 +836,7 @@ class LibvirtConnTestCase(test.TestCase): count = (0 <= str(e.message).find('Unexpected method call')) shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + shutil.rmtree(os.path.join(FLAGS.instances_path, '_base')) self.assertTrue(count) -- cgit From 6220c4276e30c633ffc4165ce6db0d120c0e88a7 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:13:10 -0700 Subject: another trunk merge... a new change made it into nova before the code was merged --- nova/api/openstack/contrib/quotas.py | 107 ++++++++++++++++ nova/tests/api/openstack/contrib/test_quotas.py | 158 ++++++++++++++++++++++++ nova/tests/api/openstack/test_extensions.py | 1 + 3 files changed, 266 insertions(+) create mode 100644 nova/api/openstack/contrib/quotas.py create mode 100644 nova/tests/api/openstack/contrib/test_quotas.py diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py new file mode 100644 index 000000000..7dbafb79a --- /dev/null +++ b/nova/api/openstack/contrib/quotas.py @@ -0,0 +1,107 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import urlparse + +from nova import db +from nova import exception +from nova import quota +from nova.auth import manager as auth_manager +from nova.api.openstack import extensions + + +class QuotasController(object): + + def _format_quota_set(self, project_id, quota_set): + """Convert the quota object to a result dict""" + + return {'quota_set': { + 'id': str(project_id), + 'metadata_items': quota_set['metadata_items'], + 'injected_file_content_bytes': + quota_set['injected_file_content_bytes'], + 'volumes': quota_set['volumes'], + 'gigabytes': quota_set['gigabytes'], + 'ram': quota_set['ram'], + 'floating_ips': quota_set['floating_ips'], + 'instances': quota_set['instances'], + 'injected_files': quota_set['injected_files'], + 'cores': quota_set['cores'], + }} + + def index(self, req): + # NOTE(jakedahn): If http param defaults is true, list system defaults. 
+ if urlparse.parse_qs(req.environ['QUERY_STRING']).get('defaults', + False): + return {'quota_set_list': [self._format_quota_set('__defaults__', + quota._get_default_quotas())]} + else: + context = req.environ['nova.context'] + user = req.environ.get('user') + projects = auth_manager.AuthManager().get_projects(user=user) + + quota_set_list = [self._format_quota_set(project.name, + quota.get_project_quotas(context, project.name)) + for project in projects] + return {'quota_set_list': quota_set_list} + + def show(self, req, id): + context = req.environ['nova.context'] + return self._format_quota_set(id, quota.get_project_quotas(context, + id)) + + def update(self, req, id, body): + context = req.environ['nova.context'] + project_id = id + resources = ['metadata_items', 'injected_file_content_bytes', + 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', + 'injected_files', 'cores'] + + for key in body['quota_set'].keys(): + if key in resources: + value = int(body['quota_set'][key]) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(context, project_id, key, value) + return {'quota_set': quota.get_project_quotas(context, project_id)} + + +class Quotas(extensions.ExtensionDescriptor): + + def get_name(self): + return "Quotas" + + def get_alias(self): + return "os-quotas" + + def get_description(self): + return "Quotas management support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/quotas/api/v1.1" + + def get_updated(self): + return "2011-08-08T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-quotas', QuotasController()) + resources.append(res) + + return resources diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py new file mode 100644 index 000000000..6ab2faf4d --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -0,0 +1,158 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
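One detail worth noting about the index() implementation above: urlparse.parse_qs() returns a dict of lists, so the 'defaults' check is effectively a presence test for the parameter rather than a test of its value. A quick illustration (the try/except import is only there so the snippet runs under either Python major version):

try:
    import urlparse                        # Python 2, as used in the patch
except ImportError:
    from urllib import parse as urlparse   # Python 3

print(urlparse.parse_qs('defaults=True'))    # {'defaults': ['True']}
print(urlparse.parse_qs('defaults=False'))   # {'defaults': ['False']}
# Both values are non-empty lists, i.e. truthy, so ?defaults=False would still
# take the "list system defaults" branch in index() above.
print(bool(urlparse.parse_qs('defaults=False').get('defaults', False)))   # True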
+ +import json +import webob + +from nova import context +from nova import test +from nova.auth import manager as auth_manager +from nova.tests.api.openstack import fakes + + +from nova.api.openstack.contrib.quotas import QuotasController + + +def quota_set(id): + return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10, + 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10, + 'instances': 10, 'injected_files': 5, 'cores': 20, + 'injected_file_content_bytes': 10240}} + + +def quota_set_list(): + return {'quota_set_list': [quota_set('1234'), quota_set('5678'), + quota_set('update_me')]} + + +def create_project(project_name, manager_user): + auth_manager.AuthManager().create_project(project_name, manager_user) + + +def delete_project(project_name): + auth_manager.AuthManager().delete_project(project_name) + + +def create_admin_user(name): + auth_manager.AuthManager().create_user(name, admin=True) + + +def delete_user(name): + auth_manager.AuthManager().delete_user(name) + + +class QuotasTest(test.TestCase): + + def setUp(self): + super(QuotasTest, self).setUp() + self.controller = QuotasController() + self.context = context.get_admin_context() + + create_admin_user('foo') + create_project('1234', 'foo') + create_project('5678', 'foo') + create_project('update_me', 'foo') + + def tearDown(self): + delete_project('1234') + delete_project('5678') + delete_project('update_me') + delete_user('foo') + + def test_format_quota_set(self): + raw_quota_set = { + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240, + } + + quota_set = QuotasController()._format_quota_set('1234', raw_quota_set) + quota_set_check = quota_set['quota_set'] + + self.assertEqual(quota_set_check['id'], '1234') + self.assertEqual(quota_set_check['instances'], 10) + self.assertEqual(quota_set_check['cores'], 20) + self.assertEqual(quota_set_check['ram'], 51200) + self.assertEqual(quota_set_check['volumes'], 10) + self.assertEqual(quota_set_check['gigabytes'], 1000) + self.assertEqual(quota_set_check['floating_ips'], 10) + self.assertEqual(quota_set_check['metadata_items'], 128) + self.assertEqual(quota_set_check['injected_files'], 5) + self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) + + def test_quotas_index_with_default_param(self): + req = webob.Request.blank('/v1.1/os-quotas?defaults=True') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + expected = {'quota_set_list': [{'quota_set': { + 'id': '__defaults__', + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240}}]} + + self.assertEqual(json.loads(res.body), expected) + + def test_quotas_index(self): + req = webob.Request.blank('/v1.1/os-quotas') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + self.assertEqual(json.loads(res.body), quota_set_list()) + + def test_quotas_show(self): + req = webob.Request.blank('/v1.1/os-quotas/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + self.assertEqual(json.loads(res.body), quota_set('1234')) 
+ + def test_quotas_update(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quotas/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app(fake_auth_context=\ + context.RequestContext('fake', 'fake', + is_admin=True))) + + self.assertEqual(json.loads(res.body), updated_quota_set) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 3e990a30b..0ae1a059f 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -91,6 +91,7 @@ class ExtensionControllerTest(test.TestCase): "Hosts", "Keypairs", "Multinic", + "Quotas", "SecurityGroups", "Volumes", ] -- cgit From b6c8985cb10b40572d23b7971aac6d0577ebfe82 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:01 -0700 Subject: another trunk merge --- nova/tests/api/openstack/contrib/test_quotas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index 6ab2faf4d..6a7a1d9fa 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -151,7 +151,7 @@ class QuotasTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context=\ + res = req.get_response(fakes.wsgi_app(fake_auth_context= context.RequestContext('fake', 'fake', is_admin=True))) -- cgit From e9cf4ff5c14b274b8a1d7aa39567768368851e81 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: merging test_extensions.py --- nova/tests/api/openstack/test_extensions.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 0ae1a059f..5d3208e10 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -111,7 +111,7 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(names, self.ext_list) # Make sure that at least Fox in Sox is correct. - (fox_ext,) = [ + (fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual(fox_ext, { 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', @@ -156,7 +156,7 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
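The (fox_ext,) = [...] pattern used in these extension assertions is a deliberate idiom: unpacking a one-element list both extracts the item and asserts that exactly one element matched, raising ValueError otherwise. A small illustration with made-up data:

extensions = [{'alias': 'FOXNSOX'}, {'alias': 'os-quotas'}]

(fox_ext,) = [ext for ext in extensions if ext['alias'] == 'FOXNSOX']
print(fox_ext)   # {'alias': 'FOXNSOX'}

try:
    (missing,) = [ext for ext in extensions if ext['alias'] == 'nope']
except ValueError:
    print('not exactly one match')   # zero (or several) matches fail loudly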
- (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') self.assertEqual(fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') @@ -228,6 +228,7 @@ class ResourceExtensionTest(test.TestCase): class InvalidExtension(object): + def get_alias(self): return "THIRD" -- cgit From 02c5d589483abef3fb8ec65f983e5b43a9e41f71 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: removed index, and separated out defaults into its own action --- nova/api/openstack/contrib/quotas.py | 28 ++++++--------------- nova/tests/api/openstack/contrib/test_quotas.py | 33 +++++++++---------------- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 7dbafb79a..5f2b54d57 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -24,7 +24,7 @@ from nova.auth import manager as auth_manager from nova.api.openstack import extensions -class QuotasController(object): +class QuotaSetsController(object): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" @@ -43,22 +43,6 @@ class QuotasController(object): 'cores': quota_set['cores'], }} - def index(self, req): - # NOTE(jakedahn): If http param defaults is true, list system defaults. - if urlparse.parse_qs(req.environ['QUERY_STRING']).get('defaults', - False): - return {'quota_set_list': [self._format_quota_set('__defaults__', - quota._get_default_quotas())]} - else: - context = req.environ['nova.context'] - user = req.environ.get('user') - projects = auth_manager.AuthManager().get_projects(user=user) - - quota_set_list = [self._format_quota_set(project.name, - quota.get_project_quotas(context, project.name)) - for project in projects] - return {'quota_set_list': quota_set_list} - def show(self, req, id): context = req.environ['nova.context'] return self._format_quota_set(id, quota.get_project_quotas(context, @@ -80,6 +64,8 @@ class QuotasController(object): db.quota_create(context, project_id, key, value) return {'quota_set': quota.get_project_quotas(context, project_id)} + def defaults(self, req): + return self._format_quota_set('defaults', quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): @@ -87,13 +73,13 @@ class Quotas(extensions.ExtensionDescriptor): return "Quotas" def get_alias(self): - return "os-quotas" + return "os-quota-sets" def get_description(self): return "Quotas management support" def get_namespace(self): - return "http://docs.openstack.org/ext/quotas/api/v1.1" + return "http://docs.openstack.org/ext/quotas-sets/api/v1.1" def get_updated(self): return "2011-08-08T00:00:00+00:00" @@ -101,7 +87,9 @@ class Quotas(extensions.ExtensionDescriptor): def get_resources(self): resources = [] - res = extensions.ResourceExtension('os-quotas', QuotasController()) + res = extensions.ResourceExtension('os-quota-sets', + QuotaSetsController(), + member_actions={'defaults': 'GET'}) resources.append(res) return resources diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index 6a7a1d9fa..decc76b4e 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -24,7 +24,7 @@ from nova.auth import manager as auth_manager from nova.tests.api.openstack import fakes -from 
nova.api.openstack.contrib.quotas import QuotasController +from nova.api.openstack.contrib.quotas import QuotaSetsController def quota_set(id): @@ -55,11 +55,11 @@ def delete_user(name): auth_manager.AuthManager().delete_user(name) -class QuotasTest(test.TestCase): +class QuotaSetsTest(test.TestCase): def setUp(self): - super(QuotasTest, self).setUp() - self.controller = QuotasController() + super(QuotaSetsTest, self).setUp() + self.controller = QuotaSetsController() self.context = context.get_admin_context() create_admin_user('foo') @@ -86,7 +86,7 @@ class QuotasTest(test.TestCase): 'injected_file_content_bytes': 10240, } - quota_set = QuotasController()._format_quota_set('1234', raw_quota_set) + quota_set = QuotaSetsController()._format_quota_set('1234', raw_quota_set) quota_set_check = quota_set['quota_set'] self.assertEqual(quota_set_check['id'], '1234') @@ -100,15 +100,15 @@ class QuotasTest(test.TestCase): self.assertEqual(quota_set_check['injected_files'], 5) self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) - def test_quotas_index_with_default_param(self): - req = webob.Request.blank('/v1.1/os-quotas?defaults=True') + def test_quotas_defaults(self): + req = webob.Request.blank('/v1.1/os-quota-sets/defaults') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - expected = {'quota_set_list': [{'quota_set': { - 'id': '__defaults__', + expected = {'quota_set': { + 'id': 'defaults', 'instances': 10, 'cores': 20, 'ram': 51200, @@ -117,21 +117,12 @@ class QuotasTest(test.TestCase): 'floating_ips': 10, 'metadata_items': 128, 'injected_files': 5, - 'injected_file_content_bytes': 10240}}]} + 'injected_file_content_bytes': 10240}} self.assertEqual(json.loads(res.body), expected) - def test_quotas_index(self): - req = webob.Request.blank('/v1.1/os-quotas') - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) - - self.assertEqual(res.status_int, 200) - self.assertEqual(json.loads(res.body), quota_set_list()) - def test_quotas_show(self): - req = webob.Request.blank('/v1.1/os-quotas/1234') + req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) @@ -146,7 +137,7 @@ class QuotasTest(test.TestCase): 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240}} - req = webob.Request.blank('/v1.1/os-quotas/update_me') + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') req.method = 'PUT' req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' -- cgit From 817b596bccd38f84f72e1ee73df3c3b35287c75c Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: refactoring tests to not use authmanager, and now returning 403 when non admin user tries to update quotas --- nova/api/openstack/contrib/quotas.py | 4 +- nova/tests/api/openstack/contrib/test_quotas.py | 113 ++++++++++++------------ 2 files changed, 58 insertions(+), 59 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 5f2b54d57..f7e7b4105 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import webob import urlparse from nova import db @@ -54,7 +55,6 @@ class QuotaSetsController(object): resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores'] - for key in body['quota_set'].keys(): if key in resources: value = int(body['quota_set'][key]) @@ -62,6 +62,8 @@ class QuotaSetsController(object): db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) + except exception.AdminRequired as e: + return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} def defaults(self, req): diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index decc76b4e..e2bd05428 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -39,39 +39,18 @@ def quota_set_list(): quota_set('update_me')]} -def create_project(project_name, manager_user): - auth_manager.AuthManager().create_project(project_name, manager_user) - - -def delete_project(project_name): - auth_manager.AuthManager().delete_project(project_name) - - -def create_admin_user(name): - auth_manager.AuthManager().create_user(name, admin=True) - - -def delete_user(name): - auth_manager.AuthManager().delete_user(name) - - class QuotaSetsTest(test.TestCase): def setUp(self): super(QuotaSetsTest, self).setUp() self.controller = QuotaSetsController() - self.context = context.get_admin_context() - - create_admin_user('foo') - create_project('1234', 'foo') - create_project('5678', 'foo') - create_project('update_me', 'foo') - - def tearDown(self): - delete_project('1234') - delete_project('5678') - delete_project('update_me') - delete_user('foo') + self.user_id = 'fake' + self.project_id = 'fake' + self.user_context = context.RequestContext(self.user_id, + self.project_id) + self.admin_context = context.RequestContext(self.user_id, + self.project_id, + is_admin=True) def test_format_quota_set(self): raw_quota_set = { @@ -83,22 +62,22 @@ class QuotaSetsTest(test.TestCase): 'floating_ips': 10, 'metadata_items': 128, 'injected_files': 5, - 'injected_file_content_bytes': 10240, - } - - quota_set = QuotaSetsController()._format_quota_set('1234', raw_quota_set) - quota_set_check = quota_set['quota_set'] - - self.assertEqual(quota_set_check['id'], '1234') - self.assertEqual(quota_set_check['instances'], 10) - self.assertEqual(quota_set_check['cores'], 20) - self.assertEqual(quota_set_check['ram'], 51200) - self.assertEqual(quota_set_check['volumes'], 10) - self.assertEqual(quota_set_check['gigabytes'], 1000) - self.assertEqual(quota_set_check['floating_ips'], 10) - self.assertEqual(quota_set_check['metadata_items'], 128) - self.assertEqual(quota_set_check['injected_files'], 5) - self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) + 'injected_file_content_bytes': 10240} + + quota_set = QuotaSetsController()._format_quota_set('1234', + raw_quota_set) + qs = quota_set['quota_set'] + + self.assertEqual(qs['id'], '1234') + self.assertEqual(qs['instances'], 10) + self.assertEqual(qs['cores'], 20) + self.assertEqual(qs['ram'], 51200) + self.assertEqual(qs['volumes'], 10) + self.assertEqual(qs['gigabytes'], 1000) + self.assertEqual(qs['floating_ips'], 10) + self.assertEqual(qs['metadata_items'], 128) + self.assertEqual(qs['injected_files'], 5) + self.assertEqual(qs['injected_file_content_bytes'], 10240) def 
test_quotas_defaults(self): req = webob.Request.blank('/v1.1/os-quota-sets/defaults') @@ -108,16 +87,16 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) expected = {'quota_set': { - 'id': 'defaults', - 'instances': 10, - 'cores': 20, - 'ram': 51200, - 'volumes': 10, - 'gigabytes': 1000, - 'floating_ips': 10, - 'metadata_items': 128, - 'injected_files': 5, - 'injected_file_content_bytes': 10240}} + 'id': 'defaults', + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240}} self.assertEqual(json.loads(res.body), expected) @@ -125,12 +104,13 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) + res = req.get_response(fakes.wsgi_app(fake_auth_context= + self.admin_context)) self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) - def test_quotas_update(self): + def test_quotas_update_as_admin(self): updated_quota_set = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, 'gigabytes': 1000, 'floating_ips': 10, @@ -143,7 +123,24 @@ class QuotaSetsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context= - context.RequestContext('fake', 'fake', - is_admin=True))) + self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) + + + def test_quotas_update_as_user(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app(fake_auth_context= + self.user_context)) + + self.assertEqual(res.status_int, 403) -- cgit From 903ae5a8274051aaf40a62c929117d7165729360 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 errors --- nova/api/openstack/contrib/quotas.py | 2 +- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index f7e7b4105..b5c6447c4 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -21,7 +21,6 @@ import urlparse from nova import db from nova import exception from nova import quota -from nova.auth import manager as auth_manager from nova.api.openstack import extensions @@ -69,6 +68,7 @@ class QuotaSetsController(object): def defaults(self, req): return self._format_quota_set('defaults', quota._get_default_quotas()) + class Quotas(extensions.ExtensionDescriptor): def get_name(self): diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index e2bd05428..b37edb9f5 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -127,7 +127,6 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(json.loads(res.body), updated_quota_set) - def test_quotas_update_as_user(self): updated_quota_set = {'quota_set': 
{'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, -- cgit From 8b6e551813ef964af38335fcca749ab9d0971200 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: cleaning up a few things from pyflakes --- nova/api/openstack/contrib/quotas.py | 3 +-- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index b5c6447c4..d021a4a4f 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -16,7 +16,6 @@ # under the License. import webob -import urlparse from nova import db from nova import exception @@ -61,7 +60,7 @@ class QuotaSetsController(object): db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) - except exception.AdminRequired as e: + except exception.AdminRequired: return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index b37edb9f5..cb5fcb120 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -20,7 +20,6 @@ import webob from nova import context from nova import test -from nova.auth import manager as auth_manager from nova.tests.api.openstack import fakes -- cgit From a3a0782f65d85c873c2ec3fc8f94486225cb6f76 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing spacing issues --- nova/tests/api/openstack/contrib/test_quotas.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index cb5fcb120..f40a435aa 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -22,7 +22,6 @@ from nova import context from nova import test from nova.tests.api.openstack import fakes - from nova.api.openstack.contrib.quotas import QuotaSetsController @@ -57,9 +56,9 @@ class QuotaSetsTest(test.TestCase): 'cores': 20, 'ram': 51200, 'volumes': 10, - 'gigabytes': 1000, 'floating_ips': 10, 'metadata_items': 128, + 'gigabytes': 1000, 'injected_files': 5, 'injected_file_content_bytes': 10240} @@ -103,7 +102,7 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.admin_context)) self.assertEqual(res.status_int, 200) @@ -121,7 +120,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) @@ -138,7 +137,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.user_context)) self.assertEqual(res.status_int, 403) -- cgit From bf269b3d799a431ad3fc68cdb039b826685c8760 Mon Sep 17 00:00:00 
2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 issues again --- nova/tests/api/openstack/contrib/test_quotas.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index f40a435aa..8f363aed6 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -102,8 +102,8 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.admin_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) @@ -120,8 +120,8 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.admin_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) @@ -137,7 +137,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.user_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) self.assertEqual(res.status_int, 403) -- cgit From 029261908ac5acd9950a4b027b7daec17c92854d Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: making get project quotas require context which has access to the project/tenant) --- nova/db/sqlalchemy/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57a4370d8..184ad60d5 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1959,6 +1959,7 @@ def quota_get(context, project_id, resource, session=None): @require_context def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) session = get_session() result = {'project_id': project_id} rows = session.query(models.Quota).\ -- cgit From 6a5b9831c4b32053996a99307b7303ca851bf508 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing up the show quotas tests, and extension --- nova/api/openstack/contrib/quotas.py | 8 ++++++-- nova/tests/api/openstack/contrib/test_quotas.py | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index d021a4a4f..87046063a 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -44,8 +44,12 @@ class QuotaSetsController(object): def show(self, req, id): context = req.environ['nova.context'] - return self._format_quota_set(id, quota.get_project_quotas(context, - id)) + try: + db.sqlalchemy.api.authorize_project_context(context, id) + return self._format_quota_set(id, + quota.get_project_quotas(context, id)) + except exception.NotAuthorized: + return webob.Response(status_int=403) def update(self, req, id, body): context = req.environ['nova.context'] diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index 
8f363aed6..d77ed40ed 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -78,14 +78,14 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(qs['injected_file_content_bytes'], 10240) def test_quotas_defaults(self): - req = webob.Request.blank('/v1.1/os-quota-sets/defaults') + req = webob.Request.blank('/v1.1/os-quota-sets/fake_tenant/defaults') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) expected = {'quota_set': { - 'id': 'defaults', + 'id': 'fake_tenant', 'instances': 10, 'cores': 20, 'ram': 51200, @@ -98,7 +98,7 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(json.loads(res.body), expected) - def test_quotas_show(self): + def test_quotas_show_as_admin(self): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' @@ -108,6 +108,16 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) + + def test_quotas_show_as_unauthorized_user(self): + req = webob.Request.blank('/v1.1/os-quota-sets/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + + self.assertEqual(res.status_int, 403) + def test_quotas_update_as_admin(self): updated_quota_set = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, -- cgit From b85deda977ff46722a4461aca98f0378fd10ee1b Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: defaults now is referred to using a tenant --- nova/api/openstack/contrib/quotas.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 87046063a..459b71dfd 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -68,8 +68,8 @@ class QuotaSetsController(object): return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} - def defaults(self, req): - return self._format_quota_set('defaults', quota._get_default_quotas()) + def defaults(self, req, id): + return self._format_quota_set(id, quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): -- cgit From f3e64fea374df91a6ff78a891ff627edf635fdb2 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 issue --- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index d77ed40ed..f6a25385f 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -108,7 +108,6 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) - def test_quotas_show_as_unauthorized_user(self): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' -- cgit From ca13037d2cd130f5b970d3af219566f3a70a9cb5 Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 16 Aug 2011 09:18:13 -0700 Subject: test improvements per peer review --- nova/tests/test_cloud.py | 10 ++++++---- nova/tests/test_db_api.py | 12 +++++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff 
--git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 39358eeff..0793784f8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -488,13 +488,15 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, comp2['id']) def test_describe_instances_deleted(self): - args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} - inst1 = db.instance_create(self.context, args) - inst2 = db.instance_create(self.context, args) + args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args1) + args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} + inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) result = self.cloud.describe_instances(self.context) result = result['reservationSet'][0]['instancesSet'] - self.assertEqual(1, len(result)) + self.assertEqual(result[0]['instanceId'], + ec2utils.id_to_ec2_id(inst2.id)) def _block_device_mapping_create(self, instance_id, mappings): volumes = [] diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index ed363d1be..038c07f40 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -85,9 +85,11 @@ class DbApiTestCase(test.TestCase): self.assertTrue(2, len(result)) def test_instance_get_all_by_filters_deleted(self): - args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} - inst1 = db.instance_create(self.context, args) - inst2 = db.instance_create(self.context, args) + args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args1) + args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} + inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) - result = db.instance_get_all_by_filters(self.context, {}) - self.assertTrue(1, len(result)) + result = db.instance_get_all_by_filters(self.context.elevated(), {}) + self.assertEqual(1, len(result)) + self.assertEqual(result[0].id, inst2.id) -- cgit From 71935201aed268e94ee9674e887d67b4b9f217a6 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 16 Aug 2011 13:44:03 -0400 Subject: Updated ViewBuilderV10 as per feedback. --- nova/api/openstack/views/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 60fdf54be..edc328129 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -111,14 +111,14 @@ class ViewBuilderV10(ViewBuilder): response['uuid'] = inst['uuid'] def _build_image(self, response, inst): - if 'image_ref' in dict(inst): + if inst.get('image_ref', None): image_ref = inst['image_ref'] if str(image_ref).startswith('http'): raise exception.ListingImageRefsNotSupported() response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): - if 'instance_type' in dict(inst): + if inst.get('instance_type', None): response['flavorId'] = inst['instance_type']['flavorid'] def _build_addresses(self, response, inst): -- cgit From 935c43b414c1685163957590a6fb77fd8ddbac2f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 16 Aug 2011 13:36:11 -0500 Subject: Allow local_gb to be 0; PEP8 fixes. 
--- nova/compute/manager.py | 10 +++++++++- nova/virt/libvirt/connection.py | 5 +++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3299268f2..39f43a268 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -359,6 +359,13 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type = self.db.instance_type_get(context, instance_type_id) allowed_size_gb = instance_type['local_gb'] + + if allowed_size_gb == 0: + # NOTE(jk0): Since the default local_gb of m1.tiny is 0, we will + # allow the check to proceed. We may want to look into changing the + # default size to 1GB. + return + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 LOG.debug(_("image_id=%(image_id)d, image_size_bytes=" @@ -1368,7 +1375,8 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + self.driver.ensure_filtering_rules_for_instance(instance_ref, + network_info) # Preparation for block migration if block_migration: diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2b17e244a..e8a657bac 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1538,8 +1538,9 @@ class LibvirtConnection(driver.ComputeDriver): # If any instances never launch at destination host, # basic-filtering must be set here. self.firewall_driver.setup_basic_filtering(instance_ref, network_info) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + # setting up nova-instance-instance-xx mainly. + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 4ee7e94ab89189c284348c8756da611192dfe5ec Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 16 Aug 2011 13:43:33 -0500 Subject: Updated note. --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 39f43a268..88d290908 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -361,9 +361,9 @@ class ComputeManager(manager.SchedulerDependentManager): allowed_size_gb = instance_type['local_gb'] if allowed_size_gb == 0: - # NOTE(jk0): Since the default local_gb of m1.tiny is 0, we will - # allow the check to proceed. We may want to look into changing the - # default size to 1GB. + # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we + # need to handle potential situations where local_gb is 0. This is + # the default for m1.tiny. return allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 -- cgit From dc2ccb95848c330eeb8e6fa55bf487c54e03a3c3 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 16 Aug 2011 13:45:13 -0500 Subject: Review feedback. 
--- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 88d290908..66458fb36 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -360,10 +360,10 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type_id) allowed_size_gb = instance_type['local_gb'] - if allowed_size_gb == 0: # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we # need to handle potential situations where local_gb is 0. This is # the default for m1.tiny. + if allowed_size_gb == 0: return allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 -- cgit From fb43ea94e81e5eec51b73c2aab4a8a38cdf71361 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 16 Aug 2011 11:46:22 -0700 Subject: make delete more consistant --- nova/api/openstack/contrib/floating_ips.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 722320534..1276c0118 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -105,13 +105,13 @@ class FloatingIPController(object): ip = self.network_api.get_floating_ip(context, id) if 'fixed_ip' in ip: - self.disassociate(req, id) + try: + self.disassociate(req, id) + except exception.ApiError: + LOG.warn("disassociate failure %s", id) self.network_api.release_floating_ip(context, address=ip['address']) - - return {'released': { - "id": ip['id'], - "floating_ip": ip['address']}} + return exc.HTTPAccepted() def associate(self, req, id, body): """ /floating_ips/{id}/associate fixed ip in body """ -- cgit From ed3927455cd4054b5741fe5a3f0917d91a9066db Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 16 Aug 2011 16:50:15 -0700 Subject: fix unit tests --- .../api/openstack/contrib/test_floating_ips.py | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 704d06582..2f6f6a64d 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -153,15 +153,10 @@ class FloatingIpTest(test.TestCase): req = webob.Request.blank('/v1.1/os-floating-ips/1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - actual = json.loads(res.body)['released'] - expected = { - "id": 1, - "floating_ip": '10.10.10.10'} - self.assertEqual(actual, expected) + self.assertEqual(res.status_int, 202) def test_floating_ip_associate(self): - body = dict(associate_address=dict(fixed_ip='1.2.3.4')) + body = dict(associate_address=dict(fixed_ip='11.0.0.1')) req = webob.Request.blank('/v1.1/os-floating-ips/1/associate') req.method = 'POST' req.body = json.dumps(body) @@ -169,11 +164,13 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - actual = json.loads(res.body)['associated'] + actual = json.loads(res.body)['floating_ip'] + expected = { - "floating_ip_id": '1', - "floating_ip": "10.10.10.10", - "fixed_ip": "1.2.3.4"} + "id": 1, + "instance_id": None, + "ip": "10.10.10.10", + "fixed_ip": "11.0.0.1"} self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): @@ -184,8 +181,11 @@ class FloatingIpTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = 
req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - ip = json.loads(res.body)['disassociated'] + ip = json.loads(res.body)['floating_ip'] expected = { - "floating_ip": '10.10.10.10', + "id": 1, + "instance_id": None, + "ip": '10.10.10.10', "fixed_ip": '11.0.0.1'} + self.assertEqual(ip, expected) -- cgit From 83177757632b381d42cc5107fe7d1cba8830a10a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 16 Aug 2011 16:59:36 -0700 Subject: all tests passing --- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 1276c0118..751b27c9f 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -85,7 +85,7 @@ class FloatingIPController(object): return _translate_floating_ips_view(floating_ips) - def create(self, req, body): + def create(self, req, body=None): context = req.environ['nova.context'] try: diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 2f6f6a64d..9b41a58c0 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -143,10 +143,13 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) print res self.assertEqual(res.status_int, 200) - ip = json.loads(res.body)['allocated'] + ip = json.loads(res.body)['floating_ip'] + expected = { "id": 1, - "floating_ip": '10.10.10.10'} + "instance_id": None, + "ip": "10.10.10.10", + "fixed_ip": None} self.assertEqual(ip, expected) def test_floating_ip_release(self): -- cgit From c890722ddfec7b6ef1911bfbbfd834ac1e3666d5 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 16 Aug 2011 23:15:54 -0400 Subject: Remove instances.admin_pass column. --- .../versions/037_instances_drop_admin_pass.py | 37 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 1 - nova/virt/xenapi/vmops.py | 3 -- 3 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py b/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py new file mode 100644 index 000000000..b957666c2 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py @@ -0,0 +1,37 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, MetaData, Table, String + +meta = MetaData() + +admin_pass = Column( + 'admin_pass', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + instances.drop_column('admin_pass') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + instances.create_column(admin_pass) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f2a4680b0..a8e9c36db 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -173,7 +173,6 @@ class Instance(BASE, NovaBase): base_name += "-rescue" return base_name - admin_pass = Column(String(255)) user_id = Column(String(255)) project_id = Column(String(255)) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index eb0a846b5..9a6215f88 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -709,9 +709,6 @@ class VMOps(object): if resp['returncode'] != '0': LOG.error(_('Failed to update password: %(resp)r') % locals()) return None - db.instance_update(nova_context.get_admin_context(), - instance['id'], - dict(admin_pass=new_pass)) return resp['message'] def inject_file(self, instance, path, contents): -- cgit From 536c1e95a68569abda6fe8ee4e3f571976521c8e Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Tue, 16 Aug 2011 20:36:49 -0700 Subject: add new vif uuid for OVS vifplug for libvirt + xenserver --- nova/virt/libvirt/vif.py | 13 +++++++------ nova/virt/xenapi/vif.py | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 4cb9abda4..67366fdbe 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -98,10 +98,12 @@ class LibvirtBridgeDriver(VIFDriver): class LibvirtOpenVswitchDriver(VIFDriver): """VIF driver for Open vSwitch.""" + def get_dev_name(_self, iface_id): + return "tap-" + iface_id[0:15] + def plug(self, instance, network, mapping): - vif_id = str(instance['id']) + "-" + str(network['id']) - dev = "tap-%s" % vif_id - iface_id = "nova-" + vif_id + iface_id = mapping['vif_uuid'] + dev = self.get_dev_name(iface_id) if not linux_net._device_exists(dev): utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap', run_as_root=True) @@ -125,11 +127,10 @@ class LibvirtOpenVswitchDriver(VIFDriver): def unplug(self, instance, network, mapping): """Unplug the VIF from the network by deleting the port from the bridge.""" - vif_id = str(instance['id']) + "-" + str(network['id']) - dev = "tap-%s" % vif_id + dev = self.get_dev_name(mapping['vif_uuid']) try: utils.execute('ovs-vsctl', 'del-port', - network['bridge'], dev, run_as_root=True) + FLAGS.libvirt_ovs_bridge, dev, run_as_root=True) utils.execute('ip', 'link', 'delete', dev, run_as_root=True) except exception.ProcessExecutionError: LOG.warning(_("Failed while unplugging vif of instance '%s'"), diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py index 527602243..2f25efeb2 100644 --- a/nova/virt/xenapi/vif.py +++ b/nova/virt/xenapi/vif.py @@ -128,12 +128,12 @@ class XenAPIOpenVswitchDriver(VIFDriver): vif_rec['VM'] = vm_ref vif_rec['MAC'] = network_mapping['mac'] vif_rec['MTU'] = '1500' - vif_id = "nova-" + str(instance['id']) + "-" + str(network['id']) vif_rec['qos_algorithm_type'] = "" 
vif_rec['qos_algorithm_params'] = {} # OVS on the hypervisor monitors this key and uses it to # set the iface-id attribute - vif_rec['other_config'] = {"nicira-iface-id": vif_id} + vif_rec['other_config'] = \ + {"nicira-iface-id": network_mapping['vif_uuid']} return vif_rec def unplug(self, instance, network, mapping): -- cgit From 79f3b1512166a37790c5cb2863140d696c717455 Mon Sep 17 00:00:00 2001 From: Troy Toman Date: Wed, 17 Aug 2011 02:41:17 -0500 Subject: Changed return code to 413 for metadata, personality and instance quota issues --- Authors | 1 + nova/api/openstack/common.py | 3 ++- nova/api/openstack/create_instance_helper.py | 13 ++++++++++--- nova/api/openstack/faults.py | 2 +- nova/api/openstack/server_metadata.py | 3 ++- nova/quota.py | 2 +- nova/tests/api/openstack/test_image_metadata.py | 4 ++-- nova/tests/api/openstack/test_server_actions.py | 4 ++-- nova/tests/api/openstack/test_server_metadata.py | 4 ++-- 9 files changed, 23 insertions(+), 13 deletions(-) diff --git a/Authors b/Authors index 02fe46c79..864679929 100644 --- a/Authors +++ b/Authors @@ -101,6 +101,7 @@ Stephanie Reese Thierry Carrez Todd Willey Trey Morris +Troy Toman Tushar Patil Vasiliy Shlykov Vishvananda Ishaya diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index b2a675653..d9eb832f2 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -241,7 +241,8 @@ def check_img_metadata_quota_limit(context, metadata): quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: expl = _("Image metadata limit exceeded") - raise webob.exc.HTTPBadRequest(explanation=expl) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl, + headers={'Retry-After': 0}) class MetadataXMLDeserializer(wsgi.XMLDeserializer): diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 4e1da549e..b4a08dac0 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -180,13 +180,20 @@ class CreateInstanceHelper(object): """ if error.code == "OnsetFileLimitExceeded": expl = _("Personality file limit exceeded") - raise exc.HTTPBadRequest(explanation=expl) + raise exc.HTTPRequestEntityTooLarge(explanation=error.message, + headers={'Retry-After': 0}) if error.code == "OnsetFilePathLimitExceeded": expl = _("Personality file path too long") - raise exc.HTTPBadRequest(explanation=expl) + raise exc.HTTPRequestEntityTooLarge(explanation=error.message, + headers={'Retry-After': 0}) if error.code == "OnsetFileContentLimitExceeded": expl = _("Personality file content too long") - raise exc.HTTPBadRequest(explanation=expl) + raise exc.HTTPRequestEntityTooLarge(explanation=error.message, + headers={'Retry-After': 0}) + if error.code == "InstanceLimitExceeded": + expl = _("Instance quotas have been exceeded") + raise exc.HTTPRequestEntityTooLarge(explanation=error.message, + headers={'Retry-After': 0}) # if the original error is okay, just reraise it raise error diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 1ab45d4f1..0ed6f1ff0 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -53,7 +53,7 @@ class Fault(webob.exc.HTTPException): fault_name: { 'code': code, 'message': self.wrapped_exc.explanation}} - if code == 413: + if code == 413 and self.wrapped_exc.headers['Retry-After']: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry diff --git 
a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index 2b235f79a..8ac3319c9 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -151,7 +151,8 @@ class Controller(object): def _handle_quota_error(self, error): """Reraise quota errors as api-specific http exceptions.""" if error.code == "MetadataLimitExceeded": - raise exc.HTTPBadRequest(explanation=error.message) + raise exc.HTTPRequestEntityTooLarge(explanation=error.message, + headers={'Retry-After': 0}) raise error diff --git a/nova/quota.py b/nova/quota.py index 58766e846..48e598659 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -164,5 +164,5 @@ def allowed_injected_file_path_bytes(context): class QuotaError(exception.ApiError): - """Quota Exceeeded.""" + """Quota Exceeded.""" pass diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index 56a0932e7..21743eeef 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -230,7 +230,7 @@ class ImageMetaDataTest(test.TestCase): req.body = json_string req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) def test_too_many_metadata_items_on_put(self): req = webob.Request.blank('/v1.1/images/3/metadata/blah') @@ -238,4 +238,4 @@ class ImageMetaDataTest(test.TestCase): req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 687a19390..80a27e30f 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -392,7 +392,7 @@ class ServerActionsTest(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) + self.assertEqual(413, response.status_int) def test_create_backup_no_name(self): """Name is required for backups""" @@ -865,7 +865,7 @@ class ServerActionsTestV11(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) + self.assertEqual(413, response.status_int) def test_create_image_no_name(self): body = { diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index ec446f0f0..8512bd518 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -417,9 +417,9 @@ class ServerMetaDataTest(test.TestCase): req.body = json_string req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) - def test_to_many_metadata_items_on_update_item(self): + def test_too_many_metadata_items_on_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata_max) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') -- cgit From 228b185f1366df62da42b646ce98711de4195a5d Mon Sep 17 00:00:00 2001 From: Troy Toman Date: Wed, 17 Aug 2011 03:03:25 -0500 Subject: Removed 
a change from faults.py that was not required." --- nova/api/openstack/faults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 0ed6f1ff0..1ab45d4f1 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -53,7 +53,7 @@ class Fault(webob.exc.HTTPException): fault_name: { 'code': code, 'message': self.wrapped_exc.explanation}} - if code == 413 and self.wrapped_exc.headers['Retry-After']: + if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry -- cgit From 77e1e0d3359bce9e5e30134f141151fc271a2e4b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 19:05:29 +0900 Subject: Removed serverId from the response --- nova/api/openstack/contrib/virtual_interfaces.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index 38246aeb5..86d1128fd 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -34,7 +34,6 @@ def _translate_vif_summary_view(_context, vif): d = {} d['id'] = vif['uuid'] d['macAddress'] = vif['address'] - d['serverId'] = vif['instance_id'] return d -- cgit From 623aa3a38cab6cc617fb5fb512cdc733f69b4887 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 19:07:14 +0900 Subject: Added virtual interfaces API test --- .../openstack/contrib/test_virtual_interfaces.py | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 nova/tests/api/openstack/contrib/test_virtual_interfaces.py diff --git a/nova/tests/api/openstack/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py new file mode 100644 index 000000000..a3a177e33 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py @@ -0,0 +1,55 @@ +# Copyright (C) 2011 Midokura KK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +from nova import test +from nova import compute +from nova.tests.api.openstack import fakes +from nova.api.openstack.contrib.virtual_interfaces import \ + ServerVirtualInterfaceController + + +def compute_api_get(self, context, server_id): + return {'virtual_interfaces': [ + {'uuid': '00000000-0000-0000-0000-00000000000000000', + 'address': '00-00-00-00-00-00'}, + {'uuid': '11111111-1111-1111-1111-11111111111111111', + 'address': '11-11-11-11-11-11'}]} + + +class ServerVirtualInterfaceTest(test.TestCase): + + def setUp(self): + super(ServerVirtualInterfaceTest, self).setUp() + self.controller = ServerVirtualInterfaceController() + self.stubs.Set(compute.api.API, "get", compute_api_get) + + def tearDown(self): + super(ServerVirtualInterfaceTest, self).tearDown() + + def test_get_virtual_interfaces_list(self): + req = webob.Request.blank('/v1.1/servers/1/os-virtual-interfaces') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + response = {'serverVirtualInterfaces': [ + {'id': '00000000-0000-0000-0000-00000000000000000', + 'macAddress': '00-00-00-00-00-00'}, + {'id': '11111111-1111-1111-1111-11111111111111111', + 'macAddress': '11-11-11-11-11-11'}]} + self.assertEqual(res_dict, response) -- cgit From 751c8b4ff0e94b4f665af5541b9249637623d193 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 19:58:26 +0900 Subject: Added XML support and changed JSON output keys --- nova/api/openstack/contrib/virtual_interfaces.py | 28 +++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index 86d1128fd..715a54d52 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -24,16 +24,17 @@ from nova import log as logging from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults +from nova.api.openstack import wsgi LOG = logging.getLogger("nova.api.virtual_interfaces") def _translate_vif_summary_view(_context, vif): - """Maps keys for attachment summary view.""" + """Maps keys for VIF summary view.""" d = {} d['id'] = vif['uuid'] - d['macAddress'] = vif['address'] + d['mac_address'] = vif['address'] return d @@ -41,12 +42,6 @@ class ServerVirtualInterfaceController(object): """The instance VIF API controller for the Openstack API. 
""" - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'serverVirtualInterface': ['id', - 'macAddress']}}} - def __init__(self): self.compute_api = compute.API() super(ServerVirtualInterfaceController, self).__init__() @@ -63,7 +58,7 @@ class ServerVirtualInterfaceController(object): vifs = instance['virtual_interfaces'] limited_list = common.limited(vifs, req) res = [entity_maker(context, vif) for vif in limited_list] - return {'serverVirtualInterfaces': res} + return {'virtual_interfaces': res} def index(self, req, server_id): """Returns the list of VIFs for a given instance.""" @@ -91,11 +86,24 @@ class Virtual_interfaces(extensions.ExtensionDescriptor): def get_resources(self): resources = [] + metadata = _get_metadata() + body_serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V11)} + serializer = wsgi.ResponseSerializer(body_serializers, None) res = extensions.ResourceExtension('os-virtual-interfaces', ServerVirtualInterfaceController(), parent=dict( member_name='server', - collection_name='servers')) + collection_name='servers'), + serializer=serializer) resources.append(res) return resources + + +def _get_metadata(): + metadata = { + "attributes": { + 'virtual_interface': ["id", "mac_address"]}} + return metadata -- cgit From ad8081a5b3abfc63834594c5dbf8ac1bb0721a4b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 19:58:57 +0900 Subject: Fixed vif test to match the JSON key change --- nova/tests/api/openstack/contrib/test_virtual_interfaces.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py index a3a177e33..d541a9e95 100644 --- a/nova/tests/api/openstack/contrib/test_virtual_interfaces.py +++ b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py @@ -47,9 +47,9 @@ class ServerVirtualInterfaceTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'serverVirtualInterfaces': [ + response = {'virtual_interfaces': [ {'id': '00000000-0000-0000-0000-00000000000000000', - 'macAddress': '00-00-00-00-00-00'}, + 'mac_address': '00-00-00-00-00-00'}, {'id': '11111111-1111-1111-1111-11111111111111111', - 'macAddress': '11-11-11-11-11-11'}]} + 'mac_address': '11-11-11-11-11-11'}]} self.assertEqual(res_dict, response) -- cgit From 4407405244c3797ed1c0433eec7686e15340dca7 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 20:12:24 +0900 Subject: Cleaned up the file --- nova/api/openstack/contrib/virtual_interfaces.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index 715a54d52..2d3850e12 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -38,6 +38,13 @@ def _translate_vif_summary_view(_context, vif): return d +def _get_metadata(): + metadata = { + "attributes": { + 'virtual_interface': ["id", "mac_address"]}} + return metadata + + class ServerVirtualInterfaceController(object): """The instance VIF API controller for the Openstack API. 
""" @@ -100,10 +107,3 @@ class Virtual_interfaces(extensions.ExtensionDescriptor): resources.append(res) return resources - - -def _get_metadata(): - metadata = { - "attributes": { - 'virtual_interface': ["id", "mac_address"]}} - return metadata -- cgit From 5415a59d473fb9ed374e746fb36f30fc664c4dec Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 20:17:09 +0900 Subject: Updated get_updated time --- nova/api/openstack/contrib/virtual_interfaces.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index 2d3850e12..b3bb00a8f 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -88,7 +88,7 @@ class Virtual_interfaces(extensions.ExtensionDescriptor): return "http://docs.openstack.org/ext/virtual_interfaces/api/v1.1" def get_updated(self): - return "2011-08-05T00:00:00+00:00" + return "2011-08-17T00:00:00+00:00" def get_resources(self): resources = [] -- cgit From 2e44657a20cdd620d982b252ca35413c07fd3c2b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 17 Aug 2011 20:23:21 +0900 Subject: Cleaned up the extension metadata API data --- nova/api/openstack/contrib/virtual_interfaces.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index b3bb00a8f..dab61efc8 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -76,10 +76,10 @@ class ServerVirtualInterfaceController(object): class Virtual_interfaces(extensions.ExtensionDescriptor): def get_name(self): - return "Virtual_interfaces" + return "VirtualInterfaces" def get_alias(self): - return "os-virtual-interfaces" + return "virtual_interfaces" def get_description(self): return "Virtual interface support" @@ -98,12 +98,11 @@ class Virtual_interfaces(extensions.ExtensionDescriptor): 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V11)} serializer = wsgi.ResponseSerializer(body_serializers, None) - res = extensions.ResourceExtension('os-virtual-interfaces', - ServerVirtualInterfaceController(), - parent=dict( - member_name='server', - collection_name='servers'), - serializer=serializer) + res = extensions.ResourceExtension( + 'os-virtual-interfaces', + controller=ServerVirtualInterfaceController(), + parent=dict(member_name='server', collection_name='servers'), + serializer=serializer) resources.append(res) return resources -- cgit From 4f3a33859c350ff13b2fd94e33de4f10a7f93bc1 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 17 Aug 2011 10:05:01 -0700 Subject: fix some naming inconsistencies, make associate/disassociate PUTs --- nova/api/openstack/contrib/floating_ips.py | 35 +++++++++------------- .../api/openstack/contrib/test_floating_ips.py | 6 ++-- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 751b27c9f..af3eee16a 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -102,45 +102,38 @@ class FloatingIPController(object): def delete(self, req, id): context = req.environ['nova.context'] - ip = self.network_api.get_floating_ip(context, id) + floating_ip = self.network_api.get_floating_ip(context, id) - if 'fixed_ip' in ip: - try: - 
self.disassociate(req, id) - except exception.ApiError: - LOG.warn("disassociate failure %s", id) + if 'fixed_ip' in floating_ip: + self.network_api.disassociate_floating_ip(context, floating_ip['address']) - self.network_api.release_floating_ip(context, address=ip['address']) + self.network_api.release_floating_ip(context, address=floating_ip['address']) return exc.HTTPAccepted() def associate(self, req, id, body): - """ /floating_ips/{id}/associate fixed ip in body """ + """PUT /floating_ips/{id}/associate fixed ip in body """ context = req.environ['nova.context'] floating_ip = self._get_ip_by_id(context, id) - fixed_ip = body['associate_address']['fixed_ip'] + fixed_ip = body['floating_ip']['fixed_ip'] - try: - self.network_api.associate_floating_ip(context, - floating_ip, fixed_ip) - except rpc.RemoteError: - raise + self.network_api.associate_floating_ip(context, + floating_ip, fixed_ip) floating_ip = self.network_api.get_floating_ip(context, id) return _translate_floating_ip_view(floating_ip) def disassociate(self, req, id, body=None): - """ POST /floating_ips/{id}/disassociate """ + """PUT /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) address = floating_ip['address'] - try: + # no-op if this ip is already disassociated + if 'fixed_ip' in floating_ip: self.network_api.disassociate_floating_ip(context, address) - except rpc.RemoteError: - raise + floating_ip = self.network_api.get_floating_ip(context, id) - floating_ip = self.network_api.get_floating_ip(context, id) return _translate_floating_ip_view(floating_ip) def _get_ip_by_id(self, context, value): @@ -170,8 +163,8 @@ class Floating_ips(extensions.ExtensionDescriptor): res = extensions.ResourceExtension('os-floating-ips', FloatingIPController(), member_actions={ - 'associate': 'POST', - 'disassociate': 'POST'}) + 'associate': 'PUT', + 'disassociate': 'PUT'}) resources.append(res) return resources diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 9b41a58c0..e506519f4 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -159,9 +159,9 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_floating_ip_associate(self): - body = dict(associate_address=dict(fixed_ip='11.0.0.1')) + body = dict(floating_ip=dict(fixed_ip='11.0.0.1')) req = webob.Request.blank('/v1.1/os-floating-ips/1/associate') - req.method = 'POST' + req.method = 'PUT' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -179,7 +179,7 @@ class FloatingIpTest(test.TestCase): def test_floating_ip_disassociate(self): body = dict() req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') - req.method = 'POST' + req.method = 'PUT' req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) -- cgit From 65d7db1136557b7af1f0b9413bacc8fc59e7211f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 17 Aug 2011 10:23:44 -0700 Subject: pep8 fix --- nova/api/openstack/contrib/floating_ips.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index af3eee16a..2f5fdd001 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -105,9 +105,11 @@ class 
FloatingIPController(object): floating_ip = self.network_api.get_floating_ip(context, id) if 'fixed_ip' in floating_ip: - self.network_api.disassociate_floating_ip(context, floating_ip['address']) + self.network_api.disassociate_floating_ip(context, + floating_ip['address']) - self.network_api.release_floating_ip(context, address=floating_ip['address']) + self.network_api.release_floating_ip(context, + address=floating_ip['address']) return exc.HTTPAccepted() def associate(self, req, id, body): -- cgit From 7e3f360eb256ba82629a44de60d36be643d5105d Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 17 Aug 2011 15:33:08 -0400 Subject: Added migration for accessIPv4 and accessIPv6 --- .../versions/037_add_instances_accessip.py | 49 ++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py new file mode 100644 index 000000000..82de2a874 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py @@ -0,0 +1,49 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + +meta = MetaData() + +accessIPv4 = Column( + 'access_ip_v4', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +accessIPv6 = Column( + 'access_ip_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances.create_column(accessIPv4) + instances.create_column(accessIPv6) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ meta.bind = migrate_engine + instances.drop_column('access_ip_v4') + instances.drop_column('access_ip_v6') -- cgit From 8ba3ea03aa58d5b0791b9fd3654dd034cbd3a8bc Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 17 Aug 2011 15:40:17 -0400 Subject: Added accessip to models pep8 --- .../sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py | 1 - nova/db/sqlalchemy/models.py | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py index 82de2a874..39f0dd6ce 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py @@ -33,7 +33,6 @@ instances = Table('instances', meta, ) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f2a4680b0..1249a6269 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -232,6 +232,11 @@ class Instance(BASE, NovaBase): root_device_name = Column(String(255)) + # User editable field meant to represent what ip should be used + # to connect to the instance + access_ip_v4 = Column(String(255)) + access_ip_v6 = Column(String(255)) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused -- cgit From ecc4e9ee389115e3793f94aaf53f8fbe59e7ac66 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Wed, 17 Aug 2011 19:58:22 +0000 Subject: Added the host 'enabled' status to the host_data returned by the plugin. 
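For reference, a minimal standalone sketch of the conversion this patch adds: xe/XenAPI reports "enabled" as a string, so the plugin compares against "true" to expose a real boolean in host_data. The sample records below are hypothetical and are not actual plugin output.

    # Hypothetical host records; only the string-to-bool handling mirrors
    # the one-line hunk below.
    def host_enabled(dct):
        # A missing key defaults to enabled, matching dct.get("enabled", "true").
        return dct.get("enabled", "true") == "true"

    assert host_enabled({"enabled": "true"}) is True
    assert host_enabled({"enabled": "false"}) is False
    assert host_enabled({}) is True
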
--- plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost index cd9694ce1..36c61f78d 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -258,6 +258,7 @@ def cleanup(dct): # out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "") # out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "") # out["host_local-cache-sr"] = dct.get("local-cache-sr", "") + out["enabled"] = dct.get("enabled", "true") == "true" out["host_memory"] = omm = {} omm["total"] = safe_int(dct.get("memory-total", "")) omm["overhead"] = safe_int(dct.get("memory-overhead", "")) -- cgit From a4379a342798016a9dc40761561c996093945d87 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 17 Aug 2011 16:03:03 -0400 Subject: Updated server create XML deserializer to account for accessIPv4 and accessIPv6 --- nova/api/openstack/create_instance_helper.py | 3 +- nova/tests/api/openstack/test_servers.py | 56 ++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 4e1da549e..5ba8afe97 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -443,7 +443,8 @@ class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer): server = {} server_node = self.find_first_child_named(node, 'server') - attributes = ["name", "imageRef", "flavorRef", "adminPass"] + attributes = ["name", "imageRef", "flavorRef", "adminPass", + "accessIPv4", "accessIPv6"] for attr in attributes: if server_node.getAttribute(attr): server[attr] = server_node.getAttribute(attr) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a510d7d97..6f1173d46 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -2491,6 +2491,62 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase): } self.assertEquals(request['body'], expected) + def test_access_ipv4(self): + serial_request = """ +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "server": { + "name": "new-server-test", + "imageRef": "1", + "flavorRef": "2", + "accessIPv4": "1.2.3.4", + }, + } + self.assertEquals(request['body'], expected) + + def test_access_ipv6(self): + serial_request = """ +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "server": { + "name": "new-server-test", + "imageRef": "1", + "flavorRef": "2", + "accessIPv6": "fead:::::1234", + }, + } + self.assertEquals(request['body'], expected) + + def test_access_ip(self): + serial_request = """ +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "server": { + "name": "new-server-test", + "imageRef": "1", + "flavorRef": "2", + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead:::::1234", + }, + } + self.assertEquals(request['body'], expected) + def test_admin_pass(self): serial_request = """ Date: Wed, 17 Aug 2011 16:25:53 -0700 Subject: Make all services use the same launching strategy --- bin/nova-api | 44 +++++++++++++++++--------------------------- nova/service.py | 47 ++++++++++++++++++++++++++++------------------- nova/utils.py | 41 +++-------------------------------------- nova/wsgi.py | 3 --- 4 files 
changed, 48 insertions(+), 87 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index fe8e83366..d2086dc92 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -19,12 +19,15 @@ """Starter script for Nova API. -Starts both the EC2 and OpenStack APIs in separate processes. +Starts both the EC2 and OpenStack APIs in separate greenthreads. """ +import eventlet +eventlet.monkey_patch() + +import gettext import os -import signal import sys @@ -33,32 +36,19 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath( if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) -import nova.service -import nova.utils +gettext.install('nova', unicode=1) from nova import flags - - -FLAGS = flags.FLAGS - - -def main(): - """Launch EC2 and OSAPI services.""" - nova.utils.Bootstrapper.bootstrap_binary(sys.argv) - - launcher = nova.service.Launcher() - - for api in FLAGS.enabled_apis: - service = nova.service.WSGIService(api) - launcher.launch_service(service) - - signal.signal(signal.SIGTERM, lambda *_: launcher.stop()) - - try: - launcher.wait() - except KeyboardInterrupt: - launcher.stop() - +from nova import log as logging +from nova import service +from nova import utils if __name__ == '__main__': - sys.exit(main()) + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + services = [] + for api in flags.FLAGS.enabled_apis: + services.append(service.WSGIService(api)) + service.serve(*services) + service.wait() diff --git a/nova/service.py b/nova/service.py index 6e9eddc5a..e0735d26f 100644 --- a/nova/service.py +++ b/nova/service.py @@ -20,13 +20,12 @@ """Generic Node baseclass for all workers that run on hosts.""" import inspect -import multiprocessing import os +import signal +import eventlet import greenlet -from eventlet import greenthread - from nova import context from nova import db from nova import exception @@ -77,10 +76,7 @@ class Launcher(object): """ service.start() - try: - service.wait() - except KeyboardInterrupt: - service.stop() + service.wait() def launch_service(self, service): """Load and start the given service. @@ -89,10 +85,8 @@ class Launcher(object): :returns: None """ - process = multiprocessing.Process(target=self.run_service, - args=(service,)) - process.start() - self._services.append(process) + gt = eventlet.spawn(self.run_service, service) + self._services.append(gt) def stop(self): """Stop all services which are currently running. @@ -101,8 +95,7 @@ class Launcher(object): """ for service in self._services: - if service.is_alive(): - service.terminate() + service.kill() def wait(self): """Waits until all services have been stopped, and then returns. 
@@ -111,7 +104,10 @@ class Launcher(object): """ for service in self._services: - service.join() + try: + service.wait() + except greenlet.GreenletExit: + pass class Service(object): @@ -121,6 +117,7 @@ class Service(object): periodic_interval=None, *args, **kwargs): self.host = host self.binary = binary + self.name = binary self.topic = topic self.manager_class_name = manager manager_class = utils.import_class(self.manager_class_name) @@ -173,7 +170,7 @@ class Service(object): finally: consumer_set.close() - self.consumer_set_thread = greenthread.spawn(_wait) + self.consumer_set_thread = eventlet.spawn(_wait) if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -339,7 +336,17 @@ class WSGIService(object): self.server.wait() +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + def serve(*services): + global _launcher + if not _launcher: + _launcher = Launcher() + signal.signal(signal.SIGTERM, lambda *args: _launcher.stop()) try: if not services: services = [Service.create()] @@ -354,7 +361,7 @@ def serve(*services): flags.DEFINE_flag(flags.HelpXMLFlag()) FLAGS.ParseNewFlags() - name = '_'.join(x.binary for x in services) + name = '_'.join(x.name for x in services) logging.debug(_('Serving %s'), name) logging.debug(_('Full set of FLAGS:')) for flag in FLAGS: @@ -362,9 +369,11 @@ def serve(*services): logging.debug('%(flag)s : %(flag_get)s' % locals()) for x in services: - x.start() + _launcher.launch_service(x) def wait(): - while True: - greenthread.sleep(5) + try: + _launcher.wait() + except KeyboardInterrupt: + _launcher.stop() diff --git a/nova/utils.py b/nova/utils.py index 7276b6bd5..54126f644 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -260,8 +260,9 @@ def default_flagfile(filename='nova.conf', args=None): filename = "./nova.conf" if not os.path.exists(filename): filename = '/etc/nova/nova.conf' - flagfile = '--flagfile=%s' % filename - args.insert(1, flagfile) + if os.path.exists(filename): + flagfile = '--flagfile=%s' % filename + args.insert(1, flagfile) def debug(arg): @@ -837,39 +838,3 @@ def bool_from_str(val): return True if int(val) else False except ValueError: return val.lower() == 'true' - - -class Bootstrapper(object): - """Provides environment bootstrapping capabilities for entry points.""" - - @staticmethod - def bootstrap_binary(argv): - """Initialize the Nova environment using command line arguments.""" - Bootstrapper.setup_flags(argv) - Bootstrapper.setup_logging() - Bootstrapper.log_flags() - - @staticmethod - def setup_logging(): - """Initialize logging and log a message indicating the Nova version.""" - logging.setup() - logging.audit(_("Nova Version (%s)") % - version.version_string_with_vcs()) - - @staticmethod - def setup_flags(input_flags): - """Initialize flags, load flag file, and print help if needed.""" - default_flagfile(args=input_flags) - FLAGS(input_flags or []) - flags.DEFINE_flag(flags.HelpFlag()) - flags.DEFINE_flag(flags.HelpshortFlag()) - flags.DEFINE_flag(flags.HelpXMLFlag()) - FLAGS.ParseNewFlags() - - @staticmethod - def log_flags(): - """Log the list of all active flags being used.""" - logging.audit(_("Currently active flags:")) - for key in FLAGS: - value = FLAGS.get(key, None) - logging.audit(_("%(key)s : %(value)s" % locals())) diff --git a/nova/wsgi.py b/nova/wsgi.py index c8ddb97d7..f2846aa73 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -39,9 +39,6 @@ from nova import log as logging from nova import utils 
-eventlet.patcher.monkey_patch(socket=True, time=True) - - FLAGS = flags.FLAGS LOG = logging.getLogger('nova.wsgi') -- cgit From 635306fd009ea9e50259d01e10762f6b5ab45049 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 17 Aug 2011 22:00:38 -0700 Subject: bug #828429: remove references to interface in nova-dhcpbridge --- bin/nova-dhcpbridge | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index a47ea7a76..c2fd8994d 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -52,7 +52,7 @@ flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') LOG = logging.getLogger('nova.dhcpbridge') -def add_lease(mac, ip_address, _interface): +def add_lease(mac, ip_address): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: LOG.debug(_("leasing ip")) @@ -66,13 +66,13 @@ def add_lease(mac, ip_address, _interface): "args": {"address": ip_address}}) -def old_lease(mac, ip_address, interface): +def old_lease(mac, ip_address): """Update just as add lease.""" LOG.debug(_("Adopted old lease or got a change of mac")) - add_lease(mac, ip_address, interface) + add_lease(mac, ip_address) -def del_lease(mac, ip_address, _interface): +def del_lease(mac, ip_address): """Called when a lease expires.""" if FLAGS.fake_rabbit: LOG.debug(_("releasing ip")) @@ -116,9 +116,9 @@ def main(): mac = argv[2] ip = argv[3] msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s" - " on interface %(interface)s") % locals() + " for network %(network_id)s") % locals() LOG.debug(msg) - globals()[action + '_lease'](mac, ip, interface) + globals()[action + '_lease'](mac, ip) else: print init_leases(network_id) -- cgit From b7019a57c416f7a14f8e8229776a18c28c109d38 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 17 Aug 2011 22:29:04 -0700 Subject: in dhcpbridge, only grab network id from env if needed --- bin/nova-dhcpbridge | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index c2fd8994d..afafca548 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -99,8 +99,6 @@ def main(): utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) logging.setup() - # check ENV first so we don't break any older deploys - network_id = int(os.environ.get('NETWORK_ID')) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags @@ -115,11 +113,11 @@ def main(): if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] - msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s" - " for network %(network_id)s") % locals() + msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s") % locals() LOG.debug(msg) globals()[action + '_lease'](mac, ip) else: + network_id = int(os.environ.get('NETWORK_ID')) print init_leases(network_id) if __name__ == "__main__": -- cgit From af333cc72e753a4a28d0deb20369076df7bf09e3 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 10:53:01 -0400 Subject: Added accessIPv4 and accessIPv6 to servers view builder Updated compute api to handle accessIPv4 and 6 --- nova/api/openstack/create_instance_helper.py | 2 + nova/api/openstack/views/servers.py | 4 + nova/compute/api.py | 15 ++- nova/tests/api/openstack/test_servers.py | 173 +++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 5ba8afe97..332d5d9bb 100644 --- a/nova/api/openstack/create_instance_helper.py +++ 
b/nova/api/openstack/create_instance_helper.py @@ -157,6 +157,8 @@ class CreateInstanceHelper(object): key_name=key_name, key_data=key_data, metadata=server_dict.get('metadata', {}), + access_ip_v4=server_dict.get('accessIPv4'), + access_ip_v6=server_dict.get('accessIPv6'), injected_files=injected_files, admin_password=password, zone_blob=zone_blob, diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..3b91c037a 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -182,6 +182,10 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] + if inst.get('access_ip_v4'): + response['accessIPv4'] = inst['access_ip_v4'] + if inst.get('access_ip_v6'): + response['accessIPv6'] = inst['access_ip_v6'] def _build_links(self, response, inst): href = self.generate_href(inst["id"]) diff --git a/nova/compute/api.py b/nova/compute/api.py index e909e9959..168d46689 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -153,7 +153,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, access_ip_v4=None, access_ip_v6=None): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -247,6 +247,8 @@ class API(base.Base): 'key_data': key_data, 'locked': False, 'metadata': metadata, + 'access_ip_v4': access_ip_v4, + 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, @@ -421,6 +423,7 @@ class API(base.Base): 'num_instances': num_instances, } + print base_options rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", @@ -438,7 +441,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + access_ip_v4=None, access_ip_v6=None): """Provision the instances by passing the whole request to the Scheduler for execution. Returns a Reservation ID related to the creation of all of these instances.""" @@ -454,7 +458,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, access_ip_v4, access_ip_v6) self._ask_scheduler_to_create_instance(context, base_options, instance_type, zone_blob, @@ -472,7 +476,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + access_ip_v4=None, access_ip_v6=None): """ Provision the instances by sending off a series of single instance requests to the Schedulers. 
This is fine for trival @@ -496,7 +501,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, access_ip_v4, access_ip_v6) block_device_mapping = block_device_mapping or [] instances = [] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6f1173d46..25fce95b4 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1379,6 +1379,8 @@ class ServersTest(test.TestCase): 'display_name': 'server_test', 'uuid': FAKE_UUID, 'instance_type': dict(inst_type), + 'access_ip_v4': '1.2.3.4', + 'access_ip_v6': 'fead::1234', 'image_ref': image_ref, "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), @@ -1579,6 +1581,69 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_create_instance_with_access_ip_v1_1(self): + self._setup_for_create_instance() + + # proper local hrefs must start with 'http://localhost/v1.1/' + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + access_ipv4 = '1.2.3.4' + access_ipv6 = 'fead::1234' + expected_flavor = { + "id": "3", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/flavors/3', + }, + ], + } + expected_image = { + "id": "2", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/images/2', + }, + ], + } + body = { + 'server': { + 'name': 'server_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'accessIPv4': access_ipv4, + 'accessIPv6': access_ipv6, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + 'personality': [ + { + "path": "/etc/banner.txt", + "contents": "MQ==", + }, + ], + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + server = json.loads(res.body)['server'] + self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(1, server['id']) + self.assertEqual(0, server['progress']) + self.assertEqual('server_test', server['name']) + self.assertEqual(expected_flavor, server['flavor']) + self.assertEqual(expected_image, server['image']) + self.assertEqual(access_ipv4, server['accessIPv4']) + def test_create_instance_v1_1(self): self._setup_for_create_instance() @@ -3095,6 +3160,8 @@ class ServersViewBuilderV11Test(test.TestCase): "display_description": "", "locked": False, "metadata": [], + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::::1234", #"address": , #"floating_ips": [{"address":ip} for ip in public_addresses]} "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"} @@ -3237,6 +3304,112 @@ class ServersViewBuilderV11Test(test.TestCase): output = self.view_builder.build(self.instance, True) self.assertDictMatch(output, expected_server) + def test_build_server_detail_with_accessipv4(self): + + self.instance['access_ip_v4'] = '1.2.3.4' + + image_bookmark = "http://localhost/images/5" + flavor_bookmark = "http://localhost/flavors/1" + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": '', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + 
"href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": {}, + "metadata": {}, + "accessIPv4": "1.2.3.4", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, True) + self.assertDictMatch(output, expected_server) + + def test_build_server_detail_with_accessipv6(self): + + self.instance['access_ip_v6'] = 'fead::1234' + + image_bookmark = "http://localhost/images/5" + flavor_bookmark = "http://localhost/flavors/1" + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": '', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": {}, + "metadata": {}, + "accessIPv6": "fead::1234", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, True) + self.assertDictMatch(output, expected_server) + def test_build_server_detail_with_metadata(self): metadata = [] -- cgit From 9b5416e8afc115fabb76664a65b6d33e9ba89b7f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 11:05:59 -0400 Subject: Updated ServersXMLSerializer to allow accessIPv4 and accessIPv6 in XML responses --- nova/api/openstack/servers.py | 4 ++++ nova/tests/api/openstack/test_servers.py | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 335ecad86..f06ee6b62 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -837,6 +837,10 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): node.setAttribute('created', str(server['created'])) node.setAttribute('updated', str(server['updated'])) node.setAttribute('status', server['status']) + if 'accessIPv4' in server: + node.setAttribute('accessIPv4', str(server['accessIPv4'])) + if 'accessIPv6' in server: + node.setAttribute('accessIPv6', str(server['accessIPv6'])) if 'progress' in server: node.setAttribute('progress', str(server['progress'])) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 25fce95b4..0bdbb2006 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3494,6 +3494,8 @@ class ServerXMLSerializationTest(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", "image": { "id": "5", "links": [ @@ -3570,6 +3572,8 @@ class ServerXMLSerializationTest(test.TestCase): created="%(expected_now)s" hostId="e4d909c290d0fb1ca068ffaddf22cbd0" status="BUILD" + accessIPv4="1.2.3.4" + accessIPv6="fead::1234" progress="0"> @@ -3614,6 +3618,7 @@ class ServerXMLSerializationTest(test.TestCase): "progress": 0, "name": "test_server", "status": "BUILD", + "accessIPv6": "fead::1234", "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", "adminPass": "test_password", "image": { @@ -3692,6 +3697,7 @@ class 
ServerXMLSerializationTest(test.TestCase): created="%(expected_now)s" hostId="e4d909c290d0fb1ca068ffaddf22cbd0" status="BUILD" + accessIPv6="fead::1234" adminPass="test_password" progress="0"> -- cgit From 155d640d3d53bcf76daa0ff0ae67ac5dbbe3022a Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 12:19:47 -0400 Subject: Fixed issue where accessIP was added in none detail responses --- nova/api/openstack/views/servers.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 3b91c037a..8b3a1e221 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -143,6 +143,12 @@ class ViewBuilderV11(ViewBuilder): response['server']['progress'] = 100 elif response['server']['status'] == "BUILD": response['server']['progress'] = 0 + + if inst.get('access_ip_v4'): + response['server']['accessIPv4'] = inst['access_ip_v4'] + if inst.get('access_ip_v6'): + response['server']['accessIPv6'] = inst['access_ip_v6'] + return response def _build_image(self, response, inst): @@ -182,10 +188,6 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] - if inst.get('access_ip_v4'): - response['accessIPv4'] = inst['access_ip_v4'] - if inst.get('access_ip_v6'): - response['accessIPv6'] = inst['access_ip_v6'] def _build_links(self, response, inst): href = self.generate_href(inst["id"]) -- cgit From 9011bf57d8caf8a0bd11dfb33cf968b2b65fe294 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 11:21:35 -0500 Subject: Added rescue mode extension. --- nova/api/openstack/contrib/rescue.py | 72 ++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 nova/api/openstack/contrib/rescue.py diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py new file mode 100644 index 000000000..efb882fd6 --- /dev/null +++ b/nova/api/openstack/contrib/rescue.py @@ -0,0 +1,72 @@ +# Copyright 2011 Openstack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The rescue mode extension.""" + +import webob +from webob import exc + +from nova import compute +from nova import log as logging +from nova.api.openstack import extensions as exts +from nova.api.openstack import faults + +LOG = logging.getLogger("nova.api.contrib.rescue") + + +class Rescue(exts.ExtensionDescriptor): + """The Rescue API controller for the OpenStack API.""" + def __init__(self): + super(Rescue, self).__init__() + self.compute_api = compute.API() + + def _rescue(self, input_dict, req, instance_id): + """Enable or disable rescue mode.""" + context = req.environ["nova.context"] + action = input_dict["rescue"]["action"] + + try: + if action == "rescue": + self.compute_api.rescue(context, instance_id) + elif action == "unrescue": + self.compute_api.unrescue(context, instance_id) + except Exception, e: + LOG.exception(_("Error in %(action)s: %(e)s") % locals()) + return faults.Fault(exc.HTTPBadRequest()) + + return webob.Response(status_int=202) + + def get_name(self): + return "Rescue" + + def get_alias(self): + return "rescue" + + def get_description(self): + return "Instance rescue mode" + + def get_namespace(self): + return "http://docs.openstack.org/ext/rescue/api/v1.1" + + def get_updated(self): + return "2011-08-18T00:00:00+00:00" + + def get_actions(self): + """Return the actions the extension adds, as required by contract.""" + actions = [ + exts.ActionExtension("servers", "rescue", self._rescue), + exts.ActionExtension("servers", "unrescue", self._rescue), + ] + + return actions -- cgit From 186987d854fabde120a37713909eaecfbabeaece Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 16:22:56 +0000 Subject: Corrected the hardcoded filter path. Also simplified the filter matching code in host_filter.py --- nova/compute/api.py | 3 +-- nova/scheduler/host_filter.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e909e9959..229d02af4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -411,12 +411,11 @@ class API(base.Base): LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " (all-at-once)") % locals()) - filter_class = 'nova.scheduler.host_filter.InstanceTypeFilter' request_spec = { 'image': image, 'instance_properties': base_options, 'instance_type': instance_type, - 'filter': filter_class, + 'filter': 'InstanceTypeFilter' 'blob': zone_blob, 'num_instances': num_instances, } diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4bc5158cc..826a99b0a 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -58,8 +58,6 @@ def choose_host_filter(filter_name=None): if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in _get_filters(): - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if (host_match.startswith("nova.scheduler.filters") and - (host_match.split(".")[-1] == filter_name)): + if filter_class.__name__ == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) -- cgit From 7c957d7821437604b99d7383c8674676dc3921dc Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 16:40:41 +0000 Subject: Added the fix for the missing parameter for the call to create_db_entry_for_new_instance() --- nova/scheduler/abstract_scheduler.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 77db67773..3930148e2 100644 --- 
a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -62,12 +62,13 @@ class AbstractScheduler(driver.Scheduler): host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] image = request_spec['image'] + instance_type = request_spec['instance_type'] # TODO(sandy): I guess someone needs to add block_device_mapping # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + instance_type, image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id @@ -158,8 +159,8 @@ class AbstractScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) + self._provision_resource_locally(context, instance_type, host_info, + request_spec, kwargs) def _provision_resource(self, context, build_plan_item, instance_id, request_spec, kwargs): -- cgit From 1ef677a2eac6129aa3847aa10996f4357ec72a48 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 18 Aug 2011 09:50:24 -0700 Subject: dhcpbridge: add better error if NETWORK_ID is not set, convert locals() to static dict --- bin/nova-dhcpbridge | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index afafca548..1c9ae951e 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -113,11 +113,19 @@ def main(): if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] - msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s") % locals() + msg = _("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % \ + {"action": action, + "mac": mac, + "ip": ip} LOG.debug(msg) globals()[action + '_lease'](mac, ip) else: - network_id = int(os.environ.get('NETWORK_ID')) + try: + network_id = int(os.environ.get('NETWORK_ID')) + except TypeError: + LOG.error(_("Environment variable 'NETWORK_ID' must be set.")) + sys.exit(1) + print init_leases(network_id) if __name__ == "__main__": -- cgit From 9033d4879556452d3b7c0ee9fa9fcafbea59e5be Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 12:55:27 -0400 Subject: minor cleanup --- nova/compute/api.py | 1 - nova/tests/api/openstack/test_servers.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 168d46689..49222d476 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -423,7 +423,6 @@ class API(base.Base): 'num_instances': num_instances, } - print base_options rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 0bdbb2006..b3e9bfe04 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -2580,14 +2580,14 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase): name="new-server-test" imageRef="1" flavorRef="2" - accessIPv6="fead:::::1234"/>""" + accessIPv6="fead::1234"/>""" request = self.deserializer.deserialize(serial_request, 'create') expected = { "server": { "name": "new-server-test", "imageRef": "1", "flavorRef": "2", - "accessIPv6": "fead:::::1234", + "accessIPv6": "fead::1234", }, } self.assertEquals(request['body'], expected) @@ -2599,7 +2599,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase): imageRef="1" 
flavorRef="2" accessIPv4="1.2.3.4" - accessIPv6="fead:::::1234"/>""" + accessIPv6="fead::1234"/>""" request = self.deserializer.deserialize(serial_request, 'create') expected = { "server": { @@ -2607,7 +2607,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase): "imageRef": "1", "flavorRef": "2", "accessIPv4": "1.2.3.4", - "accessIPv6": "fead:::::1234", + "accessIPv6": "fead::1234", }, } self.assertEquals(request['body'], expected) @@ -3161,7 +3161,7 @@ class ServersViewBuilderV11Test(test.TestCase): "locked": False, "metadata": [], "accessIPv4": "1.2.3.4", - "accessIPv6": "fead::::1234", + "accessIPv6": "fead::1234", #"address": , #"floating_ips": [{"address":ip} for ip in public_addresses]} "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"} -- cgit From a68c1cde2e73e6d39d7ff6024cd3ff289c465619 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 12:20:40 -0500 Subject: Refactored a little and updated unit test. --- nova/api/openstack/contrib/rescue.py | 12 ++++++++---- nova/tests/api/openstack/test_extensions.py | 1 + 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index efb882fd6..dac269efb 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -31,10 +31,10 @@ class Rescue(exts.ExtensionDescriptor): super(Rescue, self).__init__() self.compute_api = compute.API() - def _rescue(self, input_dict, req, instance_id): - """Enable or disable rescue mode.""" + def _rescue(self, input_dict, req, instance_id, exit_rescue=False): + """Rescue an instance.""" context = req.environ["nova.context"] - action = input_dict["rescue"]["action"] + action = "unrescue" if exit_rescue else "rescue" try: if action == "rescue": @@ -47,6 +47,10 @@ class Rescue(exts.ExtensionDescriptor): return webob.Response(status_int=202) + def _unrescue(self, input_dict, req, instance_id): + """Unrescue an instance.""" + self._rescue(input_dict, req, instance_id, exit_rescue=True) + def get_name(self): return "Rescue" @@ -66,7 +70,7 @@ class Rescue(exts.ExtensionDescriptor): """Return the actions the extension adds, as required by contract.""" actions = [ exts.ActionExtension("servers", "rescue", self._rescue), - exts.ActionExtension("servers", "unrescue", self._rescue), + exts.ActionExtension("servers", "unrescue", self._unrescue), ] return actions diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 5d3208e10..34a4b3f89 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -94,6 +94,7 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "SecurityGroups", "Volumes", + "Rescue", ] self.ext_list.sort() -- cgit From a9d87715133ae79518cef6aafd87c95e26f20765 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 12:25:22 -0500 Subject: Minor housecleaning. 
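For reference, a sketch of how a client might drive the action this extension registers. The URL shape is inferred from the ActionExtension("servers", "rescue", ...) registration and the /v1.1 paths used in the tests; the host, port, instance id and token are placeholders, and any HTTP client would do in place of urllib2.

    import json
    import urllib2

    # The action is selected by the key of the JSON body, so rescuing
    # instance 1 is a POST to its action resource; 202 is returned on
    # success. Unrescue works the same way with {"unrescue": {}}.
    body = json.dumps({"rescue": {}})
    req = urllib2.Request("http://127.0.0.1:8774/v1.1/servers/1/action",
                          data=body,
                          headers={"Content-Type": "application/json",
                                   "X-Auth-Token": "placeholder"})
    resp = urllib2.urlopen(req)
    print resp.code
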
--- nova/api/openstack/contrib/rescue.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index dac269efb..65ce2874b 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -22,17 +22,22 @@ from nova import log as logging from nova.api.openstack import extensions as exts from nova.api.openstack import faults + LOG = logging.getLogger("nova.api.contrib.rescue") class Rescue(exts.ExtensionDescriptor): - """The Rescue API controller for the OpenStack API.""" + """The Rescue controller for the OpenStack API.""" def __init__(self): super(Rescue, self).__init__() self.compute_api = compute.API() def _rescue(self, input_dict, req, instance_id, exit_rescue=False): - """Rescue an instance.""" + """Rescue an instance. + + If exit_rescue is True, rescue mode should be torn down and the + instance restored to its original state. + """ context = req.environ["nova.context"] action = "unrescue" if exit_rescue else "rescue" -- cgit From ffbf26392f06ecac55e72ed25f59fd550a5262f5 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 17:30:00 +0000 Subject: Changed the filter specified in _ask_scheduler_to_create_instance() to None, since the value isn't used when creating an instance. --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 229d02af4..e033c6c74 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -415,7 +415,7 @@ class API(base.Base): 'image': image, 'instance_properties': base_options, 'instance_type': instance_type, - 'filter': 'InstanceTypeFilter' + 'filter': None, 'blob': zone_blob, 'num_instances': num_instances, } -- cgit From 125a2affec7713cdbcb925537d34aea29a2e4230 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 10:55:39 -0700 Subject: more cleanup of binaries per review --- bin/nova-ajax-console-proxy | 7 +++---- bin/nova-api | 8 +++----- bin/nova-compute | 5 ++--- bin/nova-console | 5 ++--- bin/nova-direct-api | 11 +++++++---- bin/nova-network | 5 ++--- bin/nova-objectstore | 14 +++++++------- bin/nova-scheduler | 5 ++--- bin/nova-vncproxy | 15 ++++++--------- bin/nova-volume | 5 ++--- 10 files changed, 36 insertions(+), 44 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 2329581a2..0a789b4b9 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -24,7 +24,6 @@ from eventlet import greenthread from eventlet.green import urllib2 import exceptions -import gettext import os import sys import time @@ -38,11 +37,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging from nova import rpc +from nova import service from nova import utils from nova import wsgi @@ -141,5 +140,5 @@ if __name__ == '__main__': acp = AjaxConsoleProxy() acp.register_listeners() server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) - server.start() - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-api b/bin/nova-api index d2086dc92..38e2624d8 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -26,7 +26,6 @@ Starts both the EC2 and OpenStack APIs in separate greenthreads. 
import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -36,7 +35,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath( if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -47,8 +45,8 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - services = [] + servers = [] for api in flags.FLAGS.enabled_apis: - services.append(service.WSGIService(api)) - service.serve(*services) + servers.append(service.WSGIService(api)) + service.serve(*servers) service.wait() diff --git a/bin/nova-compute b/bin/nova-compute index cd7c78def..9aef201e6 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-console b/bin/nova-console index 40608b995..7f76fdc29 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -21,7 +21,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -33,7 +32,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -44,5 +42,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-console') + service.serve(server) service.wait() diff --git a/bin/nova-direct-api b/bin/nova-direct-api index c6cf9b2ff..106e89ba9 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -20,7 +20,9 @@ """Starter script for Nova Direct API.""" -import gettext +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,12 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import compute from nova import flags from nova import log as logging from nova import network +from nova import service from nova import utils from nova import volume from nova import wsgi @@ -97,5 +99,6 @@ if __name__ == '__main__': with_auth, host=FLAGS.direct_host, port=FLAGS.direct_port) - server.start() - server.wait() + + service.serve(server) + service.wait() diff --git a/bin/nova-network b/bin/nova-network index 101761ef7..ce93e9354 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags 
from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 4d5aec445..c7a76e120 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -17,11 +17,11 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Daemon for nova objectstore. Supports S3 API. -""" +"""Daemon for nova objectstore. Supports S3 API.""" + +import eventlet +eventlet.monkey_patch() -import gettext import os import sys @@ -33,10 +33,10 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging +from nova import service from nova import utils from nova import wsgi from nova.objectstore import s3server @@ -54,5 +54,5 @@ if __name__ == '__main__': router, port=FLAGS.s3_port, host=FLAGS.s3_host) - server.start() - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 0c205a80f..07d1c55e6 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy index bdbb30a7f..dc08e2433 100755 --- a/bin/nova-vncproxy +++ b/bin/nova-vncproxy @@ -19,7 +19,8 @@ """VNC Console Proxy Server.""" import eventlet -import gettext +eventlet.monkey_patch() + import os import sys @@ -29,7 +30,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -41,7 +41,7 @@ from nova.vnc import auth from nova.vnc import proxy -LOG = logging.getLogger('nova.vnc-proxy') +LOG = logging.getLogger('nova.vncproxy') FLAGS = flags.FLAGS @@ -81,7 +81,7 @@ if __name__ == "__main__": FLAGS(sys.argv) logging.setup() - LOG.audit(_("Starting nova-vnc-proxy node (version %s)"), + LOG.audit(_("Starting nova-vncproxy node (version %s)"), version.version_string_with_vcs()) if not (os.path.exists(FLAGS.vncproxy_wwwroot) and @@ -107,13 +107,10 @@ if __name__ == "__main__": else: with_auth = auth.VNCNovaAuthMiddleware(with_logging) - service.serve() - server = wsgi.Server("VNC Proxy", with_auth, host=FLAGS.vncproxy_host, port=FLAGS.vncproxy_port) - server.start() server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host) - - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index 8dcdbc500..1451de44a 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,7 +22,6 @@ 
import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-volume') + service.serve(server) service.wait() -- cgit From 0cf36be73e7de4942f395a2a7dfeb58df5870821 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 10:56:14 -0700 Subject: add separate api binaries --- bin/nova-api-ec2 | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ bin/nova-api-os | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100755 bin/nova-api-ec2 create mode 100755 bin/nova-api-os diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2 new file mode 100755 index 000000000..9fac7b63a --- /dev/null +++ b/bin/nova-api-ec2 @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API. + +Starts both the EC2 and OpenStack APIs in separate greenthreads. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + server = service.WSGIService('ec2') + service.serve(server) + service.wait() diff --git a/bin/nova-api-os b/bin/nova-api-os new file mode 100755 index 000000000..9d9a7b05e --- /dev/null +++ b/bin/nova-api-os @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API. + +Starts both the EC2 and OpenStack APIs in separate greenthreads. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + server = service.WSGIService('osapi') + service.serve(server) + service.wait() -- cgit From 788e5c5e94c224c3909c4f12ecc569bba3ba1c9e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:00:47 -0700 Subject: remove signal handling and clean up service.serve --- nova/service.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/nova/service.py b/nova/service.py index e0735d26f..8ffd39629 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,7 +21,6 @@ import inspect import os -import signal import eventlet import greenlet @@ -346,33 +345,21 @@ def serve(*services): global _launcher if not _launcher: _launcher = Launcher() - signal.signal(signal.SIGTERM, lambda *args: _launcher.stop()) - try: - if not services: - services = [Service.create()] - except Exception: - logging.exception('in Service.create()') - raise - finally: - # After we've loaded up all our dynamic bits, check - # whether we should print help - flags.DEFINE_flag(flags.HelpFlag()) - flags.DEFINE_flag(flags.HelpshortFlag()) - flags.DEFINE_flag(flags.HelpXMLFlag()) - FLAGS.ParseNewFlags() - - name = '_'.join(x.name for x in services) - logging.debug(_('Serving %s'), name) - logging.debug(_('Full set of FLAGS:')) - for flag in FLAGS: - flag_get = FLAGS.get(flag, None) - logging.debug('%(flag)s : %(flag_get)s' % locals()) - for x in services: _launcher.launch_service(x) def wait(): + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() + logging.debug(_('Full set of FLAGS:')) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + logging.debug('%(flag)s : %(flag_get)s' % locals()) try: _launcher.wait() except KeyboardInterrupt: -- cgit From 97552f05d5d26e596ddf0cda8169f3a5d131a55a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:28:02 -0700 Subject: fix typo --- bin/nova-compute | 2 +- bin/nova-console | 2 +- bin/nova-network | 2 +- bin/nova-scheduler | 2 +- bin/nova-volume | 2 +- nova/service.py | 35 +++++++++++++++++++---------------- 6 files changed, 24 insertions(+), 21 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 9aef201e6..5239fae72 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-compute') + server = service.Service.create(binary='nova-compute') service.serve(server) service.wait() diff --git a/bin/nova-console b/bin/nova-console index 7f76fdc29..22f6ef171 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -42,6 +42,6 @@ if __name__ == '__main__': 
utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-console') + server = service.Service.create(binary='nova-console') service.serve(server) service.wait() diff --git a/bin/nova-network b/bin/nova-network index ce93e9354..57759d30a 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-compute') + server = service.Service.create(binary='nova-network') service.serve(server) service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 07d1c55e6..3b627e62d 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-compute') + server = service.Service.create(binary='nova-compute') service.serve(server) service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index 1451de44a..5405aebbb 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-volume') + server = service.Service.create(binary='nova-volume') service.serve(server) service.wait() diff --git a/nova/service.py b/nova/service.py index 8ffd39629..959e79052 100644 --- a/nova/service.py +++ b/nova/service.py @@ -67,24 +67,24 @@ class Launcher(object): self._services = [] @staticmethod - def run_service(service): - """Start and wait for a service to finish. + def run_server(server): + """Start and wait for a server to finish. - :param service: Service to run and wait for. + :param service: Server to run and wait for. :returns: None """ - service.start() - service.wait() + server.start() + server.wait() - def launch_service(self, service): - """Load and start the given service. + def launch_server(self, server): + """Load and start the given server. - :param service: The service you would like to start. + :param server: The server you would like to start. :returns: None """ - gt = eventlet.spawn(self.run_service, service) + gt = eventlet.spawn(self.run_server, server) self._services.append(gt) def stop(self): @@ -110,13 +110,16 @@ class Launcher(object): class Service(object): - """Base class for workers that run on hosts.""" + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager and reports + it state to the database services table.""" def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, *args, **kwargs): self.host = host self.binary = binary - self.name = binary self.topic = topic self.manager_class_name = manager manager_class = utils.import_class(self.manager_class_name) @@ -289,9 +292,9 @@ class WSGIService(object): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): - """Initialize, but do not start the WSGI service. + """Initialize, but do not start the WSGI server. - :param name: The name of the WSGI service given to the loader. + :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. 
:returns: None @@ -341,12 +344,12 @@ class WSGIService(object): _launcher = None -def serve(*services): +def serve(*servers): global _launcher if not _launcher: _launcher = Launcher() - for x in services: - _launcher.launch_service(x) + for server in servers: + _launcher.launch_server(server) def wait(): -- cgit From 05e8c1755d8fde5a9a3bde02e339938f670694c6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:28:43 -0700 Subject: one more --- bin/nova-scheduler | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 3b627e62d..2e168cbc6 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Service.create(binary='nova-compute') + server = service.Service.create(binary='nova-scheduler') service.serve(server) service.wait() -- cgit From a4d63f18971bad12ea812c63bcee35d8070333f7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:31:28 -0700 Subject: fix docstrings in new api bins --- bin/nova-api-ec2 | 6 +----- bin/nova-api-os | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2 index 9fac7b63a..df50f713d 100755 --- a/bin/nova-api-ec2 +++ b/bin/nova-api-ec2 @@ -17,11 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Starter script for Nova API. - -Starts both the EC2 and OpenStack APIs in separate greenthreads. - -""" +"""Starter script for Nova EC2 API.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-api-os b/bin/nova-api-os index 9d9a7b05e..374e850ea 100755 --- a/bin/nova-api-os +++ b/bin/nova-api-os @@ -17,11 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Starter script for Nova API. - -Starts both the EC2 and OpenStack APIs in separate greenthreads. - -""" +"""Starter script for Nova OS API.""" import eventlet eventlet.monkey_patch() -- cgit From b98c14c411ae09d9a8b5b2112d0e1b01b71ced44 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 18 Aug 2011 14:34:14 -0400 Subject: Don't send 'injected_files' and 'admin_pass' to db.update. --- nova/compute/manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 66458fb36..47e7864c4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -393,11 +393,12 @@ class ComputeManager(manager.SchedulerDependentManager): updates['host'] = self.host updates['launched_on'] = self.host # NOTE(vish): used by virt but not in database - updates['injected_files'] = kwargs.get('injected_files', []) - updates['admin_pass'] = kwargs.get('admin_password', None) instance = self.db.instance_update(context, instance_id, updates) + instance['injected_files'] = kwargs.get('injected_files', []) + instance['admin_pass'] = kwargs.get('admin_password', None) + self.db.instance_set_state(context, instance_id, power_state.NOSTATE, -- cgit From 6b8c26d230d06c35921e2e0a2d30d9d3d745eff4 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 18 Aug 2011 14:44:10 -0400 Subject: Remove old comment. 
--- nova/compute/manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 47e7864c4..091b3b6b2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -392,7 +392,6 @@ class ComputeManager(manager.SchedulerDependentManager): updates = {} updates['host'] = self.host updates['launched_on'] = self.host - # NOTE(vish): used by virt but not in database instance = self.db.instance_update(context, instance_id, updates) -- cgit From af9681bc82d7509cb2f65d213bd4d8ae24286663 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Thu, 18 Aug 2011 13:47:09 -0500 Subject: Moved compute calls to their own handler --- nova/compute/api.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e909e9959..0c5d4349d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1068,14 +1068,20 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) + def _make_compute_call_for_host(self context, host, params): + """Call method deliberately designed to make host/service only calls""" + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" - return self._call_compute_message("set_host_enabled", context, + return self._make_compute_call_for_host("set_host_enabled", context, host=host, params={"enabled": enabled}) def host_power_action(self, context, host, action): """Reboots, shuts down or powers up the host.""" - return self._call_compute_message("host_power_action", context, + return self._make_compute_call_for_host("host_power_action", context, host=host, params={"action": action}) @scheduler_api.reroute_compute("diagnostics") -- cgit From 69996e83f10387b83bdc7e5e76b62fe67ea6c2ab Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Thu, 18 Aug 2011 13:55:38 -0500 Subject: Syntax error --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 0c5d4349d..3110cd92d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1068,7 +1068,7 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) - def _make_compute_call_for_host(self context, host, params): + def _make_compute_call_for_host(self, context, host, params): """Call method deliberately designed to make host/service only calls""" queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) kwargs = {'method': method, 'args': params} -- cgit From c6c004c44595f218f66eee8f6f9173c6108be8a4 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 14:39:25 -0500 Subject: Updated the distributed scheduler docs with the latest changes to the classes. 
--- doc/source/devref/distributed_scheduler.rst | 56 ++++++++++++++-------------- doc/source/images/base_scheduler.png | Bin 0 -> 17068 bytes doc/source/images/zone_overview.png | Bin 0 -> 51587 bytes 3 files changed, 27 insertions(+), 29 deletions(-) create mode 100644 doc/source/images/base_scheduler.png create mode 100755 doc/source/images/zone_overview.png diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index e33fda4d2..c63e62f7f 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -31,9 +31,9 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab So, how does this all work? -This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this. +This document will explain the strategy employed by the `BaseScheduler`, which is the base for all schedulers designed to work across zones, and its derivations. You should read the :doc:`devguide/zones` documentation before reading this. - .. image:: /images/zone_aware_scheduler.png + .. image:: /images/base_scheduler.png Costs & Weights --------------- @@ -52,32 +52,32 @@ This Weight is computed for each Instance requested. If the customer asked for 1 .. image:: /images/costs_weights.png -nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler +nova.scheduler.base_scheduler.BaseScheduler ------------------------------------------------------ -As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. +As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `BaseScheduler` uses this information to make its decisions. Here is how it works: 1. The compute nodes are filtered and the nodes remaining are weighed. - 2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request. + 2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request. 3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request. 4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent. 5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed. 6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed. - .. image:: /images/zone_aware_overview.png + .. image:: /images/zone_overview.png -`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used. +`BaseScheduler` by itself is not capable of handling all the provisioning itself. You should also specify the filter classes and weighting classes to be used in determining which host is selected for new instance creation. 
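The filter-then-weigh flow listed above lends itself to a compact illustration. The sketch below is illustrative only and is not the real `BaseScheduler` code: the host dictionaries, their field names (`free_ram_mb`, `free_disk_gb`) and the toy weighting formula are assumptions made up for this example; only the overall shape (filter the hosts, then weigh the survivors, lower weight wins) mirrors the steps described above:

    # Illustrative sketch only -- not the actual nova scheduler code.
    # Host dicts, field names and the weighting formula are assumptions.

    def filter_hosts(hosts, required_ram_mb, required_disk_gb):
        """Steps 1-2: drop hosts that cannot satisfy the requested resources."""
        return [host for host in hosts
                if host['free_ram_mb'] >= required_ram_mb
                and host['free_disk_gb'] >= required_disk_gb]

    def weigh_hosts(hosts):
        """Step 3: assign each remaining host a weight (lower is better)."""
        return [{'weight': 100 - host['free_ram_mb'] // 1024,
                 'hostname': host['hostname']}
                for host in hosts]

    if __name__ == '__main__':
        candidates = [
            {'hostname': 'compute1', 'free_ram_mb': 8192, 'free_disk_gb': 80},
            {'hostname': 'compute2', 'free_ram_mb': 2048, 'free_disk_gb': 40},
            {'hostname': 'compute3', 'free_ram_mb': 512, 'free_disk_gb': 10},
        ]
        suitable = filter_hosts(candidates, required_ram_mb=1024,
                                required_disk_gb=20)
        # Steps 4-5 (collecting and aggregating child Zone results) are
        # omitted; locally the build plan is just the weighted list
        # sorted best-first.
        build_plan = sorted(weigh_hosts(suitable), key=lambda w: w['weight'])
        print(build_plan)   # compute1 (weight 92), then compute2 (weight 98)
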
Filtering and Weighing ---------------------- -The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible. +The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `BaseScheduler` are flexible and extensible. .. image:: /images/filtering.png Requesting a new instance ------------------------- -Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table. +Prior to the `BaseScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table. `nova.compute.api.create()` performed the following actions: 1. it validated all the fields passed into it. @@ -89,11 +89,11 @@ Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to .. image:: /images/nova.compute.api.create.png -Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones. +Generally, the simplest schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones. The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once. -For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: +For the `BaseScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently: 1. it validates all the fields passed into it. 2. 
it creates a single `reservation_id` for all of instances created. This is a UUID. 3. it creates a single `run_instance` request in the scheduler queue @@ -109,21 +109,19 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to
The Catch
---------
-This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
+This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
-When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
+When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to child Zones asking for estimates.
-Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
+Instead, we take a rather innovative approach to the problem. We encrypt all the child Zone internal details and pass them back to the parent Zone.
In the case of a nested Zone layout, each nesting layer will encrypt the data from all of its children and pass that to its parent Zone. In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Every Zone interface adds another layer of encryption, using its unique key.
-In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.
+Once a host is selected, it will either be local to the Zone that received the initial API call, or one of its child Zones. In the latter case, the parent Zone simply passes the encrypted data for the selected host back to each of its child Zones during the `POST /servers` call as an extra parameter. If the child Zone can decrypt the data, then it is the correct Zone for the selected host; all other Zones will not be able to decrypt the data and will discard the request. This is why it is critical that each Zone has a unique value specified in its config in `--build_plan_encryption_key`: it controls the ability to locate the selected host without having to hard-code path information or other identifying information. The child Zone can then act on the decrypted data and either go directly to the Compute node previously selected if it is located in that Zone, or repeat the process with its child Zones until the target Zone containing the selected host is reached.
-Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
+Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.base_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
Reservation IDs
---------------
-NOTE: The features described in this section are related to the up-coming 'merge-4' branch.
- The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
@@ -137,23 +135,23 @@ Finally, we need to give the user a way to get information on each of the instan
Host Filter
-----------
-As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University.
To facilitate this the `nova.scheduler.filters` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
-The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others:
+The filter used is determined by the `--default_host_filters` flag, which points to a Python Class. By default this flag is set to `[AllHostsFilter]` which simply returns all available hosts. But there are others:
- * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
+ * `InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
- * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
+ * `JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
-To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
+To create your own `HostFilter` the user simply has to derive from `nova.scheduler.filters.AbstractHostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of available hosts is in the `host_list` parameter passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be. By default, it is the capabilities reported by the host.
Cost Scheduler Weighing
-----------------------
-Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts.
The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled. +Every `BaseScheduler` subclass should also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `BaseScheduler` when all the results have been assembled. -Simple Zone Aware Scheduling +Simple Scheduling Across Zones ---------------------------- -The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. +The `BaseScheduler` uses the default `filter_hosts` method, which will use either any filters specified in the request's `filter` parameter, or, if that is not specified, the filters specified in the `FLAGS.default_host_filters` setting. Its `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things. The `--scheduler_driver` flag is how you specify the scheduler class name. @@ -168,14 +166,14 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf --enable_zone_routing=true --zone_name=zone1 --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b - --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler - --default_host_filter=nova.scheduler.host_filter.AllHostsFilter + --scheduler_driver=nova.scheduler.base_scheduler.BaseScheduler + --default_host_filter=nova.scheduler.filters.AllHostsFilter `--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands. `--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances. `--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue. `build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys. -`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`. +`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.base_scheduler.BaseScheduler`. `default_host_filter` is the host filter to be used for filtering candidate Compute nodes. 
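To make the custom-filter interface from the Host Filter section above concrete, here is a minimal, standalone mock-up. It deliberately does not import or derive from the real `nova.scheduler.filters.AbstractHostFilter`; the class name, the capability key `host_memory_free` and the RAM-only criterion are assumptions for illustration, and only the two-method shape (`instance_type_to_filter` plus `filter_hosts` returning `(hostname, capabilities)` tuples) follows the description above:

    # Standalone mock-up of the two-method filter interface described above.
    # It does not use the real AbstractHostFilter; the names and the
    # capability layout are assumed purely for illustration.

    class RamHostFilter(object):
        """Keep only hosts advertising enough free RAM for the instance type."""

        def instance_type_to_filter(self, instance_type):
            # Reduce the InstanceType record to the single value this
            # filter cares about.
            return {'memory_mb': instance_type['memory_mb']}

        def filter_hosts(self, host_list, query):
            # host_list is assumed to be (hostname, capabilities) tuples.
            return [(hostname, caps) for hostname, caps in host_list
                    if caps.get('host_memory_free', 0) >= query['memory_mb']]

    if __name__ == '__main__':
        hosts = [('compute1', {'host_memory_free': 4096}),
                 ('compute2', {'host_memory_free': 1024})]
        flt = RamHostFilter()
        query = flt.instance_type_to_filter({'memory_mb': 2048})
        print(flt.filter_hosts(hosts, query))   # only compute1 survives
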
Some optional flags which are handy for debugging are: diff --git a/doc/source/images/base_scheduler.png b/doc/source/images/base_scheduler.png new file mode 100644 index 000000000..75d029338 Binary files /dev/null and b/doc/source/images/base_scheduler.png differ diff --git a/doc/source/images/zone_overview.png b/doc/source/images/zone_overview.png new file mode 100755 index 000000000..cc891df0a Binary files /dev/null and b/doc/source/images/zone_overview.png differ -- cgit From 19495e51bc86bf1bc333759e3825ab4b5592ff66 Mon Sep 17 00:00:00 2001 From: Matt Dietz Date: Thu, 18 Aug 2011 19:40:59 +0000 Subject: Need to pass the action --- nova/compute/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3110cd92d..598270ba1 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1068,10 +1068,10 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) - def _make_compute_call_for_host(self, context, host, params): + def _make_compute_call_for_host(self, action, context, host, params): """Call method deliberately designed to make host/service only calls""" queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) - kwargs = {'method': method, 'args': params} + kwargs = {'method': action, 'args': params} return rpc.call(context, queue, kwargs) def set_host_enabled(self, context, host, enabled): -- cgit From fe28c88a6bfff9d8e0d83751ab89e83173aaf092 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 14:56:22 -0500 Subject: Review feedback. --- nova/api/openstack/contrib/rescue.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index 65ce2874b..5ee071696 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -48,7 +48,7 @@ class Rescue(exts.ExtensionDescriptor): self.compute_api.unrescue(context, instance_id) except Exception, e: LOG.exception(_("Error in %(action)s: %(e)s") % locals()) - return faults.Fault(exc.HTTPBadRequest()) + return faults.Fault(exc.HTTPInternalServerError()) return webob.Response(status_int=202) @@ -60,7 +60,7 @@ class Rescue(exts.ExtensionDescriptor): return "Rescue" def get_alias(self): - return "rescue" + return "os-rescue" def get_description(self): return "Instance rescue mode" -- cgit From 508b45a3fda9caa92c90282045495acb6e2f638b Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 15:08:51 -0500 Subject: Better docstring for _unrescue(). --- nova/api/openstack/contrib/rescue.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index 5ee071696..a30ed6dff 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -53,7 +53,11 @@ class Rescue(exts.ExtensionDescriptor): return webob.Response(status_int=202) def _unrescue(self, input_dict, req, instance_id): - """Unrescue an instance.""" + """Unrescue an instance. + + We pass exit_rescue=True here so _rescue() knows we would like to exit + rescue mode. 
+ """ self._rescue(input_dict, req, instance_id, exit_rescue=True) def get_name(self): -- cgit From bbcb84a5fed2c537bd6d2143e344fa96f669d231 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 18 Aug 2011 20:25:32 +0000 Subject: DB password should be an empty string for MySQLdb --- nova/db/sqlalchemy/session.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 07f281938..643e2338e 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -73,9 +73,11 @@ def get_engine(): elif MySQLdb and "mysql" in connection_dict.drivername: LOG.info(_("Using mysql/eventlet db_pool.")) + # MySQLdb won't accept 'None' in the password field + password = connection_dict.password or '' pool_args = { "db": connection_dict.database, - "passwd": connection_dict.password, + "passwd": password, "host": connection_dict.host, "user": connection_dict.username, "min_size": FLAGS.sql_min_pool_size, -- cgit From cca07a461d6c826a9dcc902b7b88afe602377756 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 16:27:49 -0400 Subject: updated PUT to severs/id to handle accessIPv4 and accessIPv6 --- nova/api/openstack/servers.py | 10 +++++- nova/tests/api/openstack/test_servers.py | 53 +++++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f06ee6b62..df55d981a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -163,7 +163,7 @@ class Controller(object): @scheduler_api.redirect_handler def update(self, req, id, body): - """Update server name then pass on to version-specific controller""" + """Update server then pass on to version-specific controller""" if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() @@ -178,6 +178,14 @@ class Controller(object): self.helper._validate_server_name(name) update_dict['display_name'] = name.strip() + if 'accessIPv4' in body['server']: + access_ipv4 = body['server']['accessIPv4'] + update_dict['access_ip_v4'] = access_ipv4.strip() + + if 'accessIPv6' in body['server']: + access_ipv6 = body['server']['accessIPv6'] + update_dict['access_ip_v6'] = access_ipv6.strip() + try: self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b3e9bfe04..a813d4f96 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -145,7 +145,8 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None, name=None): + flavor_id="1", interfaces=None, name=None, + access_ipv4=None, access_ipv6=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -197,6 +198,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "display_description": "", "locked": False, "metadata": metadata, + "access_ip_v4": access_ipv4, + "access_ip_v6": access_ipv6, "uuid": uuid, "virtual_interfaces": interfaces} @@ -1944,6 +1947,28 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_update_server_all_attributes_v1_1(self): + self.stubs.Set(nova.db.api, 'instance_get', + 
return_server_with_attributes(name='server_test', + access_ipv4='0.0.0.0', + access_ipv6='beef::0123')) + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'PUT' + req.content_type = 'application/json' + body = {'server': { + 'name': 'server_test', + 'accessIPv4': '0.0.0.0', + 'accessIPv6': 'beef::0123', + }} + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server_test') + self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0') + self.assertEqual(res_dict['server']['accessIPv6'], 'beef::0123') + def test_update_server_name_v1_1(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_with_attributes(name='server_test')) @@ -1957,6 +1982,32 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server_test') + def test_update_server_access_ipv4_v1_1(self): + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(access_ipv4='0.0.0.0')) + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'PUT' + req.content_type = 'application/json' + req.body = json.dumps({'server': {'accessIPv4': '0.0.0.0'}}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0') + + def test_update_server_access_ipv6_v1_1(self): + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(access_ipv6='beef::0123')) + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'PUT' + req.content_type = 'application/json' + req.body = json.dumps({'server': {'accessIPv6': 'beef::0123'}}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['accessIPv6'], 'beef::0123') + def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') self.body = json.dumps(dict(server=inst_dict)) -- cgit From 56129e4a0b0c5cb2f8766e023bcaff77fc990008 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 18 Aug 2011 13:45:45 -0700 Subject: Added more unit testcases for userdata functionality --- nova/tests/test_metadata.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index bfc7a6d44..b06e5c136 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -23,12 +23,21 @@ import httplib import webob +from nova import exception from nova import test from nova import wsgi from nova.api.ec2 import metadatarequesthandler from nova.db.sqlalchemy import api +USER_DATA_STRING = ("This is an encoded string") +ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING) + + +def return_non_existing_server_by_address(context, address): + raise exception.NotFound() + + class MetadataTestCase(test.TestCase): """Test that metadata is returning proper values.""" @@ -79,3 +88,34 @@ class MetadataTestCase(test.TestCase): self.stubs.Set(api, 'security_group_get_by_instance', sg_get) self.assertEqual(self.request('/meta-data/security-groups'), 'default\nother') + + def test_user_data_non_existing_fixed_address(self): + self.stubs.Set(api, 
'instance_get_all_by_filters', + return_non_existing_server_by_address) + request = webob.Request.blank('/user-data') + request.remote_addr = "127.1.1.1" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 404) + + def test_user_data_none_fixed_address(self): + self.stubs.Set(api, 'instance_get_all_by_filters', + return_non_existing_server_by_address) + request = webob.Request.blank('/user-data') + request.remote_addr = None + response = request.get_response(self.app) + self.assertEqual(response.status_int, 500) + + def test_user_data_invalid_url(self): + request = webob.Request.blank('/user-data-invalid') + request.remote_addr = "127.0.0.1" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 404) + + def test_user_data_with_use_forwarded_header(self): + self.instance['user_data'] = ENCODE_USER_DATA_STRING + self.flags(use_forwarded_for=True) + request = webob.Request.blank('/user-data') + request.remote_addr = "127.0.0.1" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 200) + self.assertEqual(response.body, USER_DATA_STRING) -- cgit From 041dcdb2eba968d5be17c9a10bf333e1307f0537 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 18 Aug 2011 16:56:23 -0400 Subject: Added 'update' method to ServersXMLSerializer --- nova/api/openstack/servers.py | 6 ++ nova/tests/api/openstack/test_servers.py | 121 +++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 335ecad86..41e63ec3c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -923,6 +923,12 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): node.setAttribute('adminPass', server_dict['server']['adminPass']) return self.to_xml_string(node, True) + def update(self, server_dict): + xml_doc = minidom.Document() + node = self._server_to_xml_detailed(xml_doc, + server_dict['server']) + return self.to_xml_string(node, True) + def create_resource(version='1.0'): controller = { diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a510d7d97..437620854 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3740,3 +3740,124 @@ class ServerXMLSerializationTest(test.TestCase): """.replace(" ", "") % (locals())) self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update(self): + serializer = servers.ServerXMLSerializer() + + fixture = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": self.IMAGE_BOOKMARK, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": self.FLAVOR_BOOKMARK, + }, + ], + }, + "addresses": { + "network_one": [ + { + "version": 4, + "addr": "67.23.10.138", + }, + { + "version": 6, + "addr": "::babe:67.23.10.138", + }, + ], + "network_two": [ + { + "version": 4, + "addr": "67.23.10.139", + }, + { + "version": 6, + "addr": "::babe:67.23.10.139", + }, + ], + }, + "metadata": { + "Open": "Stack", + "Number": "1", + }, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + } + } + + output = serializer.serialize(fixture, 'update') + actual = 
minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_image_bookmark = self.IMAGE_BOOKMARK + expected_flavor_bookmark = self.FLAVOR_BOOKMARK + expected_now = self.TIMESTAMP + expected_uuid = FAKE_UUID + expected = minidom.parseString(""" + + + + + + + + + + + + Stack + + + 1 + + + + + + + + + + + + + + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) -- cgit From f86a5cc4bc43923077ffe1d4098e550841f1c4f0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 15:58:12 -0500 Subject: Review feedback. --- nova/api/openstack/contrib/rescue.py | 40 +++++++++++++++++------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index a30ed6dff..399bb7f35 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -26,39 +26,37 @@ from nova.api.openstack import faults LOG = logging.getLogger("nova.api.contrib.rescue") +def wrap_errors(fn): + """"Ensure errors are not passed along.""" + def wrapped(*args): + try: + fn(*args) + except Exception, e: + return faults.Fault(exc.HTTPInternalServerError()) + return wrapped + + class Rescue(exts.ExtensionDescriptor): """The Rescue controller for the OpenStack API.""" def __init__(self): super(Rescue, self).__init__() self.compute_api = compute.API() - def _rescue(self, input_dict, req, instance_id, exit_rescue=False): - """Rescue an instance. - - If exit_rescue is True, rescue mode should be torn down and the - instance restored to its original state. - """ + @wrap_errors + def _rescue(self, input_dict, req, instance_id): + """Rescue an instance.""" context = req.environ["nova.context"] - action = "unrescue" if exit_rescue else "rescue" - - try: - if action == "rescue": - self.compute_api.rescue(context, instance_id) - elif action == "unrescue": - self.compute_api.unrescue(context, instance_id) - except Exception, e: - LOG.exception(_("Error in %(action)s: %(e)s") % locals()) - return faults.Fault(exc.HTTPInternalServerError()) + self.compute_api.rescue(context, instance_id) return webob.Response(status_int=202) + @wrap_errors def _unrescue(self, input_dict, req, instance_id): - """Unrescue an instance. + """Rescue an instance.""" + context = req.environ["nova.context"] + self.compute_api.unrescue(context, instance_id) - We pass exit_rescue=True here so _rescue() knows we would like to exit - rescue mode. - """ - self._rescue(input_dict, req, instance_id, exit_rescue=True) + return webob.Response(status_int=202) def get_name(self): return "Rescue" -- cgit From 22ba538b3cb3ddd22cef0fc06b136db433a8d202 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 16:07:02 -0500 Subject: Oops. 
--- nova/api/openstack/contrib/rescue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index 399bb7f35..3de128895 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -52,7 +52,7 @@ class Rescue(exts.ExtensionDescriptor): @wrap_errors def _unrescue(self, input_dict, req, instance_id): - """Rescue an instance.""" + """Unrescue an instance.""" context = req.environ["nova.context"] self.compute_api.unrescue(context, instance_id) -- cgit From ce5c95424148649cbd4faca1d5c85c0d6209e3d4 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 21:38:29 +0000 Subject: Removed extra parameter from the call to _provision_resource_locally() --- nova/scheduler/abstract_scheduler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 3930148e2..e8c343a4b 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -62,7 +62,7 @@ class AbstractScheduler(driver.Scheduler): host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] image = request_spec['image'] - instance_type = request_spec['instance_type'] + instance_type = request_spec.get('instance_type') # TODO(sandy): I guess someone needs to add block_device_mapping # support at some point? Also, OS API has no concept of security @@ -159,8 +159,8 @@ class AbstractScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, instance_type, host_info, - request_spec, kwargs) + self._provision_resource_locally(context, host_info, request_spec, + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, request_spec, kwargs): -- cgit From c718702496a98cefb434b4b21c3ea22fc6c8dc2d Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 18 Aug 2011 17:09:34 -0500 Subject: Added unit test. --- nova/tests/api/openstack/contrib/test_rescue.py | 55 +++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 nova/tests/api/openstack/contrib/test_rescue.py diff --git a/nova/tests/api/openstack/contrib/test_rescue.py b/nova/tests/api/openstack/contrib/test_rescue.py new file mode 100644 index 000000000..fc8e4be4e --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_rescue.py @@ -0,0 +1,55 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import webob + +from nova import compute +from nova import test +from nova.tests.api.openstack import fakes + + +def rescue(self, context, instance_id): + pass + + +def unrescue(self, context, instance_id): + pass + + +class RescueTest(test.TestCase): + def setUp(self): + super(RescueTest, self).setUp() + self.stubs.Set(compute.api.API, "rescue", rescue) + self.stubs.Set(compute.api.API, "unrescue", unrescue) + + def test_rescue(self): + body = dict(rescue=None) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_unrescue(self): + body = dict(unrescue=None) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) -- cgit From 509ce9d3016731c183bb565e8726a27010eaf02a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 15:41:20 -0700 Subject: declare the use_forwarded_for flag --- nova/api/ec2/__init__.py | 1 + nova/api/ec2/metadatarequesthandler.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 2ae370f88..52f381dbb 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -47,6 +47,7 @@ flags.DEFINE_integer('lockout_window', 15, flags.DEFINE_string('keystone_ec2_url', 'http://localhost:5000/v2.0/ec2tokens', 'URL to get token from ec2 request.') +flags.DECLARE('use_forwarded_for', 'nova.api.auth') class RequestLogging(wsgi.Middleware): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 1dc275c90..0198bf490 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -30,6 +30,7 @@ from nova.api.ec2 import cloud LOG = logging.getLogger('nova.api.ec2.metadata') FLAGS = flags.FLAGS +flags.DECLARE('use_forwarded_for', 'nova.api.auth') class MetadataRequestHandler(wsgi.Application): -- cgit From 32e57db9fdc5c48b3546640e838f5eb260080442 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 16:22:22 -0700 Subject: rename the test method --- nova/tests/test_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 8f92406ff..760b150be 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -205,6 +205,6 @@ class TestLauncher(test.TestCase): def test_launch_app(self): self.assertEquals(0, self.service.port) launcher = service.Launcher() - launcher.launch_service(self.service) + launcher.launch_server(self.service) self.assertEquals(0, self.service.port) launcher.stop() -- cgit From be0c70562ce978e3ffa85465fc08dd5cb3ca07c3 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 19 Aug 2011 10:47:16 -0400 Subject: updated migration number --- .../versions/037_add_instances_accessip.py | 48 ---------------------- .../versions/038_add_instances_accessip.py | 48 ++++++++++++++++++++++ 2 files changed, 48 insertions(+), 48 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/038_add_instances_accessip.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py 
b/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py deleted file mode 100644 index 39f0dd6ce..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_instances_accessip.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, Integer, MetaData, Table, String - -meta = MetaData() - -accessIPv4 = Column( - 'access_ip_v4', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - -accessIPv6 = Column( - 'access_ip_v6', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - nullable=True) - -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - instances.create_column(accessIPv4) - instances.create_column(accessIPv6) - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - meta.bind = migrate_engine - instances.drop_column('access_ip_v4') - instances.drop_column('access_ip_v6') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/038_add_instances_accessip.py b/nova/db/sqlalchemy/migrate_repo/versions/038_add_instances_accessip.py new file mode 100644 index 000000000..39f0dd6ce --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/038_add_instances_accessip.py @@ -0,0 +1,48 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + +meta = MetaData() + +accessIPv4 = Column( + 'access_ip_v4', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +accessIPv6 = Column( + 'access_ip_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances.create_column(accessIPv4) + instances.create_column(accessIPv6) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta.bind = migrate_engine + instances.drop_column('access_ip_v4') + instances.drop_column('access_ip_v6') -- cgit From c11a156b1e50fde6cf3047057746564d491634e2 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 19 Aug 2011 10:01:25 -0700 Subject: Fixes primitive with builtins, modules, etc --- nova/tests/test_utils.py | 10 ++++++++++ nova/utils.py | 12 +++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index ec5098a37..28e366a8e 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -384,3 +384,13 @@ class ToPrimitiveTestCase(test.TestCase): def test_typeerror(self): x = bytearray # Class, not instance self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>") + + def test_nasties(self): + def foo(): + pass + x = [datetime, foo, dir] + ret = utils.to_primitive(x) + self.assertEquals(len(ret), 3) + self.assertTrue(ret[0].startswith(u"<module 'datetime'")) + self.assertTrue(ret[1].startswith(u"<function foo")) + self.assertTrue(ret[2].startswith(u"<built-in function dir")) diff --git a/nova/utils.py b/nova/utils.py index 54126f644..b42f76457 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -547,11 +547,17 @@ def to_primitive(value, convert_instances=False, level=0): Therefore, convert_instances=True is lossy ... be aware. """ - if inspect.isclass(value): - return unicode(value) + nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + for test in nasty: + if test(value): + return unicode(value) if level > 3: - return [] + return '?' # The try block may not be necessary after the class check above, + # but just in case ...
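
For context, the to_primitive() hunk above broadens the old class-only check: modules, functions, generators, builtins and similar objects are now coerced to their unicode repr, and recursion past the depth guard returns '?' instead of an empty list. A rough usage sketch, not part of the patch; it assumes the patched nova.utils is importable, and the printed reprs are indicative only:

    import datetime

    from nova import utils


    def foo():
        pass

    # Mirrors test_nasties above: each of these objects is coerced to
    # its unicode repr instead of tripping up the serializer.
    ret = utils.to_primitive([datetime, foo, dir])
    print ret
    # e.g. [u"<module 'datetime' ...>", u"<function foo ...>",
    #       u"<built-in function dir>"]
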
-- cgit From fe8800ada8670cb29417fcdec085800b66cd881f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 19 Aug 2011 15:21:04 -0400 Subject: Updated test_show in ServerXMLSerializationTest to use XML validation --- nova/tests/api/openstack/test_servers.py | 101 ++++++++++++++++++------------- 1 file changed, 58 insertions(+), 43 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 326962d72..cfe3f624e 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -19,6 +19,7 @@ import base64 import datetime import json import unittest +from lxml import etree from xml.dom import minidom import webob @@ -32,6 +33,7 @@ import nova.api.openstack from nova.api.openstack import create_instance_helper from nova.api.openstack import servers from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil import nova.compute.api from nova.compute import instance_types from nova.compute import power_state @@ -46,6 +48,8 @@ from nova.tests.api.openstack import fakes FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" def fake_gen_uuid(): @@ -3605,7 +3609,9 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') expected_server_href = self.SERVER_HREF expected_server_bookmark = self.SERVER_BOOKMARK @@ -3613,49 +3619,58 @@ class ServerXMLSerializationTest(test.TestCase): expected_flavor_bookmark = self.FLAVOR_BOOKMARK expected_now = self.TIMESTAMP expected_uuid = FAKE_UUID - expected = minidom.parseString(""" - - - - - - - - - - - - Stack - - - 1 - - - - - - - - - - - - - - """.replace(" ", "") % (locals())) + server_dict = fixture['server'] + + for key in ['name', 'id', 'uuid', 'created', 'accessIPv4', + 'updated', 'progress', 'status', 'hostId', + 'accessIPv6']: + self.assertEqual(root.get(key), str(server_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = server_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + image_root = root.find('{0}image'.format(NS)) + self.assertEqual(image_root.get('id'), server_dict['image']['id']) + link_nodes = image_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['image']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + flavor_root = root.find('{0}flavor'.format(NS)) + self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['flavor']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + addresses_root = 
root.find('{0}addresses'.format(NS)) + addresses_dict = server_dict['addresses'] + network_elems = addresses_root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) - self.assertEqual(expected.toxml(), actual.toxml()) def test_create(self): serializer = servers.ServerXMLSerializer() -- cgit From c75e132786a65501477f77efa1bc9147b7763c31 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 19 Aug 2011 15:55:56 -0400 Subject: Finished changing ServerXMLSerializationTest to use XML validation and lxml --- nova/api/openstack/schemas/v1.1/server.rng | 50 ++++ nova/api/openstack/schemas/v1.1/servers.rng | 6 + nova/api/openstack/schemas/v1.1/servers_index.rng | 12 + nova/tests/api/openstack/test_servers.py | 349 ++++++++++++---------- 4 files changed, 251 insertions(+), 166 deletions(-) create mode 100644 nova/api/openstack/schemas/v1.1/server.rng create mode 100644 nova/api/openstack/schemas/v1.1/servers.rng create mode 100644 nova/api/openstack/schemas/v1.1/servers_index.rng diff --git a/nova/api/openstack/schemas/v1.1/server.rng b/nova/api/openstack/schemas/v1.1/server.rng new file mode 100644 index 000000000..dbd169a83 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/server.rng @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/nova/api/openstack/schemas/v1.1/servers.rng b/nova/api/openstack/schemas/v1.1/servers.rng new file mode 100644 index 000000000..4e2bb8853 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/servers.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/nova/api/openstack/schemas/v1.1/servers_index.rng b/nova/api/openstack/schemas/v1.1/servers_index.rng new file mode 100644 index 000000000..768f0912d --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/servers_index.rng @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index cfe3f624e..961d2fb7c 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3684,6 +3684,7 @@ class ServerXMLSerializationTest(test.TestCase): "progress": 0, "name": "test_server", "status": "BUILD", + "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", "adminPass": "test_password", @@ -3745,7 +3746,9 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'create') - actual = minidom.parseString(output.replace(" ", "")) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') expected_server_href = self.SERVER_HREF expected_server_bookmark = self.SERVER_BOOKMARK @@ -3753,49 +3756,57 @@ class ServerXMLSerializationTest(test.TestCase): expected_flavor_bookmark = self.FLAVOR_BOOKMARK expected_now = self.TIMESTAMP expected_uuid = FAKE_UUID - expected = minidom.parseString(""" - - - - - - - - - - - - Stack - - - 1 - - - - - - - - - - - - - - """.replace(" ", "") % (locals())) + server_dict = fixture['server'] - self.assertEqual(expected.toxml(), actual.toxml()) + for key in ['name', 'id', 'uuid', 'created', 'accessIPv4', + 
'updated', 'progress', 'status', 'hostId', + 'accessIPv6', 'adminPass']: + self.assertEqual(root.get(key), str(server_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = server_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + image_root = root.find('{0}image'.format(NS)) + self.assertEqual(image_root.get('id'), server_dict['image']['id']) + link_nodes = image_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['image']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + flavor_root = root.find('{0}flavor'.format(NS)) + self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['flavor']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + addresses_root = root.find('{0}addresses'.format(NS)) + addresses_dict = server_dict['addresses'] + network_elems = addresses_root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) def test_index(self): serializer = servers.ServerXMLSerializer() @@ -3836,23 +3847,21 @@ class ServerXMLSerializationTest(test.TestCase): ]} output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'servers_index') + server_elems = root.findall('{0}server'.format(NS)) + self.assertEqual(len(server_elems), 2) + for i, server_elem in enumerate(server_elems): + server_dict = fixture['servers'][i] + for key in ['name', 'id']: + self.assertEqual(server_elem.get(key), str(server_dict[key])) + + link_nodes = server_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_detail(self): serializer = servers.ServerXMLSerializer() @@ -3875,6 +3884,8 @@ class ServerXMLSerializationTest(test.TestCase): "progress": 0, "name": "test_server", "status": "BUILD", + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "image": { "id": "5", @@ -3928,6 +3939,8 @@ class ServerXMLSerializationTest(test.TestCase): "progress": 100, "name": "test_server_2", "status": 
"ACTIVE", + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "image": { "id": "5", @@ -3976,71 +3989,61 @@ class ServerXMLSerializationTest(test.TestCase): ]} output = serializer.serialize(fixture, 'detail') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - - - - - 2 - - - - - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'servers') + server_elems = root.findall('{0}server'.format(NS)) + self.assertEqual(len(server_elems), 2) + for i, server_elem in enumerate(server_elems): + server_dict = fixture['servers'][i] + + for key in ['name', 'id', 'uuid', 'created', 'accessIPv4', + 'updated', 'progress', 'status', 'hostId', + 'accessIPv6']: + self.assertEqual(server_elem.get(key), str(server_dict[key])) + + link_nodes = server_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = server_elem.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = server_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + image_root = server_elem.find('{0}image'.format(NS)) + self.assertEqual(image_root.get('id'), server_dict['image']['id']) + link_nodes = image_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['image']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + flavor_root = server_elem.find('{0}flavor'.format(NS)) + self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['flavor']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + addresses_root = server_elem.find('{0}addresses'.format(NS)) + addresses_dict = server_dict['addresses'] + network_elems = addresses_root.findall('{0}network'.format(NS)) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) def test_update(self): serializer = servers.ServerXMLSerializer() @@ -4055,6 +4058,8 @@ class ServerXMLSerializationTest(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", "image": { "id": "5", "links": [ @@ -4113,7 +4118,9 @@ class ServerXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'update') - actual = minidom.parseString(output.replace(" ", "")) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') expected_server_href = self.SERVER_HREF 
expected_server_bookmark = self.SERVER_BOOKMARK @@ -4121,44 +4128,54 @@ class ServerXMLSerializationTest(test.TestCase): expected_flavor_bookmark = self.FLAVOR_BOOKMARK expected_now = self.TIMESTAMP expected_uuid = FAKE_UUID - expected = minidom.parseString(""" - - - - - - - - - - - - Stack - - - 1 - - - - - - - - - - - - - - """.replace(" ", "") % (locals())) + server_dict = fixture['server'] - self.assertEqual(expected.toxml(), actual.toxml()) + for key in ['name', 'id', 'uuid', 'created', 'accessIPv4', + 'updated', 'progress', 'status', 'hostId', + 'accessIPv6']: + self.assertEqual(root.get(key), str(server_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = server_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + image_root = root.find('{0}image'.format(NS)) + self.assertEqual(image_root.get('id'), server_dict['image']['id']) + link_nodes = image_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['image']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + flavor_root = root.find('{0}flavor'.format(NS)) + self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['flavor']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + addresses_root = root.find('{0}addresses'.format(NS)) + addresses_dict = server_dict['addresses'] + network_elems = addresses_root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) -- cgit From 9827c92838d144f7c129e9e5545126f100926dba Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 19 Aug 2011 15:58:50 -0400 Subject: pep8 --- nova/tests/api/openstack/test_servers.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 961d2fb7c..6cf2d2d6a 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3671,7 +3671,6 @@ class ServerXMLSerializationTest(test.TestCase): self.assertEqual(str(ip_elem.get('addr')), str(ip['addr'])) - def test_create(self): serializer = servers.ServerXMLSerializer() @@ -4013,7 +4012,8 @@ class ServerXMLSerializationTest(test.TestCase): for i, metadata_elem in enumerate(metadata_elems): (meta_key, meta_value) = server_dict['metadata'].items()[i] self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) - 
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + self.assertEqual(str(metadata_elem.text).strip(), + str(meta_value)) image_root = server_elem.find('{0}image'.format(NS)) self.assertEqual(image_root.get('id'), server_dict['image']['id']) @@ -4024,7 +4024,8 @@ class ServerXMLSerializationTest(test.TestCase): self.assertEqual(link_nodes[i].get(key), value) flavor_root = server_elem.find('{0}flavor'.format(NS)) - self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + self.assertEqual(flavor_root.get('id'), + server_dict['flavor']['id']) link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) self.assertEqual(len(link_nodes), 1) for i, link in enumerate(server_dict['flavor']['links']): -- cgit From 5366332a84b89bc5a056bd7f43e528a908e8d188 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 19 Aug 2011 13:15:42 -0700 Subject: incorporate feedback from brian waldon and brian lamar. Move associate/disassociate to server actions --- nova/api/openstack/contrib/floating_ips.py | 69 ++++++++++++++-------- .../api/openstack/contrib/test_floating_ips.py | 57 ++++++++---------- 2 files changed, 69 insertions(+), 57 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 2f5fdd001..b305ebdcb 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -15,8 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License -from webob import exc +import webob +from nova import compute from nova import exception from nova import log as logging from nova import network @@ -71,7 +72,7 @@ class FloatingIPController(object): try: floating_ip = self.network_api.get_floating_ip(context, id) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(webob.exc.HTTPNotFound()) return _translate_floating_ip_view(floating_ip) @@ -110,40 +111,49 @@ class FloatingIPController(object): self.network_api.release_floating_ip(context, address=floating_ip['address']) - return exc.HTTPAccepted() + return webob.exc.HTTPAccepted() - def associate(self, req, id, body): - """PUT /floating_ips/{id}/associate fixed ip in body """ + def _get_ip_by_id(self, context, value): + """Checks that value is id and then returns its address.""" + return self.network_api.get_floating_ip(context, value)['address'] + + +class Floating_ips(extensions.ExtensionDescriptor): + def __init__(self): + self.compute_api = compute.API() + self.network_api = network.API() + super(Floating_ips, self).__init__() + + def _add_floating_ip(self, input_dict, req, instance_id): + """Associate floating_ip to an instance.""" context = req.environ['nova.context'] - floating_ip = self._get_ip_by_id(context, id) - fixed_ip = body['floating_ip']['fixed_ip'] + try: + address = input_dict['addFloatingIp']['address'] + except KeyError: + msg = _("Address not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) - self.network_api.associate_floating_ip(context, - floating_ip, fixed_ip) + self.compute_api.associate_floating_ip(context, instance_id, address) - floating_ip = self.network_api.get_floating_ip(context, id) - return _translate_floating_ip_view(floating_ip) + return webob.Response(status_int=202) - def disassociate(self, req, id, body=None): - """PUT /floating_ips/{id}/disassociate """ + def _remove_floating_ip(self, input_dict, req, instance_id): + """Dissociate 
floating_ip from an instance.""" context = req.environ['nova.context'] - floating_ip = self.network_api.get_floating_ip(context, id) - address = floating_ip['address'] - # no-op if this ip is already disassociated + try: + address = input_dict['removeFloatingIp']['address'] + except KeyError: + msg = _("Address not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) + + floating_ip = self.network_api.get_floating_ip_by_ip(context, address) if 'fixed_ip' in floating_ip: self.network_api.disassociate_floating_ip(context, address) - floating_ip = self.network_api.get_floating_ip(context, id) - - return _translate_floating_ip_view(floating_ip) - - def _get_ip_by_id(self, context, value): - """Checks that value is id and then returns its address.""" - return self.network_api.get_floating_ip(context, value)['address'] + return webob.Response(status_int=202) -class Floating_ips(extensions.ExtensionDescriptor): def get_name(self): return "Floating_ips" @@ -170,3 +180,14 @@ class Floating_ips(extensions.ExtensionDescriptor): resources.append(res) return resources + + def get_actions(self): + """Return the actions the extension adds, as required by contract.""" + actions = [ + extensions.ActionExtension("servers", "addFloatingIp", + self._add_floating_ip), + extensions.ActionExtension("servers", "removeFloatingIp", + self._remove_floating_ip), + ] + + return actions diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index e506519f4..09234072a 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -17,6 +17,7 @@ import json import stubout import webob +from nova import compute from nova import context from nova import db from nova import test @@ -29,6 +30,11 @@ from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view def network_api_get_floating_ip(self, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': None} + + +def network_api_get_floating_ip_by_ip(self, context, address): return {'id': 1, 'address': '10.10.10.10', 'fixed_ip': {'address': '11.0.0.1'}} @@ -50,7 +56,7 @@ def network_api_release(self, context, address): pass -def network_api_associate(self, context, floating_ip, fixed_ip): +def compute_api_associate(self, context, instance_id, floating_ip): pass @@ -78,14 +84,16 @@ class FloatingIpTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) + self.stubs.Set(network.api.API, "get_floating_ip_by_ip", + network_api_get_floating_ip) self.stubs.Set(network.api.API, "list_floating_ips", network_api_list_floating_ips) self.stubs.Set(network.api.API, "allocate_floating_ip", network_api_allocate) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) - self.stubs.Set(network.api.API, "associate_floating_ip", - network_api_associate) + self.stubs.Set(compute.api.API, "associate_floating_ip", + compute_api_associate) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) self.context = context.get_admin_context() @@ -133,7 +141,6 @@ class FloatingIpTest(test.TestCase): res_dict = json.loads(res.body) self.assertEqual(res_dict['floating_ip']['id'], 1) self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') - self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1') self.assertEqual(res_dict['floating_ip']['instance_id'], None) def 
test_floating_ip_allocate(self): @@ -141,7 +148,6 @@ class FloatingIpTest(test.TestCase): req.method = 'POST' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) - print res self.assertEqual(res.status_int, 200) ip = json.loads(res.body)['floating_ip'] @@ -158,37 +164,22 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) - def test_floating_ip_associate(self): - body = dict(floating_ip=dict(fixed_ip='11.0.0.1')) - req = webob.Request.blank('/v1.1/os-floating-ips/1/associate') - req.method = 'PUT' + def test_add_floating_ip_to_instance(self): + body = dict(addFloatingIp=dict(address='11.0.0.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" req.body = json.dumps(body) req.headers["content-type"] = "application/json" - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - actual = json.loads(res.body)['floating_ip'] - - expected = { - "id": 1, - "instance_id": None, - "ip": "10.10.10.10", - "fixed_ip": "11.0.0.1"} - self.assertEqual(actual, expected) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) - def test_floating_ip_disassociate(self): - body = dict() - req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') - req.method = 'PUT' + def test_remove_floating_ip_from_instance(self): + body = dict(removeFloatingIp=dict(address='11.0.0.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" req.body = json.dumps(body) - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - ip = json.loads(res.body)['floating_ip'] - expected = { - "id": 1, - "instance_id": None, - "ip": '10.10.10.10', - "fixed_ip": '11.0.0.1'} + req.headers["content-type"] = "application/json" - self.assertEqual(ip, expected) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) -- cgit From 468893c667c7ce6cddb9d62906dfcb807fcd6da1 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 19 Aug 2011 13:25:33 -0700 Subject: a few tweaks - remove unused member functions, add comment --- nova/api/openstack/contrib/floating_ips.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index b305ebdcb..3b400807a 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -80,6 +80,7 @@ class FloatingIPController(object): context = req.environ['nova.context'] try: + # FIXME - why does self.network_api.list_floating_ips raise this? 
floating_ips = self.network_api.list_floating_ips(context) except exception.FloatingIpNotFoundForProject: floating_ips = [] @@ -174,9 +175,7 @@ class Floating_ips(extensions.ExtensionDescriptor): res = extensions.ResourceExtension('os-floating-ips', FloatingIPController(), - member_actions={ - 'associate': 'PUT', - 'disassociate': 'PUT'}) + member_actions={}) resources.append(res) return resources -- cgit From ce4ac4be2b813a8f025a9f2891fbc1ed4101c496 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 19 Aug 2011 13:31:49 -0700 Subject: tweak to comment --- nova/api/openstack/contrib/floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 3b400807a..0f27f2f27 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -80,7 +80,7 @@ class FloatingIPController(object): context = req.environ['nova.context'] try: - # FIXME - why does self.network_api.list_floating_ips raise this? + # FIXME(ja) - why does self.network_api.list_floating_ips raise? floating_ips = self.network_api.list_floating_ips(context) except exception.FloatingIpNotFoundForProject: floating_ips = [] -- cgit From 5f6cd490425d8d91870de1b4a492a6cb34502bcb Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 19 Aug 2011 16:36:20 -0400 Subject: Updated accessIPv4 and accessIPv6 to always be in a servers response --- nova/api/openstack/views/servers.py | 6 ++---- nova/tests/api/openstack/test_servers.py | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 8b3a1e221..d2c1b0ba1 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -144,10 +144,8 @@ class ViewBuilderV11(ViewBuilder): elif response['server']['status'] == "BUILD": response['server']['progress'] = 0 - if inst.get('access_ip_v4'): - response['server']['accessIPv4'] = inst['access_ip_v4'] - if inst.get('access_ip_v6'): - response['server']['accessIPv6'] = inst['access_ip_v6'] + response['server']['accessIPv4'] = inst.get('access_ip_v4') or "" + response['server']['accessIPv6'] = inst.get('access_ip_v6') or "" return response diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6cf2d2d6a..d3eb4c517 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -341,6 +341,8 @@ class ServersTest(test.TestCase): "progress": 0, "name": "server1", "status": "BUILD", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "10", @@ -438,6 +440,8 @@ class ServersTest(test.TestCase): created="%(expected_created)s" hostId="" status="BUILD" + accessIPv4="" + accessIPv6="" progress="0"> @@ -503,6 +507,8 @@ class ServersTest(test.TestCase): "progress": 100, "name": "server1", "status": "ACTIVE", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "10", @@ -594,6 +600,8 @@ class ServersTest(test.TestCase): "progress": 100, "name": "server1", "status": "ACTIVE", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "10", @@ -1650,6 +1658,7 @@ class ServersTest(test.TestCase): self.assertEqual(expected_flavor, server['flavor']) self.assertEqual(expected_image, server['image']) self.assertEqual(access_ipv4, server['accessIPv4']) + self.assertEqual(access_ipv6, server['accessIPv6']) def test_create_instance_v1_1(self): 
self._setup_for_create_instance() @@ -1708,6 +1717,8 @@ class ServersTest(test.TestCase): self.assertEqual('server_test', server['name']) self.assertEqual(expected_flavor, server['flavor']) self.assertEqual(expected_image, server['image']) + self.assertEqual('1.2.3.4', server['accessIPv4']) + self.assertEqual('fead::1234', server['accessIPv6']) def test_create_instance_v1_1_invalid_flavor_href(self): self._setup_for_create_instance() @@ -3271,6 +3282,8 @@ class ServersViewBuilderV11Test(test.TestCase): "progress": 0, "name": "test_server", "status": "BUILD", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "5", @@ -3322,6 +3335,8 @@ class ServersViewBuilderV11Test(test.TestCase): "progress": 100, "name": "test_server", "status": "ACTIVE", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "5", @@ -3396,6 +3411,7 @@ class ServersViewBuilderV11Test(test.TestCase): "addresses": {}, "metadata": {}, "accessIPv4": "1.2.3.4", + "accessIPv6": "", "links": [ { "rel": "self", @@ -3448,6 +3464,7 @@ class ServersViewBuilderV11Test(test.TestCase): }, "addresses": {}, "metadata": {}, + "accessIPv4": "", "accessIPv6": "fead::1234", "links": [ { @@ -3483,6 +3500,8 @@ class ServersViewBuilderV11Test(test.TestCase): "progress": 0, "name": "test_server", "status": "BUILD", + "accessIPv4": "", + "accessIPv6": "", "hostId": '', "image": { "id": "5", -- cgit From 9b65cdf0b2d5cc7ed7adcaca0dde4d6e2a10bf95 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 19 Aug 2011 14:16:57 -0700 Subject: better handle malformed input, and add associated tests --- nova/api/openstack/contrib/floating_ips.py | 6 ++++ .../api/openstack/contrib/test_floating_ips.py | 40 ++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 0f27f2f27..40086f778 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,6 +131,9 @@ class Floating_ips(extensions.ExtensionDescriptor): try: address = input_dict['addFloatingIp']['address'] + except TypeError: + msg = _("Missing parameter dict") + raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) @@ -145,6 +148,9 @@ class Floating_ips(extensions.ExtensionDescriptor): try: address = input_dict['removeFloatingIp']['address'] + except TypeError: + msg = _("Missing parameter dict") + raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 09234072a..d2ca9c365 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -183,3 +183,43 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + + def test_bad_address_param_in_remove_floating_ip(self): + body = dict(removeFloatingIp=dict(badparam='11.0.0.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_missing_dict_param_in_remove_floating_ip(self): + body = dict(removeFloatingIp='11.0.0.1') + req = 
webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_bad_address_param_in_add_floating_ip(self): + body = dict(addFloatingIp=dict(badparam='11.0.0.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_missing_dict_param_in_add_floating_ip(self): + body = dict(addFloatingIp='11.0.0.1') + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) -- cgit From bb989133196744779527e36cba22a76bd44e533b Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Sat, 20 Aug 2011 15:38:13 -0700 Subject: add/remove security groups to/from the servers as server actions --- nova/api/openstack/contrib/security_groups.py | 248 ++++++----------- nova/compute/api.py | 72 +++++ nova/exception.py | 10 + .../api/openstack/contrib/test_security_groups.py | 294 ++++++++++----------- 4 files changed, 296 insertions(+), 328 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index a104a42e4..1fd64f3b8 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -168,135 +168,6 @@ class SecurityGroupController(object): "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) - def associate(self, req, id, body): - context = req.environ['nova.context'] - - if not body: - raise exc.HTTPUnprocessableEntity() - - if not 'security_group_associate' in body: - raise exc.HTTPUnprocessableEntity() - - security_group = self._get_security_group(context, id) - - servers = body['security_group_associate'].get('servers') - - if not servers: - msg = _("No servers found") - return exc.HTTPBadRequest(explanation=msg) - - hosts = set() - for server in servers: - if server['id']: - try: - # check if the server exists - inst = db.instance_get(context, server['id']) - #check if the security group is assigned to the server - if self._is_security_group_associated_to_server( - security_group, inst['id']): - msg = _("Security group %s is already associated with" - " the instance %s") % (security_group['id'], - server['id']) - raise exc.HTTPBadRequest(explanation=msg) - - #check if the instance is in running state - if inst['state'] != power_state.RUNNING: - msg = _("Server %s is not in the running state")\ - % server['id'] - raise exc.HTTPBadRequest(explanation=msg) - - hosts.add(inst['host']) - except exception.InstanceNotFound as exp: - return exc.HTTPNotFound(explanation=unicode(exp)) - - # Associate security group with the server in the db - for server in servers: - if server['id']: - db.instance_add_security_group(context.elevated(), - server['id'], - security_group['id']) - - for host in hosts: - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "refresh_security_group_rules", - "args": {"security_group_id": security_group['id']}}) - - return exc.HTTPAccepted() - - def _is_security_group_associated_to_server(self, security_group, - instance_id): - if not security_group: 
- return False - - instances = security_group.get('instances') - if not instances: - return False - - inst_id = None - for inst_id in (instance['id'] for instance in instances \ - if instance_id == instance['id']): - return True - - return False - - def disassociate(self, req, id, body): - context = req.environ['nova.context'] - - if not body: - raise exc.HTTPUnprocessableEntity() - - if not 'security_group_disassociate' in body: - raise exc.HTTPUnprocessableEntity() - - security_group = self._get_security_group(context, id) - - servers = body['security_group_disassociate'].get('servers') - - if not servers: - msg = _("No servers found") - return exc.HTTPBadRequest(explanation=msg) - - hosts = set() - for server in servers: - if server['id']: - try: - # check if the instance exists - inst = db.instance_get(context, server['id']) - # Check if the security group is not associated - # with the instance - if not self._is_security_group_associated_to_server( - security_group, inst['id']): - msg = _("Security group %s is not associated with the" - "instance %s") % (security_group['id'], - server['id']) - raise exc.HTTPBadRequest(explanation=msg) - - #check if the instance is in running state - if inst['state'] != power_state.RUNNING: - msg = _("Server %s is not in the running state")\ - % server['id'] - raise exp.HTTPBadRequest(explanation=msg) - - hosts.add(inst['host']) - except exception.InstanceNotFound as exp: - return exc.HTTPNotFound(explanation=unicode(exp)) - - # Disassociate security group from the server - for server in servers: - if server['id']: - db.instance_remove_security_group(context.elevated(), - server['id'], - security_group['id']) - - for host in hosts: - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "refresh_security_group_rules", - "args": {"security_group_id": security_group['id']}}) - - return exc.HTTPAccepted() - class SecurityGroupRulesController(SecurityGroupController): @@ -461,6 +332,11 @@ class SecurityGroupRulesController(SecurityGroupController): class Security_groups(extensions.ExtensionDescriptor): + + def __init__(self): + self.compute_api = compute.API() + super(Security_groups, self).__init__() + def get_name(self): return "SecurityGroups" @@ -476,6 +352,82 @@ class Security_groups(extensions.ExtensionDescriptor): def get_updated(self): return "2011-07-21T00:00:00+00:00" + def _addSecurityGroup(self, input_dict, req, instance_id): + context = req.environ['nova.context'] + + try: + body = input_dict['addSecurityGroup'] + group_name = body['name'] + instance_id = int(instance_id) + except ValueError: + msg = _("Server id should be integer") + raise exc.HTTPBadRequest(explanation=msg) + except TypeError: + msg = _("Missing parameter dict") + raise webob.exc.HTTPBadRequest(explanation=msg) + except KeyError: + msg = _("Security group not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if not group_name or group_name.strip() == '': + msg = _("Security group name cannot be empty") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + self.compute_api.add_security_group(context, instance_id, + group_name) + except exception.SecurityGroupNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + except exception.InstanceNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + except exception.Invalid as exp: + return exc.HTTPBadRequest(explanation=unicode(exp)) + + return exc.HTTPAccepted() + + def _removeSecurityGroup(self, input_dict, req, instance_id): + context = 
req.environ['nova.context'] + + try: + body = input_dict['removeSecurityGroup'] + group_name = body['name'] + instance_id = int(instance_id) + except ValueError: + msg = _("Server id should be integer") + raise exc.HTTPBadRequest(explanation=msg) + except TypeError: + msg = _("Missing parameter dict") + raise webob.exc.HTTPBadRequest(explanation=msg) + except KeyError: + msg = _("Security group not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if not group_name or group_name.strip() == '': + msg = _("Security group name cannot be empty") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + self.compute_api.remove_security_group(context, instance_id, + group_name) + except exception.SecurityGroupNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + except exception.InstanceNotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + except exception.Invalid as exp: + return exc.HTTPBadRequest(explanation=unicode(exp)) + + return exc.HTTPAccepted() + + def get_actions(self): + """Return the actions the extensions adds""" + actions = [ + extensions.ActionExtension("servers", "addSecurityGroup", + self._addSecurityGroup), + extensions.ActionExtension("servers", "removeSecurityGroup", + self._removeSecurityGroup) + ] + return actions + def get_resources(self): resources = [] @@ -493,10 +445,6 @@ class Security_groups(extensions.ExtensionDescriptor): res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController(), - member_actions={ - 'associate': 'POST', - 'disassociate': 'POST' - }, deserializer=deserializer, serializer=serializer) @@ -534,40 +482,6 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} - def _get_servers(self, node): - servers_dict = {'servers': []} - if node is not None: - servers_node = self.find_first_child_named(node, - 'servers') - if servers_node is not None: - for server_node in self.find_children_named(servers_node, - "server"): - servers_dict['servers'].append( - {"id": self.extract_text(server_node)}) - return servers_dict - - def associate(self, string): - """Deserialize an xml-formatted security group associate request""" - dom = minidom.parseString(string) - node = self.find_first_child_named(dom, - 'security_group_associate') - result = {'body': {}} - if node: - result['body']['security_group_associate'] = \ - self._get_servers(node) - return result - - def disassociate(self, string): - """Deserialize an xml-formatted security group disassociate request""" - dom = minidom.parseString(string) - node = self.find_first_child_named(dom, - 'security_group_disassociate') - result = {'body': {}} - if node: - result['body']['security_group_disassociate'] = \ - self._get_servers(node) - return result - class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ diff --git a/nova/compute/api.py b/nova/compute/api.py index efc9da79b..0c6beacaa 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -613,6 +613,78 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'refresh_provider_fw_rules', 'args': {}}) + def _is_security_group_associated_with_server(self, security_group, + instance_id): + """Check if the security group is already associated + with the instance. If Yes, return True. 
+ """ + + if not security_group: + return False + + instances = security_group.get('instances') + if not instances: + return False + + inst_id = None + for inst_id in (instance['id'] for instance in instances \ + if instance_id == instance['id']): + return True + + return False + + def add_security_group(self, context, instance_id, security_group_name): + """Add security group to the instance""" + security_group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + # check if the server exists + inst = db.instance_get(context, instance_id) + #check if the security group is associated with the server + if self._is_security_group_associated_with_server(security_group, + instance_id): + raise exception.SecurityGroupExistsForInstance( + security_group_id=security_group['id'], + instance_id=instance_id) + + #check if the instance is in running state + if inst['state'] != power_state.RUNNING: + raise exception.InstanceNotRunning(instance_id=instance_id) + + db.instance_add_security_group(context.elevated(), + instance_id, + security_group['id']) + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group['id']}}) + + def remove_security_group(self, context, instance_id, security_group_name): + """Remove the security group associated with the instance""" + security_group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + # check if the server exists + inst = db.instance_get(context, instance_id) + #check if the security group is associated with the server + if not self._is_security_group_associated_with_server(security_group, + instance_id): + raise exception.SecurityGroupNotExistsForInstance( + security_group_id=security_group['id'], + instance_id=instance_id) + + #check if the instance is in running state + if inst['state'] != power_state.RUNNING: + raise exception.InstanceNotRunning(instance_id=instance_id) + + db.instance_remove_security_group(context.elevated(), + instance_id, + security_group['id']) + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group['id']}}) + @scheduler_api.reroute_compute("update") def update(self, context, instance_id, **kwargs): """Updates the instance in the datastore. 
diff --git a/nova/exception.py b/nova/exception.py index b09d50797..e8cb7bcb5 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -541,6 +541,16 @@ class SecurityGroupNotFoundForRule(SecurityGroupNotFound): message = _("Security group with rule %(rule_id)s not found.") +class SecurityGroupExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is already associated" + " with the instance %(instance_id)s") + + +class SecurityGroupNotExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is not associated with" + " the instance %(instance_id)s") + + class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index 894b0c591..b44ebc9fb 100644 --- a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -63,17 +63,17 @@ def return_non_running_server(context, server_id): 'host': "localhost"} -def return_security_group(context, group_id): - return {'id': group_id, "instances": [ +def return_security_group(context, project_id, group_name): + return {'id': 1, 'name': group_name, "instances": [ {'id': 1}]} -def return_security_group_without_instances(context, group_id): - return {'id': group_id} +def return_security_group_without_instances(context, project_id, group_name): + return {'id': 1, 'name': group_name} def return_server_nonexistant(context, server_id): - raise exception.InstanceNotFound() + raise exception.InstanceNotFound(instance_id=server_id) class TestSecurityGroups(test.TestCase): @@ -350,117 +350,89 @@ class TestSecurityGroups(test.TestCase): response = self._delete_security_group(11111111) self.assertEquals(response.status_int, 404) - def test_associate_by_non_existing_security_group_id(self): - req = webob.Request.blank('/v1.1/os-security-groups/111111/associate') + def test_associate_by_non_existing_security_group_name(self): + body = dict(addSecurityGroup=dict(name='non-existing')) + req = webob.Request.blank('/v1.1/servers/1/action') req.headers['Content-Type'] = 'application/json' req.method = 'POST' - body_dict = {"security_group_associate": { - "servers": [ - {"id": '2'} - ] - } - } - req.body = json.dumps(body_dict) + req.body = json.dumps(body) response = req.get_response(fakes.wsgi_app()) self.assertEquals(response.status_int, 404) - def test_associate_by_invalid_security_group_id(self): - req = webob.Request.blank('/v1.1/os-security-groups/invalid/associate') + def test_associate_by_invalid_server_id(self): + body = dict(addSecurityGroup=dict(name='test')) + self.stubs.Set(nova.db, 'security_group_get_by_name', + return_security_group) + req = webob.Request.blank('/v1.1/servers/invalid/action') req.headers['Content-Type'] = 'application/json' req.method = 'POST' - body_dict = {"security_group_associate": { - "servers": [ - {"id": "2"} - ] - } - } - req.body = json.dumps(body_dict) + req.body = json.dumps(body) response = req.get_response(fakes.wsgi_app()) self.assertEquals(response.status_int, 400) def test_associate_without_body(self): - req = webob.Request.blank('/v1.1/os-security-groups/1/associate') + req = webob.Request.blank('/v1.1/servers/1/action') + body = dict(addSecurityGroup=None) + self.stubs.Set(nova.db, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' - req.body = json.dumps(None) + req.body = json.dumps(body) response = 
req.get_response(fakes.wsgi_app())
-        self.assertEquals(response.status_int, 422)
+        self.assertEquals(response.status_int, 400)
 
-    def test_associate_no_security_group_element(self):
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+    def test_associate_no_security_group_name(self):
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        body = dict(addSecurityGroup=dict())
+        self.stubs.Set(nova.db, 'instance_get', return_server)
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate_invalid": {
-                         "servers": [
-                             {"id": "2"}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
-        self.assertEquals(response.status_int, 422)
+        self.assertEquals(response.status_int, 400)
 
-    def test_associate_no_instances(self):
-        #self.stubs.Set(nova.db.api, 'instance_get', return_server)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+    def test_associate_security_group_name_with_whitespaces(self):
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        body = dict(addSecurityGroup=dict(name=" "))
+        self.stubs.Set(nova.db, 'instance_get', return_server)
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate": {
-                         "servers": [
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
 
     def test_associate_non_existing_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+        body = dict(addSecurityGroup=dict(name="test"))
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        req = webob.Request.blank('/v1.1/servers/10000/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate": {
-                         "servers": [
-                             {'id': 2}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 404)
 
     def test_associate_non_running_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
-        self.stubs.Set(nova.db, 'security_group_get',
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
                        return_security_group_without_instances)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+        body = dict(addSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate": {
-                         "servers": [
-                             {'id': 1}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
 
     def test_associate_already_associated_security_group_to_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_server)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        body = dict(addSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate": {
-                         "servers": [
-                             {'id': 1}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
@@ -470,134 +442,120 @@ class TestSecurityGroups(test.TestCase):
         nova.db.instance_add_security_group(mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg())
-        self.stubs.Set(nova.db, 'security_group_get',
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
                        return_security_group_without_instances)
         self.mox.ReplayAll()
 
-        req = webob.Request.blank('/v1.1/os-security-groups/1/associate')
+        body = dict(addSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_associate": {
-                         "servers": [
-                             {'id': 1}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEquals(response.status_int, 202)
+
+    def test_associate_xml(self):
+        self.stubs.Set(nova.db, 'instance_get', return_server)
+        self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
+        nova.db.instance_add_security_group(mox.IgnoreArg(),
+                                            mox.IgnoreArg(),
+                                            mox.IgnoreArg())
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group_without_instances)
+        self.mox.ReplayAll()
+
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        req.headers['Content-Type'] = 'application/xml'
+        req.method = 'POST'
+        req.body = """<addSecurityGroup>
+                          <name>test</name>
+                      </addSecurityGroup>"""
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 202)
 
-    def test_disassociate_by_non_existing_security_group_id(self):
-        req = webob.Request.blank('/v1.1/os-security-groups/1111/disassociate')
+    def test_disassociate_by_non_existing_security_group_name(self):
+        body = dict(removeSecurityGroup=dict(name='non-existing'))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {"id": "2"}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 404)
 
-    def test_disassociate_by_invalid_security_group_id(self):
-        req = webob.Request.blank('/v1.1/os-security-groups/id/disassociate')
+    def test_disassociate_by_invalid_server_id(self):
+        body = dict(removeSecurityGroup=dict(name='test'))
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        req = webob.Request.blank('/v1.1/servers/invalid/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {"id": "2"}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
 
     def test_disassociate_without_body(self):
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        body = dict(removeSecurityGroup=None)
+        self.stubs.Set(nova.db, 'instance_get', return_server)
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        req.body = json.dumps(None)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
-        self.assertEquals(response.status_int, 422)
+        self.assertEquals(response.status_int, 400)
 
-    def test_disassociate_no_security_group_element(self):
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+    def test_disassociate_no_security_group_name(self):
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        body = dict(removeSecurityGroup=dict())
+        self.stubs.Set(nova.db, 'instance_get', return_server)
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate_invalid": {
-                         "servers": [
-                             {"id": "2"}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
-        self.assertEquals(response.status_int, 422)
+        self.assertEquals(response.status_int, 400)
 
-    def test_disassociate_no_instances(self):
-        #self.stubs.Set(nova.db.api, 'instance_get', return_server)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+    def test_disassociate_security_group_name_with_whitespaces(self):
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        body = dict(removeSecurityGroup=dict(name=" "))
+        self.stubs.Set(nova.db, 'instance_get', return_server)
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
 
     def test_disassociate_non_existing_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+        body = dict(removeSecurityGroup=dict(name="test"))
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        req = webob.Request.blank('/v1.1/servers/10000/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {'id': 2}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 404)
 
     def test_disassociate_non_running_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
-        self.stubs.Set(nova.db, 'security_group_get',
-                       return_security_group_without_instances)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        body = dict(removeSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {'id': 1}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
 
-    def test_disassociate_not_associated_security_group_to_instance(self):
+    def test_disassociate_already_associated_security_group_to_instance(self):
         self.stubs.Set(nova.db, 'instance_get', return_server)
-        self.stubs.Set(nova.db, 'security_group_get', return_security_group)
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group_without_instances)
+        body = dict(removeSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {'id': 2}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 400)
@@ -607,20 +565,34 @@ class TestSecurityGroups(test.TestCase):
         nova.db.instance_remove_security_group(mox.IgnoreArg(),
                                                mox.IgnoreArg(),
                                                mox.IgnoreArg())
-        self.stubs.Set(nova.db, 'security_group_get',
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
                        return_security_group)
         self.mox.ReplayAll()
 
-        req = webob.Request.blank('/v1.1/os-security-groups/1/disassociate')
+        body = dict(removeSecurityGroup=dict(name="test"))
+        req = webob.Request.blank('/v1.1/servers/1/action')
         req.headers['Content-Type'] = 'application/json'
         req.method = 'POST'
-        body_dict = {"security_group_disassociate": {
-                         "servers": [
-                             {'id': 1}
-                         ]
-                     }
-        }
-        req.body = json.dumps(body_dict)
+        req.body = json.dumps(body)
+        response = req.get_response(fakes.wsgi_app())
+        self.assertEquals(response.status_int, 202)
+
+    def test_disassociate_xml(self):
+        self.stubs.Set(nova.db, 'instance_get', return_server)
+        self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
+        nova.db.instance_remove_security_group(mox.IgnoreArg(),
+                                               mox.IgnoreArg(),
+                                               mox.IgnoreArg())
+        self.stubs.Set(nova.db, 'security_group_get_by_name',
+                       return_security_group)
+        self.mox.ReplayAll()
+
+        req = webob.Request.blank('/v1.1/servers/1/action')
+        req.headers['Content-Type'] = 'application/xml'
+        req.method = 'POST'
+        req.body = """<removeSecurityGroup>
+                          <name>test</name>
+                      </removeSecurityGroup>"""
         response = req.get_response(fakes.wsgi_app())
         self.assertEquals(response.status_int, 202)
-- cgit
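
For reference, the addSecurityGroup server action exercised by the tests above can be driven by a plain
HTTP client. A minimal sketch follows; the endpoint host/port, server id, and X-Auth-Token value are
placeholder assumptions, while the request body format matches the JSON used in the tests.

    # Hypothetical client call against a Nova OSAPI v1.1 endpoint (host, port,
    # server id, and token are assumptions; the body format comes from the tests).
    import httplib
    import json

    conn = httplib.HTTPConnection('127.0.0.1', 8774)
    body = json.dumps({'addSecurityGroup': {'name': 'test'}})
    headers = {'Content-Type': 'application/json',
               'X-Auth-Token': 'placeholder-token'}
    conn.request('POST', '/v1.1/servers/1/action', body, headers)
    resp = conn.getresponse()
    print resp.status  # the tests expect 202 on success
    conn.close()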